Mirror of https://github.com/tlsnotary/tlsn.git, synced 2026-01-10 13:58:03 -05:00.

Compare commits: v0.1.0-alp ... main (78 commits)
Commits (SHA1):

1801c30599, 0885d40ddf, 610411aae4, 37df1baed7, aeaebc5c60, 2e7e3db11d, 0a68837d0a, 0ec2392716, f99fce5b5a, 6b9f44e7e5, bf1cf2302a, 2884be17e0, df8d79c152, 82d509266b, d5ad768e7c, d25fb320d4, 0539268da7, 427b2896b5, 89d1e594d1, b4380f021e, 8a823d18ec, 7bcfc56bd8, 2909d5ebaa, 7918494ccc, 92dd47b376, 5474a748ce, 92da5adc24, e0ce1ad31a, 3b76877920, 783355772a, e5c59da90b, f059c53c2d, a1367b5428, 9d8124ac9d, 5034366c72, afd8f44261, 21086d2883, cca9a318a4, cb804a6025, 9f849e7c18, 389bceddef, 657838671a, 2f072b2578, 33153d1124, 2d399d5e24, b6d7249b6d, 2a8c1c3382, 7c27162875, eef813712d, 2e94e08fa6, 97d9475335, 38820d6a3f, af85fa100f, 008b901913, db85f68328, fb80aa4cc9, 8dae57d6a7, f2ff4ba792, 9bf3371873, 9d853eb496, 6923ceefd3, 5239c2328a, 6a7c5384a9, 7e469006c0, 55091b5e94, bc1eba18c9, c128ab16ce, a87125ff88, 0933d711d2, 79c230f2fa, 345d5d45ad, 55a26aad77, 1132d441e1, fa2fdfd601, 24e10d664f, c0e084c1ca, b6845dfc5c, 31def9ea81
.github/workflows/bench.yml (vendored): 14 changed lines

@@ -21,17 +21,21 @@ jobs:
- name: Build Docker Image
run: |
docker build -t tlsn-bench . -f ./crates/benches/binary/benches.Dockerfile --build-arg BENCH_TYPE=${{ github.event.inputs.bench_type }}
docker build -t tlsn-bench . -f ./crates/harness/harness.Dockerfile
- name: Run Benchmarks
run: |
docker run --privileged -v ${{ github.workspace }}/crates/benches/binary:/benches tlsn-bench
docker run --privileged -v ./crates/harness/:/benches tlsn-bench bash -c "runner setup; runner --target ${{ github.event.inputs.bench_type }} bench"
- name: Plot Benchmarks
run: |
docker run -v ./crates/harness/:/benches tlsn-bench bash -c "tlsn-harness-plot /benches/bench.toml /benches/metrics.csv --min-max-band --prover-kind ${{ github.event.inputs.bench_type }}"
- name: Upload graphs
uses: actions/upload-artifact@v4
with:
name: benchmark_graphs
path: |
./crates/benches/binary/runtime_vs_latency.html
./crates/benches/binary/runtime_vs_bandwidth.html
./crates/benches/binary/download_size_vs_memory.html
./crates/harness/metrics.csv
./crates/harness/bench.toml
./crates/harness/runtime_vs_latency.html
./crates/harness/runtime_vs_bandwidth.html
.github/workflows/ci.yml (vendored): 239 changed lines

@@ -11,7 +11,6 @@ on:
permissions:
id-token: write
contents: read
attestations: write
env:
CARGO_TERM_COLOR: always

@@ -19,11 +18,10 @@ env:
# We need a higher number of parallel rayon tasks than the default (which is 4)
# in order to prevent a deadlock, c.f.
# - https://github.com/tlsnotary/tlsn/issues/548
# - https://github.com/privacy-scaling-explorations/mpz/issues/178
# - https://github.com/privacy-ethereum/mpz/issues/178
# 32 seems to be big enough for the foreseeable future
RAYON_NUM_THREADS: 32
GIT_COMMIT_HASH: ${{ github.event.pull_request.head.sha || github.sha }}
RUST_VERSION: 1.87.0
RUST_VERSION: 1.90.0
jobs:
clippy:

@@ -34,7 +32,7 @@ jobs:
uses: actions/checkout@v4
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_VERSION }}
components: clippy

@@ -43,7 +41,7 @@ jobs:
uses: Swatinem/rust-cache@v2.7.7
- name: Clippy
run: cargo clippy --keep-going --all-features --all-targets -- -D warnings
run: cargo clippy --keep-going --all-features --all-targets --locked
fmt:
name: Check formatting

@@ -81,10 +79,10 @@ jobs:
uses: Swatinem/rust-cache@v2.7.7
- name: Build
run: cargo build --all-targets
run: cargo build --all-targets --locked
- name: Test
run: cargo test --no-fail-fast
run: cargo test --no-fail-fast --locked
wasm:
name: Build and Test wasm

@@ -112,25 +110,29 @@ jobs:
sudo apt-get install -y chromium-chromedriver
- name: Install wasm-pack
run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
# we install a specific version which supports custom profiles
run: cargo install --git https://github.com/rustwasm/wasm-pack.git --rev 32e52ca
- name: Use caching
uses: Swatinem/rust-cache@v2.7.7
- name: Build harness
working-directory: crates/harness
run: ./build.sh
- name: Run tests
working-directory: crates/harness
run: |
cd crates/wasm-test-runner
./run.sh
./bin/runner setup
./bin/runner --target browser test
- name: Run build
run: |
cd crates/wasm
./build.sh
working-directory: crates/wasm
run: ./build.sh
- name: Dry Run NPM Publish
run: |
cd crates/wasm/pkg
npm publish --dry-run
working-directory: crates/wasm/pkg
run: npm publish --dry-run
- name: Save tlsn-wasm package for tagged builds
if: startsWith(github.ref, 'refs/tags/')

@@ -155,11 +157,8 @@ jobs:
- name: Use caching
uses: Swatinem/rust-cache@v2.7.7
- name: Add custom DNS entry to /etc/hosts for notary TLS test
run: echo "127.0.0.1 tlsnotaryserver.io" | sudo tee -a /etc/hosts
- name: Run integration tests
run: cargo test --profile tests-integration --workspace --exclude tlsn-tls-client --exclude tlsn-tls-core --no-fail-fast -- --include-ignored
run: cargo test --locked --profile tests-integration --workspace --exclude tlsn-tls-client --exclude tlsn-tls-core --no-fail-fast -- --include-ignored
coverage:
runs-on: ubuntu-latest

@@ -174,7 +173,7 @@ jobs:
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Generate code coverage
run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info
run: cargo llvm-cov --all-features --workspace --locked --lcov --output-path lcov.info
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
with:

@@ -182,201 +181,9 @@ jobs:
files: lcov.info
fail_ci_if_error: true
build-sgx:
runs-on: ubuntu-latest
needs: build-and-test
container:
image: rust:latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install Clang
run: |
apt update
apt install -y clang
- name: Use caching
uses: Swatinem/rust-cache@v2.7.7
- name: Build Rust Binary
run: |
cargo build --bin notary-server --release --features tee_quote
cp --verbose target/release/notary-server $GITHUB_WORKSPACE
- name: Upload Binary for use in the Gramine Job
uses: actions/upload-artifact@v4
with:
name: notary-server
path: notary-server
if-no-files-found: error
gramine-sgx:
runs-on: ubuntu-latest
needs: build-sgx
container:
image: gramineproject/gramine:latest
if: github.ref == 'refs/heads/dev' || (startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '.'))
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Restore SGX signing key from secrets
run: |
mkdir -p "${HOME}/.config/gramine/"
echo "${{ secrets.SGX_SIGNING_KEY }}" > "${HOME}/.config/gramine/enclave-key.pem"
# verify key
openssl rsa -in "${HOME}/.config/gramine/enclave-key.pem" -check -noout
- name: Download notary-server binary from build job
uses: actions/download-artifact@v4
with:
name: notary-server
path: crates/notary/server/tee
- name: Install jq
run: |
apt update
apt install -y jq
- name: Use Gramine to calculate measurements
run: |
cd crates/notary/server/tee
chmod +x notary-server
gramine-manifest \
-Dlog_level=debug \
-Darch_libdir=/lib/x86_64-linux-gnu \
-Dself_exe=notary-server \
notary-server.manifest.template \
notary-server.manifest
gramine-sgx-sign \
--manifest notary-server.manifest \
--output notary-server.manifest.sgx
gramine-sgx-sigstruct-view --verbose --output-format=json notary-server.sig | tee >> notary-server-sigstruct.json
cat notary-server-sigstruct.json
mr_enclave=$(jq -r '.mr_enclave' notary-server-sigstruct.json)
mr_signer=$(jq -r '.mr_signer' notary-server-sigstruct.json)
echo "mrenclave=$mr_enclave" >>"$GITHUB_OUTPUT"
echo "#### sgx mrenclave" | tee >>$GITHUB_STEP_SUMMARY
echo "\`\`\`mr_enclave: ${mr_enclave}\`\`\`" | tee >>$GITHUB_STEP_SUMMARY
echo "\`\`\`mr_signer: ${mr_signer}\`\`\`" | tee >>$GITHUB_STEP_SUMMARY
- name: Upload notary-server and signatures
id: upload-notary-server-sgx
uses: actions/upload-artifact@v4
with:
name: notary-server-sgx.zip
path: |
crates/notary/server/tee/notary-server
crates/notary/server/tee/notary-server-sigstruct.json
crates/notary/server/tee/notary-server.sig
crates/notary/server/tee/notary-server.manifest
crates/notary/server/tee/notary-server.manifest.sgx
crates/notary/server/tee/README.md
if-no-files-found: error
- name: Attest Build Provenance
if: startsWith(github.ref, 'refs/tags/') || github.ref == 'refs/heads/dev'
uses: actions/attest-build-provenance@v2
with:
subject-name: notary-server-sgx.zip
subject-digest: sha256:${{ steps.upload-notary-server-sgx.outputs.artifact-digest }}
- uses: geekyeggo/delete-artifact@v5 # Delete notary-server from the build job, It is part of the zipfile with the signature
with:
name: notary-server
gramine-sgx-docker:
runs-on: ubuntu-latest
needs: gramine-sgx
permissions:
contents: read
packages: write
env:
CONTAINER_REGISTRY: ghcr.io
if: github.ref == 'refs/heads/dev' || (startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '.'))
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
sparse-checkout: './crates/notary/server/tee/notary-server-sgx.Dockerfile'
- name: Download notary-server-sgx.zip from gramine-sgx job
uses: actions/download-artifact@v4
with:
name: notary-server-sgx.zip
path: ./notary-server-sgx
- name: Log in to the Container registry
uses: docker/login-action@v2
with:
registry: ${{ env.CONTAINER_REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker image of notary server
id: meta-notary-server-sgx
uses: docker/metadata-action@v4
with:
images: ${{ env.CONTAINER_REGISTRY }}/${{ github.repository }}/notary-server-sgx
- name: Build and push Docker image of notary server
uses: docker/build-push-action@v4
with:
context: .
push: true
tags: ${{ steps.meta-notary-server-sgx.outputs.tags }}
labels: ${{ steps.meta-notary-server-sgx.outputs.labels }}
file: ./crates/notary/server/tee/notary-server-sgx.Dockerfile
build_and_publish_notary_server_image:
name: Build and publish notary server's image
runs-on: ubuntu-latest
needs: build-and-test
permissions:
contents: read
packages: write
env:
CONTAINER_REGISTRY: ghcr.io
if: github.ref == 'refs/heads/dev' || (startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '.'))
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v2
with:
registry: ${{ env.CONTAINER_REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker image of notary server
id: meta-notary-server
uses: docker/metadata-action@v4
with:
images: ${{ env.CONTAINER_REGISTRY }}/${{ github.repository }}/notary-server
- name: Build and push Docker image of notary server
uses: docker/build-push-action@v4
with:
context: .
push: true
tags: ${{ steps.meta-notary-server.outputs.tags }}
labels: ${{ steps.meta-notary-server.outputs.labels }}
file: ./crates/notary/server/notary-server.Dockerfile
create-release-draft:
name: Create Release Draft
needs: build_and_publish_notary_server_image
needs: build-and-test
runs-on: ubuntu-latest
permissions:
contents: write

@@ -391,4 +198,4 @@ jobs:
draft: true
tag_name: ${{ github.ref_name }}
prerelease: true
generate_release_notes: true
generate_release_notes: true
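The `RAYON_NUM_THREADS: 32` environment variable above works around the rayon deadlock referenced in the linked issues. The same pool size can also be pinned programmatically; the following is only an illustrative sketch using the rayon crate and is not part of this diff:

```rust
// Illustrative sketch (not from this diff): pin rayon's global thread pool to
// 32 threads, mirroring the RAYON_NUM_THREADS=32 environment variable set in CI.
fn configure_rayon_pool() {
    rayon::ThreadPoolBuilder::new()
        .num_threads(32)
        .build_global()
        .expect("the global rayon pool can only be initialized once");
}
```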
.github/workflows/releng.yml (vendored): 4 changed lines

@@ -6,7 +6,7 @@ on:
tag:
description: 'Tag to publish to NPM'
required: true
default: 'v0.1.0-alpha.11'
default: 'v0.1.0-alpha.13'
jobs:
release:

@@ -22,7 +22,7 @@ jobs:
RUN_ID=$(gh api \
-H "Accept: application/vnd.github+json" \
"/repos/tlsnotary/tlsn/actions/workflows/ci.yml/runs?per_page=100" \
--jq '.workflow_runs[] | select(.head_branch == "${{ github.event.inputs.tag }}") | .id')
--jq '.workflow_runs[] | select(.head_branch == "${{ github.event.inputs.tag }}") | .id' | sort | tail -1)
if [ -z "$RUN_ID" ]; then
echo "No run found for tag ${{ github.event.inputs.tag }}"
.github/workflows/rustdoc.yml (vendored): 1 changed line

@@ -23,7 +23,6 @@ jobs:
- name: "rustdoc"
run: crates/wasm/build-docs.sh
- name: Deploy
uses: peaceiris/actions-gh-pages@v3
if: ${{ github.ref == 'refs/heads/dev' }}
.gitignore (vendored): 3 changed lines

@@ -29,6 +29,3 @@ target/
# metrics
*.csv
# Cargo.lock
Cargo.lock

@@ -61,3 +61,21 @@ Comments for function arguments must adhere to this pattern:
/// * `arg2` - The second argument.
pub fn compute(...
```

## Cargo.lock

We check in `Cargo.lock` to ensure reproducible builds. It must be updated whenever `Cargo.toml` changes. The TLSNotary team typically updates `Cargo.lock` in a separate commit after dependency changes.

If you want to hide `Cargo.lock` changes from your local `git diff`, run:

```sh
git update-index --assume-unchanged Cargo.lock
```

To start tracking changes again:
```sh
git update-index --no-assume-unchanged Cargo.lock
```

> ⚠️ Note: This only affects your local view. The file is still tracked in the repository and will be checked and used in CI.
Cargo.lock (generated): 5423 changed lines. File diff suppressed because it is too large.
Cargo.toml: 102 changed lines

@@ -1,11 +1,6 @@
[workspace]
members = [
"crates/benches/binary",
"crates/benches/browser/core",
"crates/benches/browser/native",
"crates/benches/browser/wasm",
"crates/benches/library",
"crates/common",
"crates/attestation",
"crates/components/deap",
"crates/components/cipher",
"crates/components/hmac-sha256",

@@ -14,23 +9,20 @@ members = [
"crates/data-fixtures",
"crates/examples",
"crates/formats",
"crates/notary/client",
"crates/notary/common",
"crates/notary/server",
"crates/notary/tests-integration",
"crates/prover",
"crates/server-fixture/certs",
"crates/server-fixture/server",
"crates/tests-integration",
"crates/tls/backend",
"crates/tls/client",
"crates/tls/client-async",
"crates/tls/core",
"crates/mpc-tls",
"crates/tls/server-fixture",
"crates/verifier",
"crates/wasm",
"crates/wasm-test-runner",
"crates/harness/core",
"crates/harness/executor",
"crates/harness/runner",
"crates/harness/plot",
"crates/tlsn",
]
resolver = "2"

@@ -44,16 +36,16 @@ resolver = "2"
inherits = "release"
opt-level = 1
[profile.wasm]
inherits = "release"
lto = true
panic = "abort"
codegen-units = 1
[workspace.dependencies]
notary-client = { path = "crates/notary/client" }
notary-common = { path = "crates/notary/common" }
notary-server = { path = "crates/notary/server" }
tls-server-fixture = { path = "crates/tls/server-fixture" }
tlsn-attestation = { path = "crates/attestation" }
tlsn-cipher = { path = "crates/components/cipher" }
tlsn-benches-browser-core = { path = "crates/benches/browser/core" }
tlsn-benches-browser-native = { path = "crates/benches/browser/native" }
tlsn-benches-library = { path = "crates/benches/library" }
tlsn-common = { path = "crates/common" }
tlsn-core = { path = "crates/core" }
tlsn-data-fixtures = { path = "crates/data-fixtures" }
tlsn-deap = { path = "crates/components/deap" }

@@ -61,7 +53,6 @@ tlsn-formats = { path = "crates/formats" }
tlsn-hmac-sha256 = { path = "crates/components/hmac-sha256" }
tlsn-key-exchange = { path = "crates/components/key-exchange" }
tlsn-mpc-tls = { path = "crates/mpc-tls" }
tlsn-prover = { path = "crates/prover" }
tlsn-server-fixture = { path = "crates/server-fixture/server" }
tlsn-server-fixture-certs = { path = "crates/server-fixture/certs" }
tlsn-tls-backend = { path = "crates/tls/backend" }

@@ -69,21 +60,26 @@ tlsn-tls-client = { path = "crates/tls/client" }
tlsn-tls-client-async = { path = "crates/tls/client-async" }
tlsn-tls-core = { path = "crates/tls/core" }
tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
tlsn-verifier = { path = "crates/verifier" }
tlsn-harness-core = { path = "crates/harness/core" }
tlsn-harness-executor = { path = "crates/harness/executor" }
tlsn-harness-runner = { path = "crates/harness/runner" }
tlsn-wasm = { path = "crates/wasm" }
tlsn = { path = "crates/tlsn" }
mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
mpz-memory-core = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
mpz-vm-core = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
mpz-garble-core = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
mpz-ole = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
mpz-fields = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
mpz-zk = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
mpz-hash = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-common = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ideal-vm = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
rangeset = { version = "0.2" }
serio = { version = "0.2" }

@@ -91,18 +87,22 @@ spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
uid-mux = { version = "0.2" }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
aead = { version = "0.4" }
aes = { version = "0.8" }
aes-gcm = { version = "0.9" }
anyhow = { version = "1.0" }
async-trait = { version = "0.1" }
async-tungstenite = { version = "0.28.2" }
axum = { version = "0.8" }
bcs = { version = "0.1" }
bincode = { version = "1.3" }
blake3 = { version = "1.5" }
bon = { version = "3.6" }
bytes = { version = "1.4" }
cfg-if = { version = "1" }
chromiumoxide = { version = "0.7" }
chrono = { version = "0.4" }
cipher = { version = "0.4" }
clap = { version = "4.5" }
criterion = { version = "0.5" }
ctr = { version = "0.9" }
derive_builder = { version = "0.12" }

@@ -111,23 +111,25 @@ elliptic-curve = { version = "0.13" }
enum-try-as-inner = { version = "0.1" }
env_logger = { version = "0.10" }
futures = { version = "0.3" }
futures-rustls = { version = "0.26" }
futures-util = { version = "0.3" }
futures-rustls = { version = "0.25" }
generic-array = { version = "0.14" }
ghash = { version = "0.5" }
hex = { version = "0.4" }
hmac = { version = "0.12" }
http = { version = "1.1" }
http-body-util = { version = "0.1" }
hyper = { version = "1.1" }
hyper-util = { version = "0.1" }
ipnet = { version = "2.11" }
inventory = { version = "0.3" }
itybity = { version = "0.2" }
js-sys = { version = "0.3" }
k256 = { version = "0.13" }
log = { version = "0.4" }
once_cell = { version = "1.19" }
opaque-debug = { version = "0.3" }
p256 = { version = "0.13" }
pkcs8 = { version = "0.10" }
pin-project-lite = { version = "0.2" }
pollster = { version = "0.4" }
rand = { version = "0.9" }
rand_chacha = { version = "0.9" }
rand_core = { version = "0.9" }

@@ -139,21 +141,29 @@ rs_merkle = { git = "https://github.com/tlsnotary/rs-merkle.git", rev = "85f3e82
rstest = { version = "0.17" }
rustls = { version = "0.21" }
rustls-pemfile = { version = "1.0" }
rustls-webpki = { version = "0.103" }
rustls-pki-types = { version = "1.12" }
sct = { version = "0.7" }
semver = { version = "1.0" }
serde = { version = "1.0" }
serde_json = { version = "1.0" }
sha2 = { version = "0.10" }
signature = { version = "2.2" }
thiserror = { version = "1.0" }
tiny-keccak = { version = "2.0" }
tokio = { version = "1.38" }
tokio-rustls = { version = "0.24" }
tokio-util = { version = "0.7" }
toml = { version = "0.8" }
tower = { version = "0.5" }
tower-http = { version = "0.5" }
tower-service = { version = "0.3" }
tracing = { version = "0.1" }
tracing-subscriber = { version = "0.3" }
uuid = { version = "1.4" }
wasm-bindgen = { version = "0.2" }
wasm-bindgen-futures = { version = "0.4" }
web-spawn = { version = "0.2" }
web-time = { version = "0.2" }
webpki = { version = "0.22" }
webpki-roots = { version = "0.26" }
ws_stream_tungstenite = { version = "0.14" }
webpki-roots = { version = "1.0" }
webpki-root-certs = { version = "1.0" }
ws_stream_wasm = { version = "0.7.5" }
zeroize = { version = "1.8" }
@@ -12,7 +12,7 @@
[actions-url]: https://github.com/tlsnotary/tlsn/actions?query=workflow%3Aci+branch%3Adev
[Website](https://tlsnotary.org) |
[Documentation](https://docs.tlsnotary.org) |
[Documentation](https://tlsnotary.org/docs/intro) |
[API Docs](https://tlsnotary.github.io/tlsn) |
[Discord](https://discord.gg/9XwESXtcN7)

@@ -44,12 +44,9 @@ at your option.
## Directory
- [examples](./crates/examples/): Examples on how to use the TLSNotary protocol.
- [tlsn-prover](./crates/prover/): The library for the prover component.
- [tlsn-verifier](./crates/verifier/): The library for the verifier component.
- [notary](./crates/notary/): Implements the [notary server](https://docs.tlsnotary.org/intro.html#tls-verification-with-a-general-purpose-notary) and its client.
- [components](./crates/components/): Houses low-level libraries.
- [tlsn](./crates/tlsn/): The TLSNotary library.
This repository contains the source code for the Rust implementation of the TLSNotary protocol. For additional tools and implementations related to TLSNotary, visit <https://github.com/tlsnotary>. This includes repositories such as [`tlsn-js`](https://github.com/tlsnotary/tlsn-js), [`tlsn-extension`](https://github.com/tlsnotary/tlsn-extension), [`explorer`](https://github.com/tlsnotary/explorer), among others.
This repository contains the source code for the Rust implementation of the TLSNotary protocol. For additional tools and implementations related to TLSNotary, visit <https://github.com/tlsnotary>. This includes repositories such as [`tlsn-js`](https://github.com/tlsnotary/tlsn-js), [`tlsn-extension`](https://github.com/tlsnotary/tlsn-extension), among others.
## Development
crates/attestation/Cargo.toml (new file): 39 lines

@@ -0,0 +1,39 @@
[package]
name = "tlsn-attestation"
version = "0.1.0-alpha.13"
edition = "2024"

[features]
default = []
fixtures = ["tlsn-core/fixtures", "dep:tlsn-data-fixtures"]

[dependencies]
tlsn-tls-core = { workspace = true }
tlsn-core = { workspace = true }
tlsn-data-fixtures = { workspace = true, optional = true }

bcs = { workspace = true }
blake3 = { workspace = true }
p256 = { workspace = true, features = ["serde"] }
k256 = { workspace = true }
opaque-debug = { workspace = true }
rand = { workspace = true }
serde = { workspace = true, features = ["derive"] }
thiserror = { workspace = true }
tiny-keccak = { workspace = true, features = ["keccak"] }

[dev-dependencies]
alloy-primitives = { version = "1.3.1", default-features = false }
alloy-signer = { version = "1.0", default-features = false }
alloy-signer-local = { version = "1.0", default-features = false }
rand06-compat = { workspace = true }
rstest = { workspace = true }
tlsn-core = { workspace = true, features = ["fixtures"] }
tlsn-data-fixtures = { workspace = true }

[lints]
workspace = true

[[test]]
name = "api"
required-features = ["fixtures"]
@@ -1,19 +1,17 @@
use std::error::Error;
use rand::{rng, Rng};
use rand::{Rng, rng};
use crate::{
attestation::{
Attestation, AttestationConfig, Body, Extension, FieldId, Header, ServerCertCommitment,
VERSION,
},
use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::HashAlgId,
request::Request,
serialize::CanonicalSerialize,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
};
use crate::{
Attestation, AttestationConfig, Body, CryptoProvider, Extension, FieldId, Header,
ServerCertCommitment, VERSION, request::Request, serialize::CanonicalSerialize,
signing::SignatureAlgId,
transcript::TranscriptCommitment,
CryptoProvider,
};
/// Attestation builder state for accepting a request.

@@ -27,6 +25,7 @@ pub struct Sign {
connection_info: Option<ConnectionInfo>,
server_ephemeral_key: Option<ServerEphemKey>,
cert_commitment: ServerCertCommitment,
encoder_secret: Option<EncoderSecret>,
extensions: Vec<Extension>,
transcript_commitments: Vec<TranscriptCommitment>,
}

@@ -88,6 +87,7 @@ impl<'a> AttestationBuilder<'a, Accept> {
connection_info: None,
server_ephemeral_key: None,
cert_commitment,
encoder_secret: None,
transcript_commitments: Vec::new(),
extensions,
},

@@ -108,6 +108,12 @@ impl AttestationBuilder<'_, Sign> {
self
}
/// Sets the secret for encoding commitments.
pub fn encoder_secret(&mut self, secret: EncoderSecret) -> &mut Self {
self.state.encoder_secret = Some(secret);
self
}
/// Adds an extension to the attestation.
pub fn extension(&mut self, extension: Extension) -> &mut Self {
self.state.extensions.push(extension);

@@ -131,6 +137,7 @@ impl AttestationBuilder<'_, Sign> {
connection_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self.state;

@@ -161,6 +168,7 @@ impl AttestationBuilder<'_, Sign> {
AttestationBuilderError::new(ErrorKind::Field, "handshake data was not set")
})?),
cert_commitment: field_id.next(cert_commitment),
encoder_secret: encoder_secret.map(|secret| field_id.next(secret)),
extensions: extensions
.into_iter()
.map(|extension| field_id.next(extension))

@@ -233,7 +241,7 @@ impl std::fmt::Display for AttestationBuilderError {
}
if let Some(source) = &self.source {
write!(f, " caused by: {}", source)?;
write!(f, " caused by: {source}")?;
}
Ok(())

@@ -243,14 +251,15 @@ impl std::fmt::Display for AttestationBuilderError {
#[cfg(test)]
mod test {
use rstest::{fixture, rstest};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
use crate::{
connection::{HandshakeData, HandshakeDataV1_2},
fixtures::{encoding_provider, request_fixture, ConnectionFixture, RequestFixture},
use tlsn_core::{
connection::{CertBinding, CertBindingV1_2},
fixtures::{ConnectionFixture, encoding_provider},
hash::Blake3,
transcript::Transcript,
};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
use crate::fixtures::{RequestFixture, request_fixture};
use super::*;

@@ -400,10 +409,13 @@ mod test {
server_cert_data, ..
} = connection;
let HandshakeData::V1_2(HandshakeDataV1_2 {
let CertBinding::V1_2(CertBindingV1_2 {
server_ephemeral_key,
..
}) = server_cert_data.handshake;
}) = server_cert_data.binding
else {
panic!("expected v1.2 handshake data");
};
attestation_builder.server_ephemeral_key(server_ephemeral_key);

@@ -468,10 +480,13 @@ mod test {
..
} = connection;
let HandshakeData::V1_2(HandshakeDataV1_2 {
let CertBinding::V1_2(CertBindingV1_2 {
server_ephemeral_key,
..
}) = server_cert_data.handshake;
}) = server_cert_data.binding
else {
panic!("expected v1.2 handshake data");
};
attestation_builder
.connection_info(connection_info)
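The hunks above add an `encoder_secret` field and setter to the attestation builder. The following is a hedged sketch of how a Notary might drive this builder end to end, using only types and methods visible elsewhere in this compare; the request, connection data, commitments and a provider holding a signing key are assumed to come from a completed TLSNotary session and are not part of this diff:

```rust
// Hedged sketch, not part of this diff: issue an attestation with the builder
// API shown above. `provider` is assumed to already hold an attestation
// signing key (e.g. configured via provider.signer.set_secp256k1).
use tlsn_attestation::{
    Attestation, AttestationConfig, CryptoProvider, request::Request, signing::SignatureAlgId,
};
use tlsn_core::{
    connection::{ConnectionInfo, ServerEphemKey},
    transcript::TranscriptCommitment,
};

fn issue_attestation(
    request: Request,
    connection_info: ConnectionInfo,
    server_ephemeral_key: ServerEphemKey,
    transcript_commitments: Vec<TranscriptCommitment>,
    provider: &CryptoProvider,
) -> Result<Attestation, Box<dyn std::error::Error>> {
    // Declare which signature algorithms the Notary is willing to use.
    let config = AttestationConfig::builder()
        .supported_signature_algs([SignatureAlgId::SECP256K1])
        .build()?;

    // Accept the Prover's request, then attach the connection data and
    // transcript commitments before signing.
    let mut builder = Attestation::builder(&config).accept_request(request)?;
    builder
        .connection_info(connection_info)
        .server_ephemeral_key(server_ephemeral_key)
        .transcript_commitments(transcript_commitments);

    Ok(builder.build(provider)?)
}
```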
@@ -1,9 +1,9 @@
use std::{fmt::Debug, sync::Arc};
use tlsn_core::hash::HashAlgId;
use crate::{
attestation::{Extension, InvalidExtension},
hash::{HashAlgId, DEFAULT_SUPPORTED_HASH_ALGS},
signing::SignatureAlgId,
Extension, InvalidExtension, hash::DEFAULT_SUPPORTED_HASH_ALGS, signing::SignatureAlgId,
};
type ExtensionValidator = Arc<dyn Fn(&[Extension]) -> Result<(), InvalidExtension> + Send + Sync>;

@@ -124,7 +124,7 @@ impl AttestationConfigBuilder {
///
/// # Example
/// ```
/// # use tlsn_core::attestation::{AttestationConfig, InvalidExtension};
/// # use tlsn_attestation::{AttestationConfig, InvalidExtension};
/// # let mut builder = AttestationConfig::builder();
/// builder.extension_validator(|extensions| {
/// for extension in extensions {
crates/attestation/src/connection.rs (new file): 149 lines

@@ -0,0 +1,149 @@
//! Types for committing details of a connection.
//!
//! ## Commitment
//!
//! During the TLS handshake the Notary receives the Server's ephemeral public
//! key, and this key serves as a binding commitment to the identity of the
//! Server. The ephemeral key itself does not reveal the Server's identity, but
//! it is bound to it via a signature created using the Server's
//! X.509 certificate.
//!
//! A Prover can withhold the Server's signature and certificate chain from the
//! Notary to improve privacy and censorship resistance.
//!
//! ## Proving the Server's identity
//!
//! A Prover can prove the Server's identity to a Verifier by sending a
//! [`ServerIdentityProof`]. This proof contains all the information required to
//! establish the link between the TLS connection and the Server's X.509
//! certificate. A Verifier checks the Server's certificate against their own
//! trust anchors, the same way a typical TLS client would.

use serde::{Deserialize, Serialize};

use tlsn_core::{
connection::{HandshakeData, HandshakeVerificationError, ServerEphemKey, ServerName},
hash::{Blinded, HashAlgorithm, HashProviderError, TypedHash},
};

use crate::{CryptoProvider, hash::HashAlgorithmExt, serialize::impl_domain_separator};

/// Opens a [`ServerCertCommitment`].
#[derive(Clone, Serialize, Deserialize)]
pub struct ServerCertOpening(Blinded<HandshakeData>);

impl_domain_separator!(ServerCertOpening);

opaque_debug::implement!(ServerCertOpening);

impl ServerCertOpening {
pub(crate) fn new(data: HandshakeData) -> Self {
Self(Blinded::new(data))
}

pub(crate) fn commit(&self, hasher: &dyn HashAlgorithm) -> ServerCertCommitment {
ServerCertCommitment(TypedHash {
alg: hasher.id(),
value: hasher.hash_separated(self),
})
}

/// Returns the server identity data.
pub fn data(&self) -> &HandshakeData {
self.0.data()
}
}

/// Commitment to a server certificate.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ServerCertCommitment(pub(crate) TypedHash);

impl_domain_separator!(ServerCertCommitment);

/// TLS server identity proof.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerIdentityProof {
name: ServerName,
opening: ServerCertOpening,
}

impl ServerIdentityProof {
pub(crate) fn new(name: ServerName, opening: ServerCertOpening) -> Self {
Self { name, opening }
}

/// Verifies the server identity proof.
///
/// # Arguments
///
/// * `provider` - Crypto provider.
/// * `time` - The time of the connection.
/// * `server_ephemeral_key` - The server's ephemeral key.
/// * `commitment` - Commitment to the server certificate.
pub fn verify_with_provider(
self,
provider: &CryptoProvider,
time: u64,
server_ephemeral_key: &ServerEphemKey,
commitment: &ServerCertCommitment,
) -> Result<ServerName, ServerIdentityProofError> {
let hasher = provider.hash.get(&commitment.0.alg)?;

if commitment.0.value != hasher.hash_separated(&self.opening) {
return Err(ServerIdentityProofError {
kind: ErrorKind::Commitment,
message: "certificate opening does not match commitment".to_string(),
});
}

// Verify certificate and identity.
self.opening
.data()
.verify(&provider.cert, time, server_ephemeral_key, &self.name)?;

Ok(self.name)
}
}

/// Error for [`ServerIdentityProof`].
#[derive(Debug, thiserror::Error)]
#[error("server identity proof error: {kind}: {message}")]
pub struct ServerIdentityProofError {
kind: ErrorKind,
message: String,
}

impl From<HashProviderError> for ServerIdentityProofError {
fn from(err: HashProviderError) -> Self {
Self {
kind: ErrorKind::Provider,
message: err.to_string(),
}
}
}

impl From<HandshakeVerificationError> for ServerIdentityProofError {
fn from(err: HandshakeVerificationError) -> Self {
Self {
kind: ErrorKind::Certificate,
message: err.to_string(),
}
}
}

#[derive(Debug)]
enum ErrorKind {
Provider,
Commitment,
Certificate,
}

impl std::fmt::Display for ErrorKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ErrorKind::Provider => write!(f, "provider"),
ErrorKind::Commitment => write!(f, "commitment"),
ErrorKind::Certificate => write!(f, "certificate"),
}
}
}
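A hedged verifier-side sketch of the identity-proof flow described in the module docs above, built directly on the `verify_with_provider` signature defined in this file; the proof, commitment, ephemeral key and connection time are assumed to come from a received presentation and are not part of this diff:

```rust
// Hedged sketch, not part of this diff: check a ServerIdentityProof against
// the certificate commitment recorded for the connection.
use tlsn_attestation::{
    CryptoProvider,
    connection::{ServerCertCommitment, ServerIdentityProof},
};
use tlsn_core::connection::{ServerEphemKey, ServerName};

fn check_server_identity(
    proof: ServerIdentityProof,
    commitment: &ServerCertCommitment,
    server_ephemeral_key: &ServerEphemKey,
    connection_time: u64,
) -> Result<ServerName, Box<dyn std::error::Error>> {
    let provider = CryptoProvider::default();
    // Re-hashes the opening, compares it to the commitment, then verifies the
    // certificate chain and server name against the provider's trust anchors.
    let name =
        proof.verify_with_provider(&provider, connection_time, server_ephemeral_key, commitment)?;
    Ok(name)
}
```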
@@ -2,7 +2,7 @@ use std::error::Error;
use serde::{Deserialize, Serialize};
use crate::hash::impl_domain_separator;
use crate::serialize::impl_domain_separator;
/// An attestation extension.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
crates/attestation/src/fixtures.rs (new file): 124 lines

@@ -0,0 +1,124 @@
//! Attestation fixtures.

use tlsn_core::{
connection::{CertBinding, CertBindingV1_2},
fixtures::ConnectionFixture,
hash::HashAlgorithm,
transcript::{
Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment,
encoding::{EncodingProvider, EncodingTree},
},
};

use crate::{
Attestation, AttestationConfig, CryptoProvider, Extension,
request::{Request, RequestConfig},
signing::SignatureAlgId,
};

/// A Request fixture used for testing.
#[allow(missing_docs)]
pub struct RequestFixture {
pub encoding_tree: EncodingTree,
pub request: Request,
}

/// Returns a request fixture for testing.
pub fn request_fixture(
transcript: Transcript,
encodings_provider: impl EncodingProvider,
connection: ConnectionFixture,
encoding_hasher: impl HashAlgorithm,
extensions: Vec<Extension>,
) -> RequestFixture {
let provider = CryptoProvider::default();
let (sent_len, recv_len) = transcript.len();

let ConnectionFixture {
server_name,
server_cert_data,
..
} = connection;

let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript);
transcript_commitment_builder
.commit_sent(&(0..sent_len))
.unwrap()
.commit_recv(&(0..recv_len))
.unwrap();
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();

// Prover constructs encoding tree.
let encoding_tree = EncodingTree::new(
&encoding_hasher,
transcripts_commitment_config.iter_encoding(),
&encodings_provider,
)
.unwrap();

let mut builder = RequestConfig::builder();

for extension in extensions {
builder.extension(extension);
}

let request_config = builder.build().unwrap();

let mut request_builder = Request::builder(&request_config);
request_builder
.server_name(server_name)
.handshake_data(server_cert_data)
.transcript(transcript);

let (request, _) = request_builder.build(&provider).unwrap();

RequestFixture {
encoding_tree,
request,
}
}

/// Returns an attestation fixture for testing.
pub fn attestation_fixture(
request: Request,
connection: ConnectionFixture,
signature_alg: SignatureAlgId,
transcript_commitments: &[TranscriptCommitment],
) -> Attestation {
let ConnectionFixture {
connection_info,
server_cert_data,
..
} = connection;

let CertBinding::V1_2(CertBindingV1_2 {
server_ephemeral_key,
..
}) = server_cert_data.binding
else {
panic!("expected v1.2 binding data");
};

let mut provider = CryptoProvider::default();
match signature_alg {
SignatureAlgId::SECP256K1 => provider.signer.set_secp256k1(&[42u8; 32]).unwrap(),
SignatureAlgId::SECP256R1 => provider.signer.set_secp256r1(&[42u8; 32]).unwrap(),
_ => unimplemented!(),
};

let attestation_config = AttestationConfig::builder()
.supported_signature_algs([signature_alg])
.build()
.unwrap();

let mut attestation_builder = Attestation::builder(&attestation_config)
.accept_request(request)
.unwrap();

attestation_builder
.connection_info(connection_info)
.server_ephemeral_key(server_ephemeral_key)
.transcript_commitments(transcript_commitments.to_vec());

attestation_builder.build(&provider).unwrap()
}
crates/attestation/src/hash.rs (new file): 19 lines

@@ -0,0 +1,19 @@
use tlsn_core::hash::{Hash, HashAlgId, HashAlgorithm};

use crate::serialize::{CanonicalSerialize, DomainSeparator};

pub(crate) const DEFAULT_SUPPORTED_HASH_ALGS: &[HashAlgId] =
&[HashAlgId::SHA256, HashAlgId::BLAKE3, HashAlgId::KECCAK256];

pub(crate) trait HashAlgorithmExt: HashAlgorithm {
#[allow(dead_code)]
fn hash_canonical<T: CanonicalSerialize>(&self, data: &T) -> Hash {
self.hash(&data.serialize())
}

fn hash_separated<T: DomainSeparator + CanonicalSerialize>(&self, data: &T) -> Hash {
self.hash_prefixed(data.domain(), &data.serialize())
}
}

impl<T: HashAlgorithm + ?Sized> HashAlgorithmExt for T {}
@@ -1,4 +1,28 @@
//! Attestation types.
//! TLSNotary attestation types.
//!
//! # Introduction
//!
//! This library provides core functionality for TLSNotary **attestations**.
//!
//! Once the TLS commitment protocol has been completed the Prover holds a
//! collection of commitments pertaining to the TLS connection. Most
//! importantly, the Prover is committed to the
//! [`ServerName`](tlsn_core::connection::ServerName),
//! and the [`Transcript`](tlsn_core::transcript::Transcript) of application
//! data. Subsequently, the Prover can request an [`Attestation`] from the
//! Notary who will include the commitments as well as any additional
//! information which may be useful to an attestation Verifier.
//!
//! Holding an attestation, the Prover can construct a
//! [`Presentation`](crate::presentation::Presentation) which facilitates
//! selectively disclosing various aspects of the TLS connection to a Verifier.
//! If the Verifier trusts the Notary, or more specifically the verifying key of
//! the attestation, then the Verifier can trust the authenticity of the
//! information disclosed in the presentation.
//!
//! **Be sure to check out the various submodules for more information.**
//!
//! # Structure
//!
//! An attestation is a cryptographically signed document issued by a Notary who
//! witnessed a TLS connection. It contains various fields which can be used to

@@ -24,37 +48,194 @@
//! extensions](crate::request::RequestConfigBuilder::extension)
//! to their attestation request, provided that the Notary supports them
//! (disallowed by default). A Notary may also be configured to
//! [validate](crate::attestation::AttestationConfigBuilder::extension_validator)
//! [validate](crate::AttestationConfigBuilder::extension_validator)
//! any extensions requested by a Prover using custom application logic.
//! Additionally, a Notary may
//! [include](crate::attestation::AttestationBuilder::extension)
//! [include](crate::AttestationBuilder::extension)
//! their own extensions.
//!
//! # Committing to the transcript
//!
//! The TLS commitment protocol produces commitments to the entire transcript of
//! application data. However, we may want to disclose only a subset of the data
//! in a presentation. Prior to attestation, the Prover has the opportunity to
//! slice and dice the commitments into smaller sections which can be
//! selectively disclosed. Additionally, the Prover may want to use different
//! commitment schemes depending on the context they expect to disclose.
//!
//! The primary API for this process is the
//! [`TranscriptCommitConfigBuilder`](tlsn_core::transcript::TranscriptCommitConfigBuilder)
//! which is used to build up a configuration.
//!
//! ```no_run
//! # use tlsn_core::transcript::{TranscriptCommitConfigBuilder, Transcript, Direction};
//! # use tlsn_core::hash::HashAlgId;
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let transcript: Transcript = unimplemented!();
//! let (sent_len, recv_len) = transcript.len();
//!
//! // Create a new configuration builder.
//! let mut builder = TranscriptCommitConfigBuilder::new(&transcript);
//!
//! // Specify all the transcript commitments we want to make.
//! builder
//! // Use BLAKE3 for encoding commitments.
//! .encoding_hash_alg(HashAlgId::BLAKE3)
//! // Commit to all sent data.
//! .commit_sent(&(0..sent_len))?
//! // Commit to the first 10 bytes of sent data.
//! .commit_sent(&(0..10))?
//! // Skip some bytes so it can be omitted in the presentation.
//! .commit_sent(&(20..sent_len))?
//! // Commit to all received data.
//! .commit_recv(&(0..recv_len))?;
//!
//! let config = builder.build()?;
//! # Ok(())
//! # }
//! ```
//!
//! # Requesting an attestation
//!
//! The first step in the attestation protocol is for the Prover to make a
//! [`Request`](crate::request::Request), which can be configured using the
//! associated [builder](crate::request::RequestConfigBuilder). With it the
//! Prover can configure some of the details of the attestation, such as which
//! cryptographic algorithms are used (if the Notary supports them).
//!
//! The Prover may also request for extensions to be added to the attestation,
//! see [here](#extensions) for more information.
//!
//! Upon being issued an attestation, the Prover will also hold a corresponding
//! [`Secrets`] which contains all private information. This pair can be stored
//! and used later to construct a
//! [`Presentation`](crate::presentation::Presentation), [see
//! below](#constructing-a-presentation).
//!
//! # Issuing an attestation
//!
//! Upon receiving a request, the Notary can issue an [`Attestation`] which can
//! be configured using the associated
//! [builder](crate::AttestationConfigBuilder).
//!
//! The Notary's [`CryptoProvider`] must be configured with an appropriate
//! signing key for attestations. See
//! [`SignerProvider`](crate::signing::SignerProvider) for more information.
//!
//! # Constructing a presentation
//!
//! A Prover can use an [`Attestation`] and the corresponding [`Secrets`] to
//! construct a verifiable [`Presentation`](crate::presentation::Presentation).
//!
//! ```no_run
//! # use tlsn_attestation::{Attestation, CryptoProvider, Secrets, presentation::Presentation};
//! # use tlsn_core::transcript::{TranscriptCommitmentKind, Direction};
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let attestation: Attestation = unimplemented!();
//! # let secrets: Secrets = unimplemented!();
//! # let crypto_provider: CryptoProvider = unimplemented!();
//! let (_sent_len, recv_len) = secrets.transcript().len();
//!
//! // First, we decide which application data we would like to disclose.
//! let mut builder = secrets.transcript_proof_builder();
//!
//! builder
//! // Use transcript encoding commitments.
//! .commitment_kinds(&[TranscriptCommitmentKind::Encoding])
//! // Disclose the first 10 bytes of the sent data.
//! .reveal(&(0..10), Direction::Sent)?
//! // Disclose all of the received data.
//! .reveal(&(0..recv_len), Direction::Received)?;
//!
//! let transcript_proof = builder.build()?;
//!
//! // Most cases we will also disclose the server identity.
//! let identity_proof = secrets.identity_proof();
//!
//! // Now we can construct the presentation.
//! let mut builder = attestation.presentation_builder(&crypto_provider);
//!
//! builder
//! .identity_proof(identity_proof)
//! .transcript_proof(transcript_proof);
//!
//! // Finally, we build the presentation. Send it to a verifier!
//! let presentation: Presentation = builder.build()?;
//! # Ok(())
//! # }
//! ```
//!
//! # Verifying a presentation
//!
//! Verifying a presentation is as simple as checking the verifier trusts the
//! verifying key then calling
//! [`Presentation::verify`](crate::presentation::Presentation::verify).
//!
//! ```no_run
//! # use tlsn_attestation::{CryptoProvider, presentation::{Presentation, PresentationOutput}, signing::VerifyingKey};
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let presentation: Presentation = unimplemented!();
//! # let trusted_key: VerifyingKey = unimplemented!();
//! # let crypto_provider: CryptoProvider = unimplemented!();
//! // Assert that we trust the verifying key.
//! assert_eq!(presentation.verifying_key(), &trusted_key);
//!
//! let PresentationOutput {
//! attestation,
//! server_name,
//! connection_info,
//! transcript,
//! ..
//! } = presentation.verify(&crypto_provider)?;
//! # Ok(())
//! # }
//! ```

#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![forbid(unsafe_code)]

mod builder;
mod config;
pub mod connection;
mod extension;
#[cfg(any(test, feature = "fixtures"))]
pub mod fixtures;
pub(crate) mod hash;
pub mod presentation;
mod proof;
mod provider;
pub mod request;
mod secrets;
pub(crate) mod serialize;
pub mod signing;

use std::fmt;

use rand::distr::{Distribution, StandardUniform};
use serde::{Deserialize, Serialize};

use crate::{
connection::{ConnectionInfo, ServerCertCommitment, ServerEphemKey},
hash::{impl_domain_separator, Hash, HashAlgorithm, HashAlgorithmExt, TypedHash},
use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::{Hash, HashAlgorithm, TypedHash},
merkle::MerkleTree,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
};

use crate::{
connection::ServerCertCommitment,
hash::HashAlgorithmExt,
presentation::PresentationBuilder,
serialize::impl_domain_separator,
signing::{Signature, VerifyingKey},
transcript::TranscriptCommitment,
CryptoProvider,
};

pub use builder::{AttestationBuilder, AttestationBuilderError};
pub use config::{AttestationConfig, AttestationConfigBuilder, AttestationConfigError};
pub use extension::{Extension, InvalidExtension};
pub use proof::{AttestationError, AttestationProof};

pub use provider::CryptoProvider;
pub use secrets::Secrets;
/// Current version of attestations.
pub const VERSION: Version = Version(0);

@@ -127,8 +308,6 @@ pub enum FieldKind {
}

/// Attestation header.
///
/// See [module level documentation](crate::attestation) for more information.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Header {
/// An identifier for the attestation.

@@ -142,14 +321,13 @@ pub struct Header {
impl_domain_separator!(Header);

/// Attestation body.
///
/// See [module level documentation](crate::attestation) for more information.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Body {
verifying_key: Field<VerifyingKey>,
connection_info: Field<ConnectionInfo>,
server_ephemeral_key: Field<ServerEphemKey>,
cert_commitment: Field<ServerCertCommitment>,
encoder_secret: Option<Field<EncoderSecret>>,
extensions: Vec<Field<Extension>>,
transcript_commitments: Vec<Field<TranscriptCommitment>>,
}

@@ -195,6 +373,7 @@ impl Body {
connection_info: conn_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self;

@@ -212,6 +391,13 @@ impl Body {
),
];

if let Some(encoder_secret) = encoder_secret {
fields.push((
encoder_secret.id,
hasher.hash_separated(&encoder_secret.data),
));
}

for field in extensions.iter() {
fields.push((field.id, hasher.hash_separated(&field.data)));
}

@@ -246,8 +432,6 @@ impl Body {
}

/// An attestation document.
///
/// See [module level documentation](crate::attestation) for more information.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Attestation {
/// The signature of the attestation.
@@ -26,12 +26,15 @@ use std::fmt;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
attestation::{Attestation, AttestationError, AttestationProof, Extension},
|
||||
connection::{ConnectionInfo, ServerIdentityProof, ServerIdentityProofError, ServerName},
|
||||
signing::VerifyingKey,
|
||||
use tlsn_core::{
|
||||
connection::{ConnectionInfo, ServerName},
|
||||
transcript::{PartialTranscript, TranscriptProof, TranscriptProofError},
|
||||
CryptoProvider,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
Attestation, AttestationError, AttestationProof, CryptoProvider, Extension,
|
||||
connection::{ServerIdentityProof, ServerIdentityProofError},
|
||||
signing::VerifyingKey,
|
||||
};
|
||||
|
||||
/// A verifiable presentation.
|
||||
@@ -86,8 +89,13 @@ impl Presentation {
|
||||
let transcript = transcript
|
||||
.map(|transcript| {
|
||||
transcript.verify_with_provider(
|
||||
provider,
|
||||
&provider.hash,
|
||||
&attestation.body.connection_info().transcript_length,
|
||||
attestation
|
||||
.body
|
||||
.encoder_secret
|
||||
.as_ref()
|
||||
.map(|field| &field.data),
|
||||
attestation.body.transcript_commitments(),
|
||||
)
|
||||
})
|
||||
@@ -186,7 +194,7 @@ impl fmt::Display for PresentationBuilderError {
|
||||
}
|
||||
|
||||
if let Some(source) = &self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
write!(f, " caused by: {source}")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -227,7 +235,7 @@ impl fmt::Display for PresentationError {
|
||||
}
|
||||
|
||||
if let Some(source) = &self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
write!(f, " caused by: {source}")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -2,13 +2,15 @@ use std::fmt;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
attestation::{Attestation, Body, Header},
|
||||
use tlsn_core::{
|
||||
hash::HashAlgorithm,
|
||||
merkle::{MerkleProof, MerkleTree},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
Attestation, Body, CryptoProvider, Header,
|
||||
serialize::CanonicalSerialize,
|
||||
signing::{Signature, VerifyingKey},
|
||||
CryptoProvider,
|
||||
};
|
||||
|
||||
/// Proof of an attestation.
|
||||
@@ -165,7 +167,7 @@ impl fmt::Display for AttestationError {
|
||||
}
|
||||
|
||||
if let Some(source) = &self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
write!(f, " caused by: {source}")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -1,12 +1,6 @@
|
||||
use tls_core::{
|
||||
anchors::{OwnedTrustAnchor, RootCertStore},
|
||||
verify::WebPkiVerifier,
|
||||
};
|
||||
use tlsn_core::{hash::HashProvider, webpki::ServerCertVerifier};
|
||||
|
||||
use crate::{
|
||||
hash::HashProvider,
|
||||
signing::{SignatureVerifierProvider, SignerProvider},
|
||||
};
|
||||
use crate::signing::{SignatureVerifierProvider, SignerProvider};
|
||||
|
||||
/// Cryptography provider.
|
||||
///
|
||||
@@ -17,7 +11,7 @@ use crate::{
|
||||
/// implementations.
|
||||
///
|
||||
/// Algorithms are uniquely identified using an 8-bit ID, eg.
|
||||
/// [`HashAlgId`](crate::hash::HashAlgId), half of which is reserved for the
|
||||
/// [`HashAlgId`](tlsn_core::hash::HashAlgId), half of which is reserved for the
|
||||
/// officially supported algorithms. If you think that a new algorithm should be
|
||||
/// added to the official set, please open an issue. Beware that other parties
|
||||
/// may assign different algorithms to the same ID as you, and we make no effort
|
||||
@@ -30,7 +24,7 @@ pub struct CryptoProvider {
|
||||
/// This is used to verify the server's certificate chain.
|
||||
///
|
||||
/// The default verifier uses the Mozilla root certificates.
|
||||
pub cert: WebPkiVerifier,
|
||||
pub cert: ServerCertVerifier,
|
||||
/// Signer provider.
|
||||
///
|
||||
/// This is used for signing attestations.
|
||||
@@ -47,21 +41,9 @@ impl Default for CryptoProvider {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
hash: Default::default(),
|
||||
cert: default_cert_verifier(),
|
||||
cert: ServerCertVerifier::mozilla(),
|
||||
signer: Default::default(),
|
||||
signature: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn default_cert_verifier() -> WebPkiVerifier {
|
||||
let mut root_store = RootCertStore::empty();
|
||||
root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| {
|
||||
OwnedTrustAnchor::from_subject_spki_name_constraints(
|
||||
ta.subject.as_ref(),
|
||||
ta.subject_public_key_info.as_ref(),
|
||||
ta.name_constraints.as_ref().map(|nc| nc.as_ref()),
|
||||
)
|
||||
}));
|
||||
WebPkiVerifier::new(root_store, None)
|
||||
}
|
||||
@@ -18,12 +18,9 @@ mod config;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
attestation::{Attestation, Extension},
|
||||
connection::ServerCertCommitment,
|
||||
hash::HashAlgId,
|
||||
signing::SignatureAlgId,
|
||||
};
|
||||
use tlsn_core::hash::HashAlgId;
|
||||
|
||||
use crate::{Attestation, Extension, connection::ServerCertCommitment, signing::SignatureAlgId};
|
||||
|
||||
pub use builder::{RequestBuilder, RequestBuilderError};
|
||||
pub use config::{RequestConfig, RequestConfigBuilder, RequestConfigBuilderError};
|
||||
@@ -39,7 +36,7 @@ pub struct Request {
|
||||
|
||||
impl Request {
|
||||
/// Returns a new request builder.
|
||||
pub fn builder(config: &RequestConfig) -> RequestBuilder {
|
||||
pub fn builder(config: &RequestConfig) -> RequestBuilder<'_> {
|
||||
RequestBuilder::new(config)
|
||||
}
|
||||
|
||||
@@ -85,18 +82,19 @@ pub struct InconsistentAttestation(String);
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use tlsn_core::{
|
||||
connection::TranscriptLength,
|
||||
fixtures::{ConnectionFixture, encoding_provider},
|
||||
hash::{Blake3, HashAlgId},
|
||||
transcript::Transcript,
|
||||
};
|
||||
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
|
||||
|
||||
use crate::{
|
||||
connection::{ServerCertOpening, TranscriptLength},
|
||||
fixtures::{
|
||||
attestation_fixture, encoding_provider, request_fixture, ConnectionFixture,
|
||||
RequestFixture,
|
||||
},
|
||||
hash::{Blake3, HashAlgId},
|
||||
signing::SignatureAlgId,
|
||||
transcript::Transcript,
|
||||
CryptoProvider,
|
||||
connection::ServerCertOpening,
|
||||
fixtures::{RequestFixture, attestation_fixture, request_fixture},
|
||||
signing::SignatureAlgId,
|
||||
};
|
||||
|
||||
#[test]
|
||||
@@ -1,16 +1,19 @@
|
||||
use crate::{
|
||||
connection::{ServerCertData, ServerCertOpening, ServerName},
|
||||
request::{Request, RequestConfig},
|
||||
secrets::Secrets,
|
||||
use tlsn_core::{
|
||||
connection::{HandshakeData, ServerName},
|
||||
transcript::{Transcript, TranscriptCommitment, TranscriptSecret},
|
||||
CryptoProvider,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
CryptoProvider, Secrets,
|
||||
connection::ServerCertOpening,
|
||||
request::{Request, RequestConfig},
|
||||
};
|
||||
|
||||
/// Builder for [`Request`].
|
||||
pub struct RequestBuilder<'a> {
|
||||
config: &'a RequestConfig,
|
||||
server_name: Option<ServerName>,
|
||||
server_cert_data: Option<ServerCertData>,
|
||||
handshake_data: Option<HandshakeData>,
|
||||
transcript: Option<Transcript>,
|
||||
transcript_commitments: Vec<TranscriptCommitment>,
|
||||
transcript_commitment_secrets: Vec<TranscriptSecret>,
|
||||
@@ -22,7 +25,7 @@ impl<'a> RequestBuilder<'a> {
|
||||
Self {
|
||||
config,
|
||||
server_name: None,
|
||||
server_cert_data: None,
|
||||
handshake_data: None,
|
||||
transcript: None,
|
||||
transcript_commitments: Vec::new(),
|
||||
transcript_commitment_secrets: Vec::new(),
|
||||
@@ -35,9 +38,9 @@ impl<'a> RequestBuilder<'a> {
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the server identity data.
|
||||
pub fn server_cert_data(&mut self, data: ServerCertData) -> &mut Self {
|
||||
self.server_cert_data = Some(data);
|
||||
/// Sets the handshake data.
|
||||
pub fn handshake_data(&mut self, data: HandshakeData) -> &mut Self {
|
||||
self.handshake_data = Some(data);
|
||||
self
|
||||
}
|
||||
|
||||
@@ -66,7 +69,7 @@ impl<'a> RequestBuilder<'a> {
|
||||
let Self {
|
||||
config,
|
||||
server_name,
|
||||
server_cert_data,
|
||||
handshake_data: server_cert_data,
|
||||
transcript,
|
||||
transcript_commitments,
|
||||
transcript_commitment_secrets,
|
||||
@@ -1,7 +1,6 @@
|
||||
use crate::{
|
||||
attestation::Extension, hash::HashAlgId, signing::SignatureAlgId,
|
||||
transcript::TranscriptCommitConfig,
|
||||
};
|
||||
use tlsn_core::{hash::HashAlgId, transcript::TranscriptCommitConfig};
|
||||
|
||||
use crate::{Extension, signing::SignatureAlgId};
|
||||
|
||||
/// Request configuration.
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -1,11 +1,13 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
connection::{ServerCertOpening, ServerIdentityProof, ServerName},
|
||||
use tlsn_core::{
|
||||
connection::ServerName,
|
||||
transcript::{Transcript, TranscriptCommitment, TranscriptProofBuilder, TranscriptSecret},
|
||||
};
|
||||
|
||||
/// Secret data of an [`Attestation`](crate::attestation::Attestation).
|
||||
use crate::connection::{ServerCertOpening, ServerIdentityProof};
|
||||
|
||||
/// Secret data of an [`Attestation`](crate::Attestation).
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct Secrets {
|
||||
pub(crate) server_name: ServerName,
|
||||
54
crates/attestation/src/serialize.rs
Normal file
@@ -0,0 +1,54 @@
|
||||
/// Canonical serialization of TLSNotary types.
|
||||
///
|
||||
/// This trait is used to serialize types into a canonical byte representation.
|
||||
pub(crate) trait CanonicalSerialize {
|
||||
/// Serializes the type.
|
||||
fn serialize(&self) -> Vec<u8>;
|
||||
}
|
||||
|
||||
impl<T> CanonicalSerialize for T
|
||||
where
|
||||
T: serde::Serialize,
|
||||
{
|
||||
fn serialize(&self) -> Vec<u8> {
|
||||
// For now we use BCS for serialization. In future releases we will want to
|
||||
// consider this further, particularly with respect to EVM compatibility.
|
||||
bcs::to_bytes(self).unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
/// A type with a domain separator which is used during hashing to mitigate type
|
||||
/// confusion attacks.
|
||||
pub(crate) trait DomainSeparator {
|
||||
/// Returns the domain separator for the type.
|
||||
fn domain(&self) -> &[u8];
|
||||
}
|
||||
|
||||
macro_rules! impl_domain_separator {
|
||||
($type:ty) => {
|
||||
impl $crate::serialize::DomainSeparator for $type {
|
||||
fn domain(&self) -> &[u8] {
|
||||
use std::sync::LazyLock;
|
||||
|
||||
// Computes a 16 byte hash of the type's name to use as a domain separator.
|
||||
static DOMAIN: LazyLock<[u8; 16]> = LazyLock::new(|| {
|
||||
let domain: [u8; 32] = blake3::hash(stringify!($type).as_bytes()).into();
|
||||
domain[..16].try_into().unwrap()
|
||||
});
|
||||
|
||||
&*DOMAIN
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub(crate) use impl_domain_separator;
|
||||
|
||||
impl_domain_separator!(tlsn_core::connection::ServerEphemKey);
|
||||
impl_domain_separator!(tlsn_core::connection::ConnectionInfo);
|
||||
impl_domain_separator!(tlsn_core::connection::CertBinding);
|
||||
impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment);
|
||||
impl_domain_separator!(tlsn_core::transcript::TranscriptSecret);
|
||||
impl_domain_separator!(tlsn_core::transcript::encoding::EncoderSecret);
|
||||
impl_domain_separator!(tlsn_core::transcript::encoding::EncodingCommitment);
|
||||
impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash);
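
To make the intent of this file concrete, here is a minimal, illustrative sketch (not the crate's actual hashing code) of how a type's domain separator and its canonical BCS encoding could be combined into a single digest. The helper name `hash_separated_sketch` is hypothetical; `blake3` and `bcs` are used only because this file already depends on them.

```rust
use serde::Serialize;

/// Hypothetical helper: prefix the type's domain separator to its canonical
/// BCS encoding before hashing, so equal byte encodings of *different* types
/// can never produce the same digest.
fn hash_separated_sketch<T: Serialize>(domain: &[u8], value: &T) -> [u8; 32] {
    let mut hasher = blake3::Hasher::new();
    // Domain separator first, mitigating type-confusion attacks.
    hasher.update(domain);
    // Canonical (BCS) encoding of the value, as in `CanonicalSerialize`.
    hasher.update(&bcs::to_bytes(value).expect("value must serialize"));
    hasher.finalize().into()
}
```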
|
||||
@@ -4,7 +4,7 @@ use std::collections::HashMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::hash::impl_domain_separator;
|
||||
use crate::serialize::impl_domain_separator;
|
||||
|
||||
/// Key algorithm identifier.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
@@ -242,8 +242,8 @@ mod secp256k1 {
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use k256::ecdsa::{
|
||||
signature::{SignerMut, Verifier},
|
||||
Signature as Secp256K1Signature, SigningKey,
|
||||
signature::{SignerMut, Verifier},
|
||||
};
|
||||
|
||||
use super::*;
|
||||
@@ -318,8 +318,8 @@ mod secp256r1 {
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use p256::ecdsa::{
|
||||
signature::{SignerMut, Verifier},
|
||||
Signature as Secp256R1Signature, SigningKey,
|
||||
signature::{SignerMut, Verifier},
|
||||
};
|
||||
|
||||
use super::*;
|
||||
@@ -394,7 +394,7 @@ mod secp256k1eth {
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use k256::ecdsa::{
|
||||
signature::hazmat::PrehashVerifier, Signature as Secp256K1Signature, SigningKey,
|
||||
Signature as Secp256K1Signature, SigningKey, signature::hazmat::PrehashVerifier,
|
||||
};
|
||||
use tiny_keccak::{Hasher, Keccak};
|
||||
|
||||
@@ -437,8 +437,7 @@ mod secp256k1eth {
|
||||
// Based on Ethereum Yellow Paper Appendix F, only values 0 and 1 are valid.
|
||||
if recid > 1 {
|
||||
return Err(SignatureError(format!(
|
||||
"expected recovery id 0 or 1, got {:?}",
|
||||
recid
|
||||
"expected recovery id 0 or 1, got {recid:?}"
|
||||
)));
|
||||
}
|
||||
// `ecrecover` expects that 0 and 1 are mapped to 27 and 28.
|
||||
@@ -1,17 +1,18 @@
|
||||
use tlsn_core::{
|
||||
attestation::{Attestation, AttestationConfig},
|
||||
connection::{HandshakeData, HandshakeDataV1_2},
|
||||
fixtures::{self, encoder_secret, ConnectionFixture},
|
||||
hash::Blake3,
|
||||
use tlsn_attestation::{
|
||||
Attestation, AttestationConfig, CryptoProvider,
|
||||
presentation::PresentationOutput,
|
||||
request::{Request, RequestConfig},
|
||||
signing::SignatureAlgId,
|
||||
};
|
||||
use tlsn_core::{
|
||||
connection::{CertBinding, CertBindingV1_2},
|
||||
fixtures::{self, ConnectionFixture, encoder_secret},
|
||||
hash::Blake3,
|
||||
transcript::{
|
||||
encoding::{EncodingCommitment, EncodingTree},
|
||||
Direction, Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment,
|
||||
TranscriptSecret,
|
||||
encoding::{EncodingCommitment, EncodingTree},
|
||||
},
|
||||
CryptoProvider,
|
||||
};
|
||||
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
|
||||
|
||||
@@ -35,10 +36,10 @@ fn test_api() {
|
||||
server_cert_data,
|
||||
} = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let HandshakeData::V1_2(HandshakeDataV1_2 {
|
||||
let CertBinding::V1_2(CertBindingV1_2 {
|
||||
server_ephemeral_key,
|
||||
..
|
||||
}) = server_cert_data.handshake.clone()
|
||||
}) = server_cert_data.binding.clone()
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
@@ -63,7 +64,6 @@ fn test_api() {
|
||||
|
||||
let encoding_commitment = EncodingCommitment {
|
||||
root: encoding_tree.root(),
|
||||
secret: encoder_secret(),
|
||||
};
|
||||
|
||||
let request_config = RequestConfig::default();
|
||||
@@ -71,7 +71,7 @@ fn test_api() {
|
||||
|
||||
request_builder
|
||||
.server_name(server_name.clone())
|
||||
.server_cert_data(server_cert_data)
|
||||
.handshake_data(server_cert_data)
|
||||
.transcript(transcript)
|
||||
.transcript_commitments(
|
||||
vec![TranscriptSecret::Encoding(encoding_tree)],
|
||||
@@ -95,6 +95,7 @@ fn test_api() {
|
||||
.connection_info(connection_info.clone())
|
||||
// Server key Notary received during handshake
|
||||
.server_ephemeral_key(server_ephemeral_key)
|
||||
.encoder_secret(encoder_secret())
|
||||
.transcript_commitments(vec![TranscriptCommitment::Encoding(encoding_commitment)]);
|
||||
|
||||
let attestation = attestation_builder.build(&provider).unwrap();
|
||||
@@ -1,74 +0,0 @@
|
||||
[package]
|
||||
edition = "2021"
|
||||
name = "tlsn-benches"
|
||||
publish = false
|
||||
version = "0.0.0"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[features]
|
||||
default = []
|
||||
# Enables benchmarks in the browser.
|
||||
browser-bench = ["tlsn-benches-browser-native"]
|
||||
|
||||
[dependencies]
|
||||
mpz-common = { workspace = true }
|
||||
mpz-core = { workspace = true }
|
||||
mpz-garble = { workspace = true }
|
||||
mpz-ot = { workspace = true, features = ["ideal"] }
|
||||
tlsn-benches-library = { workspace = true }
|
||||
tlsn-benches-browser-native = { workspace = true, optional = true }
|
||||
tlsn-common = { workspace = true }
|
||||
tlsn-core = { workspace = true }
|
||||
tlsn-hmac-sha256 = { workspace = true }
|
||||
tlsn-prover = { workspace = true }
|
||||
tlsn-server-fixture = { workspace = true }
|
||||
tlsn-server-fixture-certs = { workspace = true }
|
||||
tlsn-tls-core = { workspace = true }
|
||||
tlsn-verifier = { workspace = true }
|
||||
|
||||
anyhow = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
charming = { version = "0.3.1", features = ["ssr"] }
|
||||
csv = "1.3.0"
|
||||
dhat = { version = "0.3.3" }
|
||||
env_logger = { version = "0.6.0", default-features = false }
|
||||
futures = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
tokio = { workspace = true, features = [
|
||||
"rt",
|
||||
"rt-multi-thread",
|
||||
"macros",
|
||||
"net",
|
||||
"io-std",
|
||||
"fs",
|
||||
] }
|
||||
tokio-util = { workspace = true }
|
||||
toml = "0.8.11"
|
||||
tracing-subscriber = { workspace = true, features = ["env-filter"] }
|
||||
rand = { workspace = true }
|
||||
|
||||
[[bin]]
|
||||
name = "bench"
|
||||
path = "bin/bench.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "prover"
|
||||
path = "bin/prover.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "prover-memory"
|
||||
path = "bin/prover_memory.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "verifier"
|
||||
path = "bin/verifier.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "verifier-memory"
|
||||
path = "bin/verifier_memory.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "plot"
|
||||
path = "bin/plot.rs"
|
||||
@@ -1,53 +0,0 @@
|
||||
# TLSNotary bench utilities
|
||||
|
||||
This crate provides utilities for benchmarking protocol performance under various network conditions and usage patterns.
|
||||
|
||||
As the protocol is mostly IO-bound, it's important to track how it performs in low-bandwidth and/or high-latency environments. To do this, we set up temporary network namespaces and add virtual Ethernet interfaces that we control with the Linux `tc` (Traffic Control) utility, as sketched below.
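
For readers unfamiliar with `tc`, the sketch below (in Rust, mirroring the crate's own `set_interface` helper that appears later in this diff) shows the kind of shaping applied to each virtual interface. The device name and the rate/delay values are illustrative only.

```rust
use std::process::Command;

/// Illustrative only: cap egress at 100 Mbit/s and add 25 ms of delay on a
/// (hypothetical) veth device. The real harness runs equivalent commands
/// inside the prover and verifier network namespaces.
fn shape_interface(device: &str) -> std::io::Result<()> {
    // Token-bucket filter for the bandwidth cap.
    Command::new("tc")
        .args(["qdisc", "add", "dev", device, "root", "handle", "1:",
               "tbf", "rate", "100mbit", "burst", "1mbit", "latency", "60s"])
        .status()?;
    // netem for the added latency.
    Command::new("tc")
        .args(["qdisc", "add", "dev", device, "parent", "1:1", "handle", "10:",
               "netem", "delay", "25ms"])
        .status()?;
    Ok(())
}
```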
|
||||
|
||||
## Configuration
|
||||
|
||||
See the `bench.toml` file for benchmark configurations.
|
||||
|
||||
## Preliminaries
|
||||
|
||||
To run the benchmarks you will need `iproute2` installed, e.g.:
|
||||
```sh
|
||||
sudo apt-get install iproute2 -y
|
||||
```
|
||||
|
||||
## Running benches
|
||||
|
||||
Running the benches requires root privileges because they will set up virtual interfaces. The script is designed to fully clean up when the benches are done, but run them at your own risk.
|
||||
|
||||
#### Native benches
|
||||
|
||||
Make sure you're in the `crates/benches/` directory, build the binaries, and then run the script:
|
||||
|
||||
```sh
|
||||
cd binary
|
||||
cargo build --release
|
||||
sudo ./bench.sh
|
||||
```
|
||||
|
||||
#### Browser benches
|
||||
|
||||
(Note: we recommend running the browser benches inside a Docker container (see docker.md) to avoid incompatibility issues observed with the latest versions of Chrome.)
|
||||
|
||||
With a Chrome browser installed on your system, make sure you're in the `crates/benches/`
|
||||
directory, build the wasm module, build the binaries, and then run the script:
|
||||
```sh
|
||||
cd browser/wasm
|
||||
wasm-pack build --release --target web
|
||||
cd ../../binary
|
||||
cargo build --release --features browser-bench
|
||||
sudo ./bench.sh
|
||||
```
|
||||
|
||||
## Metrics
|
||||
|
||||
After you run the benches you will see a `metrics.csv` file in the working directory. It will be owned by `root`, so you probably want to run
|
||||
|
||||
```sh
|
||||
sudo chown $USER metrics.csv
|
||||
```
|
||||
@@ -1,13 +0,0 @@
|
||||
#! /bin/bash
|
||||
|
||||
# Check if we are running as root.
|
||||
if [ "$EUID" -ne 0 ]; then
|
||||
echo "This script must be run as root"
|
||||
exit
|
||||
fi
|
||||
|
||||
# Run the benchmark binary.
|
||||
../../../target/release/bench
|
||||
|
||||
# Plot the results.
|
||||
../../../target/release/plot metrics.csv
|
||||
@@ -1,45 +0,0 @@
|
||||
[[benches]]
|
||||
name = "latency"
|
||||
upload = 250
|
||||
upload-delay = [10, 25, 50]
|
||||
download = 250
|
||||
download-delay = [10, 25, 50]
|
||||
upload-size = 1024
|
||||
download-size = 4096
|
||||
defer-decryption = true
|
||||
memory-profile = false
|
||||
|
||||
[[benches]]
|
||||
name = "download_bandwidth"
|
||||
upload = 250
|
||||
upload-delay = 25
|
||||
download = [10, 25, 50, 100, 250]
|
||||
download-delay = 25
|
||||
upload-size = 1024
|
||||
download-size = 4096
|
||||
defer-decryption = true
|
||||
memory-profile = false
|
||||
|
||||
[[benches]]
|
||||
name = "upload_bandwidth"
|
||||
upload = [10, 25, 50, 100, 250]
|
||||
upload-delay = 25
|
||||
download = 250
|
||||
download-delay = 25
|
||||
upload-size = 1024
|
||||
download-size = 4096
|
||||
defer-decryption = [false, true]
|
||||
memory-profile = false
|
||||
|
||||
[[benches]]
|
||||
name = "download_volume"
|
||||
upload = 250
|
||||
upload-delay = 25
|
||||
download = 250
|
||||
download-delay = 25
|
||||
upload-size = 1024
|
||||
# It was observed that setting download-size > 30K causes browser errors that need to
|
||||
# be investigated.
|
||||
download-size = [1024, 4096, 16384]
|
||||
defer-decryption = true
|
||||
memory-profile = true
|
||||
@@ -1,56 +0,0 @@
|
||||
FROM rust AS builder
|
||||
WORKDIR /usr/src/tlsn
|
||||
COPY . .
|
||||
|
||||
ARG BENCH_TYPE=native
|
||||
|
||||
RUN \
|
||||
rustup update; \
|
||||
if [ "$BENCH_TYPE" = "browser" ]; then \
|
||||
# ring's build script needs clang.
|
||||
apt update && apt install -y clang; \
|
||||
rustup install nightly; \
|
||||
rustup component add rust-src --toolchain nightly; \
|
||||
cargo install wasm-pack; \
|
||||
cd crates/benches/browser/wasm; \
|
||||
wasm-pack build --release --target web; \
|
||||
cd ../../binary; \
|
||||
cargo build --release --features browser-bench; \
|
||||
else \
|
||||
cd crates/benches/binary; \
|
||||
cargo build --release; \
|
||||
fi
|
||||
|
||||
FROM debian:latest
|
||||
|
||||
ARG BENCH_TYPE=native
|
||||
|
||||
RUN apt update && apt upgrade -y && apt install -y --no-install-recommends \
|
||||
iproute2 \
|
||||
sudo
|
||||
|
||||
RUN \
|
||||
if [ "$BENCH_TYPE" = "browser" ]; then \
|
||||
# Using Chromium since Chrome for Linux is not available on ARM.
|
||||
apt install -y chromium; \
|
||||
fi
|
||||
|
||||
RUN apt clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY --from=builder \
|
||||
["/usr/src/tlsn/target/release/bench", \
|
||||
"/usr/src/tlsn/target/release/prover", \
|
||||
"/usr/src/tlsn/target/release/prover-memory", \
|
||||
"/usr/src/tlsn/target/release/verifier", \
|
||||
"/usr/src/tlsn/target/release/verifier-memory", \
|
||||
"/usr/src/tlsn/target/release/plot", \
|
||||
"/usr/local/bin/"]
|
||||
|
||||
ENV PROVER_PATH="/usr/local/bin/prover"
|
||||
ENV VERIFIER_PATH="/usr/local/bin/verifier"
|
||||
ENV PROVER_MEMORY_PATH="/usr/local/bin/prover-memory"
|
||||
ENV VERIFIER_MEMORY_PATH="/usr/local/bin/verifier-memory"
|
||||
|
||||
VOLUME [ "/benches" ]
|
||||
WORKDIR "/benches"
|
||||
CMD ["/bin/bash", "-c", "bench && bench --memory-profiling && plot /benches/metrics.csv && cat /benches/metrics.csv"]
|
||||
@@ -1,62 +0,0 @@
|
||||
use std::{env, process::Command, thread, time::Duration};
|
||||
|
||||
use tlsn_benches::{clean_up, set_up};
|
||||
|
||||
fn main() {
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let is_memory_profiling = args.contains(&"--memory-profiling".to_string());
|
||||
|
||||
let (prover_path, verifier_path) = if is_memory_profiling {
|
||||
(
|
||||
std::env::var("PROVER_MEMORY_PATH")
|
||||
.unwrap_or_else(|_| "../../../target/release/prover-memory".to_string()),
|
||||
std::env::var("VERIFIER_MEMORY_PATH")
|
||||
.unwrap_or_else(|_| "../../../target/release/verifier-memory".to_string()),
|
||||
)
|
||||
} else {
|
||||
(
|
||||
std::env::var("PROVER_PATH")
|
||||
.unwrap_or_else(|_| "../../../target/release/prover".to_string()),
|
||||
std::env::var("VERIFIER_PATH")
|
||||
.unwrap_or_else(|_| "../../../target/release/verifier".to_string()),
|
||||
)
|
||||
};
|
||||
|
||||
if let Err(e) = set_up() {
|
||||
println!("Error setting up: {}", e);
|
||||
clean_up();
|
||||
}
|
||||
|
||||
// Run prover and verifier binaries in parallel.
|
||||
let Ok(mut verifier) = Command::new("ip")
|
||||
.arg("netns")
|
||||
.arg("exec")
|
||||
.arg("verifier-ns")
|
||||
.arg(verifier_path)
|
||||
.spawn()
|
||||
else {
|
||||
println!("Failed to start verifier");
|
||||
return clean_up();
|
||||
};
|
||||
|
||||
// Allow the verifier some time to start listening before the prover attempts to
|
||||
// connect.
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
|
||||
let Ok(mut prover) = Command::new("ip")
|
||||
.arg("netns")
|
||||
.arg("exec")
|
||||
.arg("prover-ns")
|
||||
.arg(prover_path)
|
||||
.spawn()
|
||||
else {
|
||||
println!("Failed to start prover");
|
||||
return clean_up();
|
||||
};
|
||||
|
||||
// Wait for both to finish.
|
||||
_ = prover.wait();
|
||||
_ = verifier.wait();
|
||||
|
||||
clean_up();
|
||||
}
|
||||
@@ -1,248 +0,0 @@
|
||||
use tlsn_benches::metrics::Metrics;
|
||||
|
||||
use charming::{
|
||||
component::{
|
||||
Axis, DataView, Feature, Legend, Restore, SaveAsImage, Title, Toolbox, ToolboxDataZoom,
|
||||
},
|
||||
element::{NameLocation, Orient, Tooltip, Trigger},
|
||||
series::{Line, Scatter},
|
||||
theme::Theme,
|
||||
Chart, HtmlRenderer,
|
||||
};
|
||||
use csv::Reader;
|
||||
|
||||
const THEME: Theme = Theme::Default;
|
||||
|
||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let csv_file = std::env::args()
|
||||
.nth(1)
|
||||
.expect("Usage: plot <path_to_csv_file>");
|
||||
|
||||
let mut rdr = Reader::from_path(csv_file)?;
|
||||
|
||||
// Prepare data for plotting.
|
||||
let all_data: Vec<Metrics> = rdr
|
||||
.deserialize::<Metrics>()
|
||||
.collect::<Result<Vec<_>, _>>()?; // Attempt to collect all results, return an error if any fail.
|
||||
|
||||
let _chart = runtime_vs_latency(&all_data)?;
|
||||
let _chart = runtime_vs_bandwidth(&all_data)?;
|
||||
|
||||
// Memory profiling is not compatible with browser benches.
|
||||
if cfg!(not(feature = "browser-bench")) {
|
||||
let _chart = download_size_vs_memory(&all_data)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn download_size_vs_memory(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
|
||||
const TITLE: &str = "Download Size vs Memory";
|
||||
|
||||
let prover_kind: String = all_data
|
||||
.first()
|
||||
.map(|s| s.kind.clone().into())
|
||||
.unwrap_or_default();
|
||||
|
||||
let data: Vec<Vec<f32>> = all_data
|
||||
.iter()
|
||||
.filter(|record| record.name == "download_volume" && record.heap_max_bytes.is_some())
|
||||
.map(|record| {
|
||||
vec![
|
||||
record.download_size as f32,
|
||||
record.heap_max_bytes.unwrap() as f32 / 1024.0 / 1024.0,
|
||||
]
|
||||
})
|
||||
.collect();
|
||||
|
||||
// https://github.com/yuankunzhang/charming
|
||||
let chart = Chart::new()
|
||||
.title(
|
||||
Title::new()
|
||||
.text(TITLE)
|
||||
.subtext(format!("{} Prover", prover_kind)),
|
||||
)
|
||||
.tooltip(Tooltip::new().trigger(Trigger::Axis))
|
||||
.legend(Legend::new().orient(Orient::Vertical))
|
||||
.toolbox(
|
||||
Toolbox::new().show(true).feature(
|
||||
Feature::new()
|
||||
.save_as_image(SaveAsImage::new())
|
||||
.restore(Restore::new())
|
||||
.data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
|
||||
.data_view(DataView::new().read_only(false)),
|
||||
),
|
||||
)
|
||||
.x_axis(
|
||||
Axis::new()
|
||||
.scale(true)
|
||||
.name("Download Size (bytes)")
|
||||
.name_gap(30)
|
||||
.name_location(NameLocation::Center),
|
||||
)
|
||||
.y_axis(
|
||||
Axis::new()
|
||||
.scale(true)
|
||||
.name("Heap Memory (Mbytes)")
|
||||
.name_gap(40)
|
||||
.name_location(NameLocation::Middle),
|
||||
)
|
||||
.series(
|
||||
Scatter::new()
|
||||
.name("Allocated Heap Memory")
|
||||
.symbol_size(10)
|
||||
.data(data),
|
||||
);
|
||||
|
||||
// Save the chart as HTML file.
|
||||
HtmlRenderer::new(TITLE, 1000, 800)
|
||||
.theme(THEME)
|
||||
.save(&chart, "download_size_vs_memory.html")
|
||||
.unwrap();
|
||||
|
||||
Ok(chart)
|
||||
}
|
||||
|
||||
fn runtime_vs_latency(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
|
||||
const TITLE: &str = "Runtime vs Latency";
|
||||
|
||||
let prover_kind: String = all_data
|
||||
.first()
|
||||
.map(|s| s.kind.clone().into())
|
||||
.unwrap_or_default();
|
||||
|
||||
let data: Vec<Vec<f32>> = all_data
|
||||
.iter()
|
||||
.filter(|record| record.name == "latency")
|
||||
.map(|record| {
|
||||
let total_delay = record.upload_delay + record.download_delay; // Calculate the sum of upload and download delays.
|
||||
vec![total_delay as f32, record.runtime as f32]
|
||||
})
|
||||
.collect();
|
||||
|
||||
// https://github.com/yuankunzhang/charming
|
||||
let chart = Chart::new()
|
||||
.title(
|
||||
Title::new()
|
||||
.text(TITLE)
|
||||
.subtext(format!("{} Prover", prover_kind)),
|
||||
)
|
||||
.tooltip(Tooltip::new().trigger(Trigger::Axis))
|
||||
.legend(Legend::new().orient(Orient::Vertical))
|
||||
.toolbox(
|
||||
Toolbox::new().show(true).feature(
|
||||
Feature::new()
|
||||
.save_as_image(SaveAsImage::new())
|
||||
.restore(Restore::new())
|
||||
.data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
|
||||
.data_view(DataView::new().read_only(false)),
|
||||
),
|
||||
)
|
||||
.x_axis(
|
||||
Axis::new()
|
||||
.scale(true)
|
||||
.name("Upload + Download Latency (ms)")
|
||||
.name_location(NameLocation::Center),
|
||||
)
|
||||
.y_axis(
|
||||
Axis::new()
|
||||
.scale(true)
|
||||
.name("Runtime (s)")
|
||||
.name_location(NameLocation::Middle),
|
||||
)
|
||||
.series(
|
||||
Scatter::new()
|
||||
.name("Combined Latency")
|
||||
.symbol_size(10)
|
||||
.data(data),
|
||||
);
|
||||
|
||||
// Save the chart as HTML file.
|
||||
HtmlRenderer::new(TITLE, 1000, 800)
|
||||
.theme(THEME)
|
||||
.save(&chart, "runtime_vs_latency.html")
|
||||
.unwrap();
|
||||
|
||||
Ok(chart)
|
||||
}
|
||||
|
||||
fn runtime_vs_bandwidth(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
|
||||
const TITLE: &str = "Runtime vs Bandwidth";
|
||||
|
||||
let prover_kind: String = all_data
|
||||
.first()
|
||||
.map(|s| s.kind.clone().into())
|
||||
.unwrap_or_default();
|
||||
|
||||
let download_data: Vec<Vec<f32>> = all_data
|
||||
.iter()
|
||||
.filter(|record| record.name == "download_bandwidth")
|
||||
.map(|record| vec![record.download as f32, record.runtime as f32])
|
||||
.collect();
|
||||
let upload_deferred_data: Vec<Vec<f32>> = all_data
|
||||
.iter()
|
||||
.filter(|record| record.name == "upload_bandwidth" && record.defer_decryption)
|
||||
.map(|record| vec![record.upload as f32, record.runtime as f32])
|
||||
.collect();
|
||||
let upload_non_deferred_data: Vec<Vec<f32>> = all_data
|
||||
.iter()
|
||||
.filter(|record| record.name == "upload_bandwidth" && !record.defer_decryption)
|
||||
.map(|record| vec![record.upload as f32, record.runtime as f32])
|
||||
.collect();
|
||||
|
||||
// https://github.com/yuankunzhang/charming
|
||||
let chart = Chart::new()
|
||||
.title(
|
||||
Title::new()
|
||||
.text(TITLE)
|
||||
.subtext(format!("{} Prover", prover_kind)),
|
||||
)
|
||||
.tooltip(Tooltip::new().trigger(Trigger::Axis))
|
||||
.legend(Legend::new().orient(Orient::Vertical))
|
||||
.toolbox(
|
||||
Toolbox::new().show(true).feature(
|
||||
Feature::new()
|
||||
.save_as_image(SaveAsImage::new())
|
||||
.restore(Restore::new())
|
||||
.data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
|
||||
.data_view(DataView::new().read_only(false)),
|
||||
),
|
||||
)
|
||||
.x_axis(
|
||||
Axis::new()
|
||||
.scale(true)
|
||||
.name("Bandwidth (Mbps)")
|
||||
.name_location(NameLocation::Center),
|
||||
)
|
||||
.y_axis(
|
||||
Axis::new()
|
||||
.scale(true)
|
||||
.name("Runtime (s)")
|
||||
.name_location(NameLocation::Middle),
|
||||
)
|
||||
.series(
|
||||
Line::new()
|
||||
.name("Download bandwidth")
|
||||
.symbol_size(10)
|
||||
.data(download_data),
|
||||
)
|
||||
.series(
|
||||
Line::new()
|
||||
.name("Upload bandwidth (deferred decryption)")
|
||||
.symbol_size(10)
|
||||
.data(upload_deferred_data),
|
||||
)
|
||||
.series(
|
||||
Line::new()
|
||||
.name("Upload bandwidth")
|
||||
.symbol_size(10)
|
||||
.data(upload_non_deferred_data),
|
||||
);
|
||||
// Save the chart as HTML file.
|
||||
HtmlRenderer::new(TITLE, 1000, 800)
|
||||
.theme(THEME)
|
||||
.save(&chart, "runtime_vs_bandwidth.html")
|
||||
.unwrap();
|
||||
|
||||
Ok(chart)
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
//! A Prover without memory profiling.
|
||||
|
||||
use tlsn_benches::prover_main::prover_main;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
prover_main(false).await
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
//! A Prover with memory profiling.
|
||||
|
||||
use tlsn_benches::prover_main::prover_main;
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOC: dhat::Alloc = dhat::Alloc;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
if cfg!(feature = "browser-bench") {
|
||||
// Memory profiling is not compatible with browser benches.
|
||||
return Ok(());
|
||||
}
|
||||
prover_main(true).await
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
//! A Verifier without memory profiling.
|
||||
|
||||
use tlsn_benches::verifier_main::verifier_main;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
verifier_main(false).await
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
//! A Verifier with memory profiling.
|
||||
|
||||
use tlsn_benches::verifier_main::verifier_main;
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOC: dhat::Alloc = dhat::Alloc;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
if cfg!(feature = "browser-bench") {
|
||||
// Memory profiling is not compatible with browser benches.
|
||||
return Ok(());
|
||||
}
|
||||
verifier_main(true).await
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
# Run the TLSN benches with Docker
|
||||
|
||||
In the root folder of this repository, run:
|
||||
```
|
||||
# Change to BENCH_TYPE=browser if you want benchmarks to run in the browser.
|
||||
docker build -t tlsn-bench . -f ./crates/benches/binary/benches.Dockerfile --build-arg BENCH_TYPE=native
|
||||
```
|
||||
|
||||
Next run the benches with:
|
||||
```
|
||||
docker run -it --privileged -v ./crates/benches/binary:/benches tlsn-bench
|
||||
```
|
||||
The `--privileged` parameter is required because this test bench needs permission to create network namespaces and virtual interfaces with specific traffic-control parameters.
|
||||
@@ -1,123 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(untagged)]
|
||||
pub enum Field<T> {
|
||||
Single(T),
|
||||
Multiple(Vec<T>),
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct Config {
|
||||
pub benches: Vec<Bench>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct Bench {
|
||||
pub name: String,
|
||||
pub upload: Field<usize>,
|
||||
#[serde(rename = "upload-delay")]
|
||||
pub upload_delay: Field<usize>,
|
||||
pub download: Field<usize>,
|
||||
#[serde(rename = "download-delay")]
|
||||
pub download_delay: Field<usize>,
|
||||
#[serde(rename = "upload-size")]
|
||||
pub upload_size: Field<usize>,
|
||||
#[serde(rename = "download-size")]
|
||||
pub download_size: Field<usize>,
|
||||
#[serde(rename = "defer-decryption")]
|
||||
pub defer_decryption: Field<bool>,
|
||||
#[serde(rename = "memory-profile")]
|
||||
pub memory_profile: Field<bool>,
|
||||
}
|
||||
|
||||
impl Bench {
|
||||
/// Flattens the config into a list of bench instances, one for every combination of the list-valued fields.
|
||||
pub fn flatten(self) -> Vec<BenchInstance> {
|
||||
let mut instances = vec![];
|
||||
|
||||
let upload = match self.upload {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
let upload_delay = match self.upload_delay {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
let download = match self.download {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
let download_latency = match self.download_delay {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
let upload_size = match self.upload_size {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
let download_size = match self.download_size {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
let defer_decryption = match self.defer_decryption {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
let memory_profile = match self.memory_profile {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
for u in upload {
|
||||
for ul in &upload_delay {
|
||||
for d in &download {
|
||||
for dl in &download_latency {
|
||||
for us in &upload_size {
|
||||
for ds in &download_size {
|
||||
for dd in &defer_decryption {
|
||||
for mp in &memory_profile {
|
||||
instances.push(BenchInstance {
|
||||
name: self.name.clone(),
|
||||
upload: u,
|
||||
upload_delay: *ul,
|
||||
download: *d,
|
||||
download_delay: *dl,
|
||||
upload_size: *us,
|
||||
download_size: *ds,
|
||||
defer_decryption: *dd,
|
||||
memory_profile: *mp,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
instances
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct BenchInstance {
|
||||
pub name: String,
|
||||
pub upload: usize,
|
||||
pub upload_delay: usize,
|
||||
pub download: usize,
|
||||
pub download_delay: usize,
|
||||
pub upload_size: usize,
|
||||
pub download_size: usize,
|
||||
pub defer_decryption: bool,
|
||||
/// Whether this instance should be used for memory profiling.
|
||||
pub memory_profile: bool,
|
||||
}
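
As a purely illustrative example of the behavior of the code above (this snippet is not part of the crate and assumes the `Bench` and `Field` types defined above are in scope), the `latency` entry from `bench.toml` flattens into the cross-product of its list-valued fields:

```rust
let bench = Bench {
    name: "latency".to_string(),
    upload: Field::Single(250),
    upload_delay: Field::Multiple(vec![10, 25, 50]),
    download: Field::Single(250),
    download_delay: Field::Multiple(vec![10, 25, 50]),
    upload_size: Field::Single(1024),
    download_size: Field::Single(4096),
    defer_decryption: Field::Single(true),
    memory_profile: Field::Single(false),
};

// 3 upload delays x 3 download delays = 9 concrete `BenchInstance`s,
// each of which ends up as one row in `metrics.csv`.
assert_eq!(bench.flatten().len(), 9);
```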
|
||||
@@ -1,272 +0,0 @@
|
||||
pub mod config;
|
||||
pub mod metrics;
|
||||
pub mod prover;
|
||||
pub mod prover_main;
|
||||
pub mod verifier_main;
|
||||
|
||||
use std::{
|
||||
io,
|
||||
process::{Command, Stdio},
|
||||
};
|
||||
|
||||
pub const PROVER_NAMESPACE: &str = "prover-ns";
|
||||
pub const PROVER_INTERFACE: &str = "prover-veth";
|
||||
pub const PROVER_SUBNET: &str = "10.10.1.0/24";
|
||||
pub const VERIFIER_NAMESPACE: &str = "verifier-ns";
|
||||
pub const VERIFIER_INTERFACE: &str = "verifier-veth";
|
||||
pub const VERIFIER_SUBNET: &str = "10.10.1.1/24";
|
||||
|
||||
pub fn set_up() -> io::Result<()> {
|
||||
// Create network namespaces
|
||||
create_network_namespace(PROVER_NAMESPACE)?;
|
||||
create_network_namespace(VERIFIER_NAMESPACE)?;
|
||||
|
||||
// Create veth pair and attach to namespaces
|
||||
create_veth_pair(
|
||||
PROVER_NAMESPACE,
|
||||
PROVER_INTERFACE,
|
||||
VERIFIER_NAMESPACE,
|
||||
VERIFIER_INTERFACE,
|
||||
)?;
|
||||
|
||||
// Set devices up
|
||||
set_device_up(PROVER_NAMESPACE, PROVER_INTERFACE)?;
|
||||
set_device_up(VERIFIER_NAMESPACE, VERIFIER_INTERFACE)?;
|
||||
|
||||
// Bring up the loopback interface.
|
||||
set_device_up(PROVER_NAMESPACE, "lo")?;
|
||||
set_device_up(VERIFIER_NAMESPACE, "lo")?;
|
||||
|
||||
// Assign IPs
|
||||
assign_ip_to_interface(PROVER_NAMESPACE, PROVER_INTERFACE, PROVER_SUBNET)?;
|
||||
assign_ip_to_interface(VERIFIER_NAMESPACE, VERIFIER_INTERFACE, VERIFIER_SUBNET)?;
|
||||
|
||||
// Set default routes
|
||||
set_default_route(
|
||||
PROVER_NAMESPACE,
|
||||
PROVER_INTERFACE,
|
||||
PROVER_SUBNET.split('/').next().unwrap(),
|
||||
)?;
|
||||
set_default_route(
|
||||
VERIFIER_NAMESPACE,
|
||||
VERIFIER_INTERFACE,
|
||||
VERIFIER_SUBNET.split('/').next().unwrap(),
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn clean_up() {
|
||||
// Delete interface pair
|
||||
if let Err(e) = Command::new("ip")
|
||||
.args([
|
||||
"netns",
|
||||
"exec",
|
||||
PROVER_NAMESPACE,
|
||||
"ip",
|
||||
"link",
|
||||
"delete",
|
||||
PROVER_INTERFACE,
|
||||
])
|
||||
.status()
|
||||
{
|
||||
println!("Error deleting interface {}: {}", PROVER_INTERFACE, e);
|
||||
}
|
||||
|
||||
// Delete namespaces
|
||||
if let Err(e) = Command::new("ip")
|
||||
.args(["netns", "del", PROVER_NAMESPACE])
|
||||
.status()
|
||||
{
|
||||
println!("Error deleting namespace {}: {}", PROVER_NAMESPACE, e);
|
||||
}
|
||||
|
||||
if let Err(e) = Command::new("ip")
|
||||
.args(["netns", "del", VERIFIER_NAMESPACE])
|
||||
.status()
|
||||
{
|
||||
println!("Error deleting namespace {}: {}", VERIFIER_NAMESPACE, e);
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the interface parameters.
|
||||
///
|
||||
/// Must be run in the correct namespace.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `egress` - The egress bandwidth in mbps.
|
||||
/// * `burst` - The burst in mbps.
|
||||
/// * `delay` - The delay in ms.
|
||||
pub fn set_interface(interface: &str, egress: usize, burst: usize, delay: usize) -> io::Result<()> {
|
||||
// Clear rules
|
||||
let output = Command::new("tc")
|
||||
.arg("qdisc")
|
||||
.arg("del")
|
||||
.arg("dev")
|
||||
.arg(interface)
|
||||
.arg("root")
|
||||
.stdout(Stdio::piped())
|
||||
.output()?;
|
||||
|
||||
if output.stderr == "Error: Cannot delete qdisc with handle of zero.\n".as_bytes() {
|
||||
// This error is informative, do not log it to stderr.
|
||||
} else if !output.status.success() {
|
||||
return Err(io::Error::other("Failed to clear rules"));
|
||||
}
|
||||
|
||||
// Egress
|
||||
Command::new("tc")
|
||||
.arg("qdisc")
|
||||
.arg("add")
|
||||
.arg("dev")
|
||||
.arg(interface)
|
||||
.arg("root")
|
||||
.arg("handle")
|
||||
.arg("1:")
|
||||
.arg("tbf")
|
||||
.arg("rate")
|
||||
.arg(format!("{}mbit", egress))
|
||||
.arg("burst")
|
||||
.arg(format!("{}mbit", burst))
|
||||
.arg("latency")
|
||||
.arg("60s")
|
||||
.status()?;
|
||||
|
||||
// Delay
|
||||
Command::new("tc")
|
||||
.arg("qdisc")
|
||||
.arg("add")
|
||||
.arg("dev")
|
||||
.arg(interface)
|
||||
.arg("parent")
|
||||
.arg("1:1")
|
||||
.arg("handle")
|
||||
.arg("10:")
|
||||
.arg("netem")
|
||||
.arg("delay")
|
||||
.arg(format!("{}ms", delay))
|
||||
.status()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create a network namespace with the given name if it does not already exist.
|
||||
fn create_network_namespace(name: &str) -> io::Result<()> {
|
||||
// Check if namespace already exists
|
||||
if Command::new("ip")
|
||||
.args(["netns", "list"])
|
||||
.output()?
|
||||
.stdout
|
||||
.windows(name.len())
|
||||
.any(|ns| ns == name.as_bytes())
|
||||
{
|
||||
println!("Namespace {} already exists", name);
|
||||
return Ok(());
|
||||
} else {
|
||||
println!("Creating namespace {}", name);
|
||||
Command::new("ip").args(["netns", "add", name]).status()?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_veth_pair(
|
||||
left_namespace: &str,
|
||||
left_interface: &str,
|
||||
right_namespace: &str,
|
||||
right_interface: &str,
|
||||
) -> io::Result<()> {
|
||||
// Check if interfaces are already present in namespaces
|
||||
if is_interface_present_in_namespace(left_namespace, left_interface)?
|
||||
|| is_interface_present_in_namespace(right_namespace, right_interface)?
|
||||
{
|
||||
println!("Virtual interface already exists.");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Create veth pair
|
||||
Command::new("ip")
|
||||
.args([
|
||||
"link",
|
||||
"add",
|
||||
left_interface,
|
||||
"type",
|
||||
"veth",
|
||||
"peer",
|
||||
"name",
|
||||
right_interface,
|
||||
])
|
||||
.status()?;
|
||||
|
||||
println!(
|
||||
"Created veth pair {} and {}",
|
||||
left_interface, right_interface
|
||||
);
|
||||
|
||||
// Attach veth pair to namespaces
|
||||
attach_interface_to_namespace(left_namespace, left_interface)?;
|
||||
attach_interface_to_namespace(right_namespace, right_interface)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn attach_interface_to_namespace(namespace: &str, interface: &str) -> io::Result<()> {
|
||||
Command::new("ip")
|
||||
.args(["link", "set", interface, "netns", namespace])
|
||||
.status()?;
|
||||
|
||||
println!("Attached {} to namespace {}", interface, namespace);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set_default_route(namespace: &str, interface: &str, ip: &str) -> io::Result<()> {
|
||||
Command::new("ip")
|
||||
.args([
|
||||
"netns", "exec", namespace, "ip", "route", "add", "default", "via", ip, "dev",
|
||||
interface,
|
||||
])
|
||||
.status()?;
|
||||
|
||||
println!(
|
||||
"Set default route for namespace {} ip {} to {}",
|
||||
namespace, ip, interface
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn is_interface_present_in_namespace(
|
||||
namespace: &str,
|
||||
interface: &str,
|
||||
) -> Result<bool, std::io::Error> {
|
||||
Ok(Command::new("ip")
|
||||
.args([
|
||||
"netns", "exec", namespace, "ip", "link", "list", "dev", interface,
|
||||
])
|
||||
.output()?
|
||||
.stdout
|
||||
.windows(interface.len())
|
||||
.any(|ns| ns == interface.as_bytes()))
|
||||
}
|
||||
|
||||
fn set_device_up(namespace: &str, interface: &str) -> io::Result<()> {
|
||||
Command::new("ip")
|
||||
.args([
|
||||
"netns", "exec", namespace, "ip", "link", "set", interface, "up",
|
||||
])
|
||||
.status()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn assign_ip_to_interface(namespace: &str, interface: &str, ip: &str) -> io::Result<()> {
|
||||
Command::new("ip")
|
||||
.args([
|
||||
"netns", "exec", namespace, "ip", "addr", "add", ip, "dev", interface,
|
||||
])
|
||||
.status()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tlsn_benches_library::ProverKind;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Metrics {
|
||||
pub name: String,
|
||||
/// The kind of the prover, either native or browser.
|
||||
pub kind: ProverKind,
|
||||
/// Upload bandwidth in Mbps.
|
||||
pub upload: usize,
|
||||
/// Upload latency in ms.
|
||||
pub upload_delay: usize,
|
||||
/// Download bandwidth in Mbps.
|
||||
pub download: usize,
|
||||
/// Download latency in ms.
|
||||
pub download_delay: usize,
|
||||
/// Total bytes sent to the server.
|
||||
pub upload_size: usize,
|
||||
/// Total bytes received from the server.
|
||||
pub download_size: usize,
|
||||
/// Whether deferred decryption was used.
|
||||
pub defer_decryption: bool,
|
||||
/// The total runtime of the benchmark in seconds.
|
||||
pub runtime: u64,
|
||||
/// The total amount of data uploaded to the verifier in bytes.
|
||||
pub uploaded: u64,
|
||||
/// The total amount of data downloaded from the verifier in bytes.
|
||||
pub downloaded: u64,
|
||||
/// The peak heap memory usage in bytes.
|
||||
pub heap_max_bytes: Option<usize>,
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
use std::time::Instant;
|
||||
|
||||
use tlsn_benches_library::{run_prover, AsyncIo, ProverKind, ProverTrait};
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
pub struct NativeProver {
|
||||
upload_size: usize,
|
||||
download_size: usize,
|
||||
defer_decryption: bool,
|
||||
io: Option<Box<dyn AsyncIo>>,
|
||||
client_conn: Option<Box<dyn AsyncIo>>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ProverTrait for NativeProver {
|
||||
async fn setup(
|
||||
upload_size: usize,
|
||||
download_size: usize,
|
||||
defer_decryption: bool,
|
||||
io: Box<dyn AsyncIo>,
|
||||
client_conn: Box<dyn AsyncIo>,
|
||||
) -> anyhow::Result<Self>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
Ok(Self {
|
||||
upload_size,
|
||||
download_size,
|
||||
defer_decryption,
|
||||
io: Some(io),
|
||||
client_conn: Some(client_conn),
|
||||
})
|
||||
}
|
||||
|
||||
async fn run(&mut self) -> anyhow::Result<u64> {
|
||||
let io = std::mem::take(&mut self.io).unwrap();
|
||||
let client_conn = std::mem::take(&mut self.client_conn).unwrap();
|
||||
|
||||
let start_time = Instant::now();
|
||||
|
||||
run_prover(
|
||||
self.upload_size,
|
||||
self.download_size,
|
||||
self.defer_decryption,
|
||||
io,
|
||||
client_conn,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(Instant::now().duration_since(start_time).as_secs())
|
||||
}
|
||||
|
||||
fn kind(&self) -> ProverKind {
|
||||
ProverKind::Native
|
||||
}
|
||||
}
|
||||
@@ -1,171 +0,0 @@
|
||||
//! Contains the actual main() function of the prover binary. It is moved here
|
||||
//! in order to enable cargo to build two prover binaries - with and without
|
||||
//! memory profiling.
|
||||
|
||||
use std::{
|
||||
fs::metadata,
|
||||
io::Write,
|
||||
sync::{
|
||||
atomic::{AtomicU64, Ordering},
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
config::{BenchInstance, Config},
|
||||
metrics::Metrics,
|
||||
set_interface, PROVER_INTERFACE,
|
||||
};
|
||||
use anyhow::Context;
|
||||
use tlsn_benches_library::{AsyncIo, ProverTrait};
|
||||
use tlsn_server_fixture::bind;
|
||||
|
||||
use csv::WriterBuilder;
|
||||
|
||||
use tokio_util::{
|
||||
compat::TokioAsyncReadCompatExt,
|
||||
io::{InspectReader, InspectWriter},
|
||||
};
|
||||
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
|
||||
|
||||
#[cfg(not(feature = "browser-bench"))]
|
||||
use crate::prover::NativeProver as BenchProver;
|
||||
#[cfg(feature = "browser-bench")]
|
||||
use tlsn_benches_browser_native::BrowserProver as BenchProver;
|
||||
|
||||
pub async fn prover_main(is_memory_profiling: bool) -> anyhow::Result<()> {
|
||||
let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string());
|
||||
let config: Config = toml::from_str(
|
||||
&std::fs::read_to_string(config_path).context("failed to read config file")?,
|
||||
)
|
||||
.context("failed to parse config")?;
|
||||
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(EnvFilter::from_default_env())
|
||||
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
|
||||
.init();
|
||||
|
||||
let ip = std::env::var("VERIFIER_IP").unwrap_or_else(|_| "10.10.1.1".to_string());
|
||||
let port: u16 = std::env::var("VERIFIER_PORT")
|
||||
.map(|port| port.parse().expect("port is valid u16"))
|
||||
.unwrap_or(8000);
|
||||
let verifier_host = (ip.as_str(), port);
|
||||
|
||||
let mut file = std::fs::OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open("metrics.csv")
|
||||
.context("failed to open metrics file")?;
|
||||
|
||||
{
|
||||
let mut metric_wrt = WriterBuilder::new()
|
||||
// If file is not empty, assume that the CSV header is already present in the file.
|
||||
.has_headers(metadata("metrics.csv")?.len() == 0)
|
||||
.from_writer(&mut file);
|
||||
for bench in config.benches {
|
||||
let instances = bench.flatten();
|
||||
for instance in instances {
|
||||
if is_memory_profiling && !instance.memory_profile {
|
||||
continue;
|
||||
}
|
||||
|
||||
println!("{:?}", &instance);
|
||||
|
||||
let io = tokio::net::TcpStream::connect(verifier_host)
|
||||
.await
|
||||
.context("failed to open tcp connection")?;
|
||||
metric_wrt.serialize(
|
||||
run_instance(instance, io, is_memory_profiling)
|
||||
.await
|
||||
.context("failed to run instance")?,
|
||||
)?;
|
||||
metric_wrt.flush()?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
file.flush()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_instance(
|
||||
instance: BenchInstance,
|
||||
io: impl AsyncIo,
|
||||
is_memory_profiling: bool,
|
||||
) -> anyhow::Result<Metrics> {
|
||||
let uploaded = Arc::new(AtomicU64::new(0));
|
||||
let downloaded = Arc::new(AtomicU64::new(0));
|
||||
let io = InspectWriter::new(
|
||||
InspectReader::new(io, {
|
||||
let downloaded = downloaded.clone();
|
||||
move |data| {
|
||||
downloaded.fetch_add(data.len() as u64, Ordering::Relaxed);
|
||||
}
|
||||
}),
|
||||
{
|
||||
let uploaded = uploaded.clone();
|
||||
move |data| {
|
||||
uploaded.fetch_add(data.len() as u64, Ordering::Relaxed);
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
let BenchInstance {
|
||||
name,
|
||||
upload,
|
||||
upload_delay,
|
||||
download,
|
||||
download_delay,
|
||||
upload_size,
|
||||
download_size,
|
||||
defer_decryption,
|
||||
memory_profile,
|
||||
} = instance.clone();
|
||||
|
||||
set_interface(PROVER_INTERFACE, upload, 1, upload_delay)?;
|
||||
|
||||
let _profiler = if is_memory_profiling {
|
||||
assert!(memory_profile, "Instance doesn't have `memory_profile` set");
|
||||
// Build a testing profiler as it won't output to stderr.
|
||||
Some(dhat::Profiler::builder().testing().build())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let (client_conn, server_conn) = tokio::io::duplex(1 << 16);
|
||||
tokio::spawn(bind(server_conn.compat()));
|
||||
|
||||
let mut prover = BenchProver::setup(
|
||||
upload_size,
|
||||
download_size,
|
||||
defer_decryption,
|
||||
Box::new(io),
|
||||
Box::new(client_conn),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let runtime = prover.run().await?;
|
||||
|
||||
let heap_max_bytes = if is_memory_profiling {
|
||||
Some(dhat::HeapStats::get().max_bytes)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(Metrics {
|
||||
name,
|
||||
kind: prover.kind(),
|
||||
upload,
|
||||
upload_delay,
|
||||
download,
|
||||
download_delay,
|
||||
upload_size,
|
||||
download_size,
|
||||
defer_decryption,
|
||||
runtime,
|
||||
uploaded: uploaded.load(Ordering::SeqCst),
|
||||
downloaded: downloaded.load(Ordering::SeqCst),
|
||||
heap_max_bytes,
|
||||
})
|
||||
}
|
||||
@@ -1,128 +0,0 @@
|
||||
//! Contains the actual main() function of the verifier binary. It is moved here
|
||||
//! in order to enable cargo to build two verifier binaries - with and without
|
||||
//! memory profiling.
|
||||
|
||||
use crate::{
|
||||
config::{BenchInstance, Config},
|
||||
set_interface, VERIFIER_INTERFACE,
|
||||
};
|
||||
use tls_core::verify::WebPkiVerifier;
|
||||
use tlsn_common::config::ProtocolConfigValidator;
|
||||
use tlsn_core::{CryptoProvider, VerifyConfig};
|
||||
use tlsn_server_fixture_certs::CA_CERT_DER;
|
||||
use tlsn_verifier::{Verifier, VerifierConfig};
|
||||
|
||||
use anyhow::Context;
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio_util::compat::TokioAsyncReadCompatExt;
|
||||
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
|
||||
|
||||
pub async fn verifier_main(is_memory_profiling: bool) -> anyhow::Result<()> {
|
||||
let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string());
|
||||
let config: Config = toml::from_str(
|
||||
&std::fs::read_to_string(config_path).context("failed to read config file")?,
|
||||
)
|
||||
.context("failed to parse config")?;
|
||||
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(EnvFilter::from_default_env())
|
||||
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
|
||||
.init();
|
||||
|
||||
let ip = std::env::var("VERIFIER_IP").unwrap_or_else(|_| "10.10.1.1".to_string());
|
||||
let port: u16 = std::env::var("VERIFIER_PORT")
|
||||
.map(|port| port.parse().expect("port is valid u16"))
|
||||
.unwrap_or(8000);
|
||||
let host = (ip.as_str(), port);
|
||||
|
||||
let listener = tokio::net::TcpListener::bind(host)
|
||||
.await
|
||||
.context("failed to bind to port")?;
|
||||
|
||||
for bench in config.benches {
|
||||
for instance in bench.flatten() {
|
||||
if is_memory_profiling && !instance.memory_profile {
|
||||
continue;
|
||||
}
|
||||
|
||||
let (io, _) = listener
|
||||
.accept()
|
||||
.await
|
||||
.context("failed to accept connection")?;
|
||||
run_instance(instance, io, is_memory_profiling)
|
||||
.await
|
||||
.context("failed to run instance")?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
instance: BenchInstance,
|
||||
io: S,
|
||||
is_memory_profiling: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
let BenchInstance {
|
||||
download,
|
||||
download_delay,
|
||||
upload_size,
|
||||
download_size,
|
||||
memory_profile,
|
||||
..
|
||||
} = instance;
|
||||
|
||||
set_interface(VERIFIER_INTERFACE, download, 1, download_delay)?;
|
||||
|
||||
let _profiler = if is_memory_profiling {
|
||||
assert!(memory_profile, "Instance doesn't have `memory_profile` set");
|
||||
// Build a testing profiler as it won't output to stderr.
|
||||
Some(dhat::Profiler::builder().testing().build())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let provider = CryptoProvider {
|
||||
cert: cert_verifier(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let config_validator = ProtocolConfigValidator::builder()
|
||||
.max_sent_data(upload_size + 256)
|
||||
.max_recv_data(download_size + 256)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let verifier = Verifier::new(
|
||||
VerifierConfig::builder()
|
||||
.protocol_config_validator(config_validator)
|
||||
.crypto_provider(provider)
|
||||
.build()?,
|
||||
);
|
||||
|
||||
verifier
|
||||
.verify(io.compat(), &VerifyConfig::default())
|
||||
.await?;
|
||||
|
||||
println!("verifier done");
|
||||
|
||||
if is_memory_profiling {
|
||||
// XXX: we may want to profile the Verifier's memory usage at a future
|
||||
// point.
|
||||
// println!(
|
||||
// "verifier peak heap memory usage: {}",
|
||||
// dhat::HeapStats::get().max_bytes
|
||||
// );
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn cert_verifier() -> WebPkiVerifier {
|
||||
let mut root_store = tls_core::anchors::RootCertStore::empty();
|
||||
root_store
|
||||
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
|
||||
.unwrap();
|
||||
|
||||
WebPkiVerifier::new(root_store, None)
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
[package]
|
||||
edition = "2021"
|
||||
name = "tlsn-benches-browser-core"
|
||||
publish = false
|
||||
version = "0.0.0"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
tlsn-benches-library = { workspace = true }
|
||||
|
||||
serio = { workspace = true }
|
||||
|
||||
serde = { workspace = true }
|
||||
tokio-util= { workspace = true, features = ["compat", "io-util"] }
|
||||
@@ -1,68 +0,0 @@
|
||||
//! Contains core types shared by the native and the wasm components.
|
||||
|
||||
use std::{
|
||||
io::Error,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use tlsn_benches_library::AsyncIo;
|
||||
|
||||
use serio::{
|
||||
codec::{Bincode, Framed},
|
||||
Sink, Stream,
|
||||
};
|
||||
use tokio_util::codec::LengthDelimitedCodec;
|
||||
|
||||
pub mod msg;
|
||||
|
||||
/// A sink/stream for serializable types with a framed transport.
|
||||
pub struct FramedIo {
|
||||
inner:
|
||||
serio::Framed<tokio_util::codec::Framed<Box<dyn AsyncIo>, LengthDelimitedCodec>, Bincode>,
|
||||
}
|
||||
|
||||
impl FramedIo {
|
||||
/// Creates a new `FramedIo` from the given async `io`.
|
||||
#[allow(clippy::default_constructed_unit_structs)]
|
||||
pub fn new(io: Box<dyn AsyncIo>) -> Self {
|
||||
let io = LengthDelimitedCodec::builder().new_framed(io);
|
||||
Self {
|
||||
inner: Framed::new(io, Bincode::default()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Sink for FramedIo {
|
||||
type Error = Error;
|
||||
|
||||
fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Pin::new(&mut self.inner).poll_ready(cx)
|
||||
}
|
||||
|
||||
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Pin::new(&mut self.inner).poll_close(cx)
|
||||
}
|
||||
|
||||
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Pin::new(&mut self.inner).poll_flush(cx)
|
||||
}
|
||||
|
||||
fn start_send<Item: serio::Serialize>(
|
||||
mut self: Pin<&mut Self>,
|
||||
item: Item,
|
||||
) -> std::result::Result<(), Self::Error> {
|
||||
Pin::new(&mut self.inner).start_send(item)
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for FramedIo {
|
||||
type Error = Error;
|
||||
|
||||
fn poll_next<Item: serio::Deserialize>(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Item, Error>>> {
|
||||
Pin::new(&mut self.inner).poll_next(cx)
|
||||
}
|
||||
}
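A minimal usage sketch of FramedIo (not part of the repository; the in-memory tokio::io::duplex transport, the sizes, and the framed_io_sketch wrapper are assumptions for illustration only): frame both ends of a pipe and exchange the serializable `Config` message from the `msg` module, using the same `send`/`expect_next` calls the native and wasm components use.

use serio::{stream::IoStreamExt, SinkExt as _};

async fn framed_io_sketch() -> Result<(), std::io::Error> {
    // Two ends of an in-memory pipe standing in for a real transport.
    let (left, right) = tokio::io::duplex(1 << 16);
    let mut a = FramedIo::new(Box::new(left));
    let mut b = FramedIo::new(Box::new(right));

    // Send a `Config` on one end and receive it on the other.
    a.send(msg::Config {
        upload_size: 1024,
        download_size: 4096,
        defer_decryption: true,
    })
    .await?;
    let cfg: msg::Config = b.expect_next().await?;
    assert_eq!(cfg.upload_size, 1024);

    Ok(())
}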
|
||||
@@ -1,17 +0,0 @@
|
||||
//! Messages exchanged by the native and the wasm components of the browser
|
||||
//! prover.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Serialize, Deserialize, PartialEq)]
|
||||
/// The config sent to the wasm component.
|
||||
pub struct Config {
|
||||
pub upload_size: usize,
|
||||
pub download_size: usize,
|
||||
pub defer_decryption: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, PartialEq)]
|
||||
/// Sent by the wasm component when proving process is finished. Contains total
|
||||
/// runtime in seconds.
|
||||
pub struct Runtime(pub u64);
|
||||
@@ -1,25 +0,0 @@
|
||||
[package]
|
||||
edition = "2021"
|
||||
name = "tlsn-benches-browser-native"
|
||||
publish = false
|
||||
version = "0.0.0"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
tlsn-benches-browser-core = { workspace = true }
|
||||
tlsn-benches-library = { workspace = true }
|
||||
|
||||
serio = { workspace = true }
|
||||
websocket-relay = { workspace = true }
|
||||
|
||||
anyhow = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
chromiumoxide = { version = "0.6.0" , features = ["tokio-runtime"] }
|
||||
futures = { workspace = true }
|
||||
rust-embed = "8.5.0"
|
||||
tokio = { workspace = true, features = ["rt", "io-std"] }
|
||||
tracing = { workspace = true }
|
||||
warp = "0.3.7"
|
||||
warp-embed = "0.5.0"
|
||||
@@ -1,336 +0,0 @@
|
||||
//! Contains the native component of the browser prover.
|
||||
//!
|
||||
//! Conceptually the browser prover consists of the native and the wasm
|
||||
//! components. The native component is responsible for starting the browser,
|
||||
//! loading the wasm component and driving it.
|
||||
|
||||
use std::{env, net::IpAddr, time::Duration};
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use async_trait::async_trait;
|
||||
use chromiumoxide::{
|
||||
cdp::{
|
||||
browser_protocol::log::{EventEntryAdded, LogEntryLevel},
|
||||
js_protocol::runtime::EventExceptionThrown,
|
||||
},
|
||||
Browser, BrowserConfig, Page,
|
||||
};
|
||||
use futures::{Future, FutureExt, StreamExt};
|
||||
use rust_embed::RustEmbed;
|
||||
use serio::{stream::IoStreamExt, SinkExt as _};
|
||||
use tokio::{io, io::AsyncWriteExt, net::TcpListener, task::JoinHandle};
|
||||
use tracing::{debug, error, info};
|
||||
use warp::Filter;
|
||||
|
||||
use tlsn_benches_browser_core::{
|
||||
msg::{Config, Runtime},
|
||||
FramedIo,
|
||||
};
|
||||
use tlsn_benches_library::{AsyncIo, ProverKind, ProverTrait};
|
||||
|
||||
/// The IP on which the wasm component is served.
|
||||
pub static DEFAULT_WASM_IP: &str = "127.0.0.1";
|
||||
/// The IP of the websocket relay.
|
||||
pub static DEFAULT_WS_IP: &str = "127.0.0.1";
|
||||
|
||||
/// The port on which the wasm component is served.
|
||||
pub static DEFAULT_WASM_PORT: u16 = 9001;
|
||||
/// The port of the websocket relay.
|
||||
pub static DEFAULT_WS_PORT: u16 = 9002;
|
||||
/// The port for the wasm component to communicate with the TLS server.
|
||||
pub static DEFAULT_WASM_TO_SERVER_PORT: u16 = 9003;
|
||||
/// The port for the wasm component to communicate with the verifier.
|
||||
pub static DEFAULT_WASM_TO_VERIFIER_PORT: u16 = 9004;
|
||||
/// The port for the wasm component to communicate with the native component.
|
||||
pub static DEFAULT_WASM_TO_NATIVE_PORT: u16 = 9005;
|
||||
|
||||
// The `pkg` dir will be embedded into the binary at compile-time.
|
||||
#[derive(RustEmbed)]
|
||||
#[folder = "../wasm/pkg"]
|
||||
struct Data;
|
||||
|
||||
/// The native component of the prover which runs in the browser.
|
||||
pub struct BrowserProver {
|
||||
/// Io for communication with the wasm component.
|
||||
wasm_io: FramedIo,
|
||||
/// The browser spawned by the prover.
|
||||
browser: Browser,
|
||||
/// A handle to the http server.
|
||||
http_server: JoinHandle<()>,
|
||||
/// Handles to the relays.
|
||||
relays: Vec<JoinHandle<Result<(), anyhow::Error>>>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ProverTrait for BrowserProver {
|
||||
async fn setup(
|
||||
upload_size: usize,
|
||||
download_size: usize,
|
||||
defer_decryption: bool,
|
||||
verifier_io: Box<dyn AsyncIo>,
|
||||
server_io: Box<dyn AsyncIo>,
|
||||
) -> anyhow::Result<Self>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
let wasm_port: u16 = env::var("WASM_PORT")
|
||||
.map(|port| port.parse().expect("port should be valid integer"))
|
||||
.unwrap_or(DEFAULT_WASM_PORT);
|
||||
let ws_port: u16 = env::var("WS_PORT")
|
||||
.map(|port| port.parse().expect("port should be valid integer"))
|
||||
.unwrap_or(DEFAULT_WS_PORT);
|
||||
let wasm_to_server_port: u16 = env::var("WASM_TO_SERVER_PORT")
|
||||
.map(|port| port.parse().expect("port should be valid integer"))
|
||||
.unwrap_or(DEFAULT_WASM_TO_SERVER_PORT);
|
||||
let wasm_to_verifier_port: u16 = env::var("WASM_TO_VERIFIER_PORT")
|
||||
.map(|port| port.parse().expect("port should be valid integer"))
|
||||
.unwrap_or(DEFAULT_WASM_TO_VERIFIER_PORT);
|
||||
let wasm_to_native_port: u16 = env::var("WASM_TO_NATIVE_PORT")
|
||||
.map(|port| port.parse().expect("port should be valid integer"))
|
||||
.unwrap_or(DEFAULT_WASM_TO_NATIVE_PORT);
|
||||
|
||||
let wasm_ip: IpAddr = env::var("WASM_IP")
|
||||
.map(|addr| addr.parse().expect("should be valid IP address"))
|
||||
.unwrap_or(IpAddr::V4(DEFAULT_WASM_IP.parse().unwrap()));
|
||||
let ws_ip: IpAddr = env::var("WS_IP")
|
||||
.map(|addr| addr.parse().expect("should be valid IP address"))
|
||||
.unwrap_or(IpAddr::V4(DEFAULT_WS_IP.parse().unwrap()));
|
||||
|
||||
let mut relays = Vec::with_capacity(4);
|
||||
|
||||
relays.push(spawn_websocket_relay(ws_ip, ws_port).await?);
|
||||
|
||||
// Create a framed connection to the wasm component.
|
||||
let (wasm_left, wasm_right) = tokio::io::duplex(1 << 16);
|
||||
|
||||
relays.push(spawn_port_relay(wasm_to_native_port, Box::new(wasm_right)).await?);
|
||||
let mut wasm_io = FramedIo::new(Box::new(wasm_left));
|
||||
|
||||
let http_server = spawn_http_server(wasm_ip, wasm_port)?;
|
||||
|
||||
// Relay data from the wasm component to the server.
|
||||
relays.push(spawn_port_relay(wasm_to_server_port, server_io).await?);
|
||||
|
||||
// Relay data from the wasm component to the verifier.
|
||||
relays.push(spawn_port_relay(wasm_to_verifier_port, verifier_io).await?);
|
||||
|
||||
info!("spawning browser");
|
||||
|
||||
// Note that the browser must be spawned only when the WebSocket relay is
|
||||
// running.
|
||||
let browser = spawn_browser(
|
||||
wasm_ip,
|
||||
ws_ip,
|
||||
wasm_port,
|
||||
ws_port,
|
||||
wasm_to_server_port,
|
||||
wasm_to_verifier_port,
|
||||
wasm_to_native_port,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Without this sleep, it was observed that the `Config` message sent with
|
||||
// `wasm_io.send()` does not reach the browser component.
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
|
||||
info!("sending config to the browser component");
|
||||
|
||||
wasm_io
|
||||
.send(Config {
|
||||
upload_size,
|
||||
download_size,
|
||||
defer_decryption,
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(Self {
|
||||
wasm_io,
|
||||
browser,
|
||||
http_server,
|
||||
relays,
|
||||
})
|
||||
}
|
||||
|
||||
async fn run(&mut self) -> anyhow::Result<u64> {
|
||||
let runtime: Runtime = self.wasm_io.expect_next().await.unwrap();
|
||||
|
||||
_ = self.clean_up().await?;
|
||||
|
||||
Ok(runtime.0)
|
||||
}
|
||||
|
||||
fn kind(&self) -> ProverKind {
|
||||
ProverKind::Browser
|
||||
}
|
||||
}
|
||||
|
||||
impl BrowserProver {
|
||||
async fn clean_up(&mut self) -> anyhow::Result<()> {
|
||||
// Kill the http server.
|
||||
self.http_server.abort();
|
||||
|
||||
// Kill all relays.
|
||||
let _ = self
|
||||
.relays
|
||||
.iter_mut()
|
||||
.map(|task| task.abort())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Close the browser.
|
||||
self.browser.close().await?;
|
||||
self.browser.wait().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn spawn_websocket_relay(
|
||||
ip: IpAddr,
|
||||
port: u16,
|
||||
) -> anyhow::Result<JoinHandle<Result<(), anyhow::Error>>> {
|
||||
let listener = TcpListener::bind((ip, port)).await?;
|
||||
Ok(tokio::spawn(websocket_relay::run(listener)))
|
||||
}
|
||||
|
||||
/// Binds to the given localhost `port`, accepts a connection and relays data
|
||||
/// between the connection and the `channel`.
|
||||
pub async fn spawn_port_relay(
|
||||
port: u16,
|
||||
channel: Box<dyn AsyncIo>,
|
||||
) -> anyhow::Result<JoinHandle<Result<(), anyhow::Error>>> {
|
||||
let listener = tokio::net::TcpListener::bind(("127.0.0.1", port))
|
||||
.await
|
||||
.context("failed to bind to port")?;
|
||||
|
||||
let handle = tokio::spawn(async move {
|
||||
let (tcp, _) = listener
|
||||
.accept()
|
||||
.await
|
||||
.context("failed to accept a connection")
|
||||
.unwrap();
|
||||
|
||||
relay_data(Box::new(tcp), channel).await
|
||||
});
|
||||
|
||||
Ok(handle)
|
||||
}
|
||||
|
||||
/// Relays data between two sources.
|
||||
pub async fn relay_data(left: Box<dyn AsyncIo>, right: Box<dyn AsyncIo>) -> Result<()> {
|
||||
let (mut left_read, mut left_write) = io::split(left);
|
||||
let (mut right_read, mut right_write) = io::split(right);
|
||||
|
||||
let left_to_right = async {
|
||||
io::copy(&mut left_read, &mut right_write).await?;
|
||||
right_write.shutdown().await
|
||||
};
|
||||
|
||||
let right_to_left = async {
|
||||
io::copy(&mut right_read, &mut left_write).await?;
|
||||
left_write.shutdown().await
|
||||
};
|
||||
|
||||
tokio::try_join!(left_to_right, right_to_left)?;
|
||||
|
||||
Ok(())
|
||||
}
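A rough sketch of how relay_data could be exercised in isolation (hypothetical; the in-memory duplex pipes, buffer sizes, and the relay_data_sketch wrapper are illustration-only assumptions, not part of the bench code):

async fn relay_data_sketch() -> Result<()> {
    use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};

    // Two in-memory pipes; `relay_data` bridges one end of each.
    let (mut a_side, a_peer) = tokio::io::duplex(1024);
    let (mut b_side, b_peer) = tokio::io::duplex(1024);

    let relay = tokio::spawn(relay_data(Box::new(a_peer), Box::new(b_peer)));

    // Bytes written into `a_side` come out of `b_side`.
    a_side.write_all(b"ping").await?;
    a_side.shutdown().await?;

    let mut buf = Vec::new();
    b_side.read_to_end(&mut buf).await?;
    assert_eq!(&buf, b"ping");

    // Closing the other direction lets the relay finish.
    drop(b_side);
    relay.await??;

    Ok(())
}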
|
||||
|
||||
/// Spawns the browser and starts the wasm component.
|
||||
async fn spawn_browser(
|
||||
wasm_ip: IpAddr,
|
||||
ws_ip: IpAddr,
|
||||
wasm_port: u16,
|
||||
ws_port: u16,
|
||||
wasm_to_server_port: u16,
|
||||
wasm_to_verifier_port: u16,
|
||||
wasm_to_native_port: u16,
|
||||
) -> anyhow::Result<Browser> {
|
||||
// Chrome requires --no-sandbox when running as root.
|
||||
let config = BrowserConfig::builder()
|
||||
.no_sandbox()
|
||||
.incognito()
|
||||
.build()
|
||||
.map_err(|s| anyhow!(s))?;
|
||||
|
||||
debug!("launching chromedriver");
|
||||
|
||||
let (browser, mut handler) = Browser::launch(config).await?;
|
||||
|
||||
debug!("chromedriver started");
|
||||
|
||||
tokio::spawn(async move {
|
||||
while let Some(res) = handler.next().await {
|
||||
res.unwrap();
|
||||
}
|
||||
});
|
||||
|
||||
let page = browser
|
||||
.new_page(&format!("http://{}:{}/index.html", wasm_ip, wasm_port))
|
||||
.await?;
|
||||
|
||||
tokio::spawn(register_listeners(&page).await?);
|
||||
|
||||
page.wait_for_navigation().await?;
|
||||
|
||||
// Note that `format!` needs double {{ }} in order to escape them.
|
||||
let _ = page
|
||||
.evaluate_function(&format!(
|
||||
r#"
|
||||
async function() {{
|
||||
await window.benchWorker.init();
|
||||
// Do not `await` run() or else it will block the browser.
|
||||
window.benchWorker.run("{}", {}, {}, {}, {});
|
||||
}}
|
||||
"#,
|
||||
ws_ip, ws_port, wasm_to_server_port, wasm_to_verifier_port, wasm_to_native_port
|
||||
))
|
||||
.await?;
|
||||
|
||||
Ok(browser)
|
||||
}
|
||||
|
||||
pub fn spawn_http_server(ip: IpAddr, port: u16) -> anyhow::Result<JoinHandle<()>> {
|
||||
let handle = tokio::spawn(async move {
|
||||
// Serve embedded files with additional headers.
|
||||
let data_serve = warp_embed::embed(&Data);
|
||||
|
||||
let data_serve_with_headers = data_serve
|
||||
.map(|reply| {
|
||||
warp::reply::with_header(reply, "Cross-Origin-Opener-Policy", "same-origin")
|
||||
})
|
||||
.map(|reply| {
|
||||
warp::reply::with_header(reply, "Cross-Origin-Embedder-Policy", "require-corp")
|
||||
});
|
||||
|
||||
warp::serve(data_serve_with_headers).run((ip, port)).await;
|
||||
});
|
||||
|
||||
Ok(handle)
|
||||
}
|
||||
|
||||
async fn register_listeners(page: &Page) -> Result<impl Future<Output = ()>> {
|
||||
let mut logs = page.event_listener::<EventEntryAdded>().await?.fuse();
|
||||
let mut exceptions = page.event_listener::<EventExceptionThrown>().await?.fuse();
|
||||
|
||||
Ok(futures::future::join(
|
||||
async move {
|
||||
while let Some(event) = logs.next().await {
|
||||
let entry = &event.entry;
|
||||
match entry.level {
|
||||
LogEntryLevel::Error => {
|
||||
error!("{:?}", entry);
|
||||
}
|
||||
_ => {
|
||||
debug!("{:?}: {}", entry.timestamp, entry.text);
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
async move {
|
||||
while let Some(event) = exceptions.next().await {
|
||||
error!("{:?}", event);
|
||||
}
|
||||
},
|
||||
)
|
||||
.map(|_| ()))
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
[build]
|
||||
target = "wasm32-unknown-unknown"
|
||||
|
||||
[unstable]
|
||||
build-std = ["panic_abort", "std"]
|
||||
|
||||
[target.wasm32-unknown-unknown]
|
||||
rustflags = [
|
||||
"-C",
|
||||
"target-feature=+atomics,+bulk-memory,+mutable-globals",
|
||||
"-C",
|
||||
# 4GB
|
||||
"link-arg=--max-memory=4294967296",
|
||||
"--cfg",
|
||||
'getrandom_backend="wasm_js"',
|
||||
]
|
||||
@@ -1,35 +0,0 @@
|
||||
[package]
|
||||
edition = "2021"
|
||||
name = "tlsn-benches-browser-wasm"
|
||||
publish = false
|
||||
version = "0.0.0"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[lib]
|
||||
crate-type = ["cdylib", "rlib"]
|
||||
|
||||
[dependencies]
|
||||
tlsn-benches-browser-core = { workspace = true }
|
||||
tlsn-benches-library = { workspace = true }
|
||||
tlsn-wasm = { path = "../../../wasm" }
|
||||
|
||||
serio = { workspace = true }
|
||||
|
||||
anyhow = { workspace = true }
|
||||
rayon = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
wasm-bindgen = { version = "0.2" }
|
||||
wasm-bindgen-futures = { version = "0.4" }
|
||||
web-spawn = { workspace = true, features = ["no-bundler"] }
|
||||
web-time = { workspace = true }
|
||||
# Use the patched ws_stream_wasm to fix the issue https://github.com/najamelan/ws_stream_wasm/issues/12#issuecomment-1711902958
|
||||
ws_stream_wasm = { version = "0.7.4", git = "https://github.com/tlsnotary/ws_stream_wasm", rev = "2ed12aad9f0236e5321f577672f309920b2aef51", features = [
|
||||
"tokio_io",
|
||||
] }
|
||||
|
||||
[package.metadata.wasm-pack.profile.release]
|
||||
# Note: these wasm-pack options should match those in crates/wasm/Cargo.toml
|
||||
opt-level = "z"
|
||||
wasm-opt = true
|
||||
@@ -1,7 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<head>
|
||||
</head>
|
||||
<body>
|
||||
<script src="index.js" type="module"></script>
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,5 +0,0 @@
|
||||
import * as Comlink from "./comlink.mjs";
|
||||
|
||||
const benchWorker = Comlink.wrap(new Worker("worker.js", { type: "module" }));
|
||||
|
||||
window.benchWorker = benchWorker;
|
||||
@@ -1,41 +0,0 @@
|
||||
import * as Comlink from "./comlink.mjs";
|
||||
|
||||
import init_wasm, * as wasm from './tlsn_benches_browser_wasm.js';
|
||||
|
||||
class BenchWorker {
|
||||
async init() {
|
||||
try {
|
||||
await init_wasm();
|
||||
// Using Error level since excessive logging may interfere with the
|
||||
// benchmark results.
|
||||
await wasm.initialize_bench({ level: "Error" }, navigator.hardwareConcurrency);
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
async run(
|
||||
ws_ip,
|
||||
ws_port,
|
||||
wasm_to_server_port,
|
||||
wasm_to_verifier_port,
|
||||
wasm_to_native_port
|
||||
) {
|
||||
try {
|
||||
await wasm.wasm_main(
|
||||
ws_ip,
|
||||
ws_port,
|
||||
wasm_to_server_port,
|
||||
wasm_to_verifier_port,
|
||||
wasm_to_native_port);
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const worker = new BenchWorker();
|
||||
|
||||
Comlink.expose(worker);
|
||||
@@ -1,4 +0,0 @@
|
||||
[toolchain]
|
||||
channel = "nightly"
|
||||
components = ["rust-src"]
|
||||
targets = ["wasm32-unknown-unknown"]
|
||||
@@ -1,115 +0,0 @@
|
||||
#![cfg(target_arch = "wasm32")]
|
||||
|
||||
//! Contains the wasm component of the browser prover.
|
||||
//!
|
||||
//! Conceptually the browser prover consists of the native and the wasm
|
||||
//! components.
|
||||
|
||||
use anyhow::Result;
|
||||
use serio::{stream::IoStreamExt, SinkExt as _};
|
||||
use tracing::info;
|
||||
use wasm_bindgen::prelude::*;
|
||||
use web_time::Instant;
|
||||
use ws_stream_wasm::WsMeta;
|
||||
|
||||
use tlsn_benches_browser_core::{
|
||||
msg::{Config, Runtime},
|
||||
FramedIo,
|
||||
};
|
||||
use tlsn_benches_library::run_prover;
|
||||
use tlsn_wasm::LoggingConfig;
|
||||
|
||||
#[wasm_bindgen]
|
||||
pub async fn wasm_main(
|
||||
ws_ip: String,
|
||||
ws_port: u16,
|
||||
wasm_to_server_port: u16,
|
||||
wasm_to_verifier_port: u16,
|
||||
wasm_to_native_port: u16,
|
||||
) -> Result<(), JsError> {
|
||||
// Wrapping main() since wasm_bindgen doesn't support anyhow.
|
||||
main(
|
||||
ws_ip,
|
||||
ws_port,
|
||||
wasm_to_server_port,
|
||||
wasm_to_verifier_port,
|
||||
wasm_to_native_port,
|
||||
)
|
||||
.await
|
||||
.map_err(|err| JsError::new(&err.to_string()))
|
||||
}
|
||||
|
||||
pub async fn main(
|
||||
ws_ip: String,
|
||||
ws_port: u16,
|
||||
wasm_to_server_port: u16,
|
||||
wasm_to_verifier_port: u16,
|
||||
wasm_to_native_port: u16,
|
||||
) -> Result<()> {
|
||||
info!("starting main");
|
||||
|
||||
// Connect to the server.
|
||||
let (_, server_io_ws) = WsMeta::connect(
|
||||
&format!(
|
||||
"ws://{}:{}/tcp?addr=localhost%3A{}",
|
||||
ws_ip, ws_port, wasm_to_server_port
|
||||
),
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
let server_io = server_io_ws.into_io();
|
||||
|
||||
// Connect to the verifier.
|
||||
let (_, verifier_io_ws) = WsMeta::connect(
|
||||
&format!(
|
||||
"ws://{}:{}/tcp?addr=localhost%3A{}",
|
||||
ws_ip, ws_port, wasm_to_verifier_port
|
||||
),
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
let verifier_io = verifier_io_ws.into_io();
|
||||
|
||||
// Connect to the native component of the browser prover.
|
||||
let (_, native_io_ws) = WsMeta::connect(
|
||||
&format!(
|
||||
"ws://{}:{}/tcp?addr=localhost%3A{}",
|
||||
ws_ip, ws_port, wasm_to_native_port
|
||||
),
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
let mut native_io = FramedIo::new(Box::new(native_io_ws.into_io()));
|
||||
|
||||
info!("expecting config from the native component");
|
||||
|
||||
let cfg: Config = native_io.expect_next().await?;
|
||||
|
||||
let start_time = Instant::now();
|
||||
|
||||
info!("running the prover");
|
||||
|
||||
run_prover(
|
||||
cfg.upload_size,
|
||||
cfg.download_size,
|
||||
cfg.defer_decryption,
|
||||
Box::new(verifier_io),
|
||||
Box::new(server_io),
|
||||
)
|
||||
.await?;
|
||||
|
||||
native_io
|
||||
.send(Runtime(start_time.elapsed().as_secs()))
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Initializes the module.
|
||||
#[wasm_bindgen]
|
||||
pub async fn initialize_bench(
|
||||
logging_config: Option<LoggingConfig>,
|
||||
thread_count: usize,
|
||||
) -> Result<(), JsValue> {
|
||||
tlsn_wasm::initialize(logging_config, thread_count).await
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
[package]
|
||||
edition = "2021"
|
||||
name = "tlsn-benches-library"
|
||||
publish = false
|
||||
version = "0.0.0"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
tlsn-common = { workspace = true }
|
||||
tlsn-core = { workspace = true }
|
||||
tlsn-prover = { workspace = true }
|
||||
tlsn-server-fixture-certs = { workspace = true }
|
||||
tlsn-tls-core = { workspace = true }
|
||||
|
||||
anyhow = "1.0"
|
||||
async-trait = "0.1.81"
|
||||
futures = { version = "0.3", features = ["compat"] }
|
||||
serde = { workspace = true }
|
||||
tokio = {version = "1", default-features = false, features = ["rt", "macros"]}
|
||||
tokio-util= {version = "0.7", features = ["compat", "io"]}
|
||||
@@ -1,137 +0,0 @@
|
||||
use tls_core::{anchors::RootCertStore, verify::WebPkiVerifier};
|
||||
use tlsn_common::config::ProtocolConfig;
|
||||
use tlsn_core::{CryptoProvider, ProveConfig};
|
||||
use tlsn_prover::{Prover, ProverConfig};
|
||||
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
|
||||
|
||||
use anyhow::Context;
|
||||
use async_trait::async_trait;
|
||||
use futures::{future::try_join, AsyncReadExt as _, AsyncWriteExt as _, TryFutureExt};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio_util::compat::TokioAsyncReadCompatExt;
|
||||
|
||||
pub trait AsyncIo: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static {}
|
||||
impl<T> AsyncIo for T where T: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static {}
|
||||
|
||||
#[async_trait]
|
||||
pub trait ProverTrait {
|
||||
/// Sets up the prover, returning a prover that is ready to be
|
||||
/// run.
|
||||
async fn setup(
|
||||
upload_size: usize,
|
||||
download_size: usize,
|
||||
defer_decryption: bool,
|
||||
verifier_io: Box<dyn AsyncIo>,
|
||||
server_io: Box<dyn AsyncIo>,
|
||||
) -> anyhow::Result<Self>
|
||||
where
|
||||
Self: Sized;
|
||||
|
||||
/// Runs the prover. Returns the total run time in seconds.
|
||||
async fn run(&mut self) -> anyhow::Result<u64>;
|
||||
|
||||
/// Returns the kind of the prover.
|
||||
fn kind(&self) -> ProverKind;
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
/// The kind of a prover.
|
||||
pub enum ProverKind {
|
||||
/// The prover compiled into a native binary.
|
||||
Native,
|
||||
/// The prover compiled into a wasm binary.
|
||||
Browser,
|
||||
}
|
||||
|
||||
impl From<ProverKind> for String {
|
||||
fn from(value: ProverKind) -> Self {
|
||||
match value {
|
||||
ProverKind::Native => "Native".to_string(),
|
||||
ProverKind::Browser => "Browser".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run_prover(
|
||||
upload_size: usize,
|
||||
download_size: usize,
|
||||
defer_decryption: bool,
|
||||
io: Box<dyn AsyncIo>,
|
||||
client_conn: Box<dyn AsyncIo>,
|
||||
) -> anyhow::Result<()> {
|
||||
let provider = CryptoProvider {
|
||||
cert: WebPkiVerifier::new(root_store(), None),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut protocol_config = ProtocolConfig::builder();
|
||||
if defer_decryption {
|
||||
protocol_config
|
||||
.max_sent_data(upload_size + 256)
|
||||
.max_recv_data(download_size + 256)
|
||||
} else {
|
||||
protocol_config
|
||||
.max_sent_data(upload_size + 256)
|
||||
.max_recv_data(download_size + 256)
|
||||
.max_recv_data_online(download_size + 256)
|
||||
};
|
||||
let protocol_config = protocol_config
|
||||
.defer_decryption_from_start(defer_decryption)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let prover = Prover::new(
|
||||
ProverConfig::builder()
|
||||
.server_name(SERVER_DOMAIN)
|
||||
.protocol_config(protocol_config)
|
||||
.crypto_provider(provider)
|
||||
.build()
|
||||
.context("invalid prover config")?,
|
||||
)
|
||||
.setup(io.compat())
|
||||
.await?;
|
||||
|
||||
let (mut mpc_tls_connection, prover_fut) = prover.connect(client_conn.compat()).await?;
|
||||
let tls_fut = async move {
|
||||
let request = format!(
|
||||
"GET /bytes?size={} HTTP/1.1\r\nConnection: close\r\nData: {}\r\n\r\n",
|
||||
download_size,
|
||||
String::from_utf8(vec![0x42u8; upload_size]).unwrap(),
|
||||
);
|
||||
|
||||
mpc_tls_connection.write_all(request.as_bytes()).await?;
|
||||
mpc_tls_connection.close().await?;
|
||||
|
||||
let mut response = vec![];
|
||||
mpc_tls_connection.read_to_end(&mut response).await?;
|
||||
|
||||
dbg!(response.len());
|
||||
|
||||
Ok::<(), anyhow::Error>(())
|
||||
};
|
||||
|
||||
let (mut prover, _) = try_join(prover_fut.map_err(anyhow::Error::from), tls_fut).await?;
|
||||
|
||||
let (sent_len, recv_len) = prover.transcript().len();
|
||||
|
||||
let mut builder = ProveConfig::builder(prover.transcript());
|
||||
|
||||
builder.reveal_sent(&(0..sent_len)).unwrap();
|
||||
builder.reveal_recv(&(0..recv_len)).unwrap();
|
||||
|
||||
let config = builder.build().unwrap();
|
||||
|
||||
prover.prove(&config).await?;
|
||||
prover.close().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn root_store() -> RootCertStore {
|
||||
let mut root_store = RootCertStore::empty();
|
||||
root_store
|
||||
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
|
||||
.unwrap();
|
||||
root_store
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
[package]
|
||||
name = "tlsn-common"
|
||||
description = "Common code shared between tlsn-prover and tlsn-verifier"
|
||||
version = "0.1.0-alpha.11"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[features]
|
||||
default = []
|
||||
|
||||
[dependencies]
|
||||
tlsn-core = { workspace = true }
|
||||
tlsn-tls-core = { workspace = true }
|
||||
tlsn-cipher = { workspace = true }
|
||||
mpz-core = { workspace = true }
|
||||
mpz-common = { workspace = true }
|
||||
mpz-memory-core = { workspace = true }
|
||||
mpz-hash = { workspace = true }
|
||||
mpz-vm-core = { workspace = true }
|
||||
mpz-zk = { workspace = true }
|
||||
|
||||
async-trait = { workspace = true }
|
||||
derive_builder = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
once_cell = { workspace = true }
|
||||
opaque-debug = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
rangeset = { workspace = true }
|
||||
serio = { workspace = true, features = ["codec", "bincode"] }
|
||||
thiserror = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
uid-mux = { workspace = true, features = ["serio"] }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
semver = { version = "1.0", features = ["serde"] }
|
||||
|
||||
[target.'cfg(target_arch = "wasm32")'.dependencies]
|
||||
wasm-bindgen = { version = "0.2" }
|
||||
web-spawn = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
rstest = { workspace = true }
|
||||
@@ -1,110 +0,0 @@
|
||||
//! Plaintext commitment and proof of encryption.
|
||||
|
||||
pub mod hash;
|
||||
|
||||
use mpz_core::bitvec::BitVec;
|
||||
use mpz_memory_core::{binary::Binary, DecodeFutureTyped};
|
||||
use mpz_vm_core::{prelude::*, Vm};
|
||||
|
||||
use crate::{
|
||||
transcript::Record,
|
||||
zk_aes::{ZkAesCtr, ZkAesCtrError},
|
||||
Role,
|
||||
};
|
||||
|
||||
/// Commits the plaintext of the provided records, returning a proof of
|
||||
/// encryption.
|
||||
///
|
||||
/// Writes the plaintext VM reference to the provided records.
|
||||
pub fn commit_records<'record>(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
aes: &mut ZkAesCtr,
|
||||
records: impl IntoIterator<Item = &'record mut Record>,
|
||||
) -> Result<RecordProof, RecordProofError> {
|
||||
let mut ciphertexts = Vec::new();
|
||||
for record in records {
|
||||
if record.plaintext_ref.is_some() {
|
||||
return Err(ErrorRepr::PlaintextRefAlreadySet.into());
|
||||
}
|
||||
|
||||
let (plaintext_ref, ciphertext_ref) = aes
|
||||
.encrypt(vm, record.explicit_nonce.clone(), record.ciphertext.len())
|
||||
.map_err(ErrorRepr::Aes)?;
|
||||
|
||||
record.plaintext_ref = Some(plaintext_ref);
|
||||
|
||||
if let Role::Prover = aes.role() {
|
||||
let Some(plaintext) = record.plaintext.clone() else {
|
||||
return Err(ErrorRepr::MissingPlaintext.into());
|
||||
};
|
||||
|
||||
vm.assign(plaintext_ref, plaintext)
|
||||
.map_err(RecordProofError::vm)?;
|
||||
}
|
||||
vm.commit(plaintext_ref).map_err(RecordProofError::vm)?;
|
||||
|
||||
let ciphertext = vm.decode(ciphertext_ref).map_err(RecordProofError::vm)?;
|
||||
ciphertexts.push((ciphertext, record.ciphertext.clone()));
|
||||
}
|
||||
|
||||
Ok(RecordProof { ciphertexts })
|
||||
}
|
||||
|
||||
/// Proof of encryption.
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub struct RecordProof {
|
||||
ciphertexts: Vec<(DecodeFutureTyped<BitVec, Vec<u8>>, Vec<u8>)>,
|
||||
}
|
||||
|
||||
impl RecordProof {
|
||||
/// Verifies the proof.
|
||||
pub fn verify(self) -> Result<(), RecordProofError> {
|
||||
let Self { ciphertexts } = self;
|
||||
|
||||
for (mut ciphertext, expected) in ciphertexts {
|
||||
let ciphertext = ciphertext
|
||||
.try_recv()
|
||||
.map_err(RecordProofError::vm)?
|
||||
.ok_or_else(|| ErrorRepr::NotDecoded)?;
|
||||
|
||||
if ciphertext != expected {
|
||||
return Err(ErrorRepr::InvalidCiphertext.into());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Error for [`RecordProof`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error(transparent)]
|
||||
pub struct RecordProofError(#[from] ErrorRepr);
|
||||
|
||||
impl RecordProofError {
|
||||
fn vm<E>(err: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
|
||||
{
|
||||
Self(ErrorRepr::Vm(err.into()))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("record proof error: {0}")]
|
||||
enum ErrorRepr {
|
||||
#[error("VM error: {0}")]
|
||||
Vm(Box<dyn std::error::Error + Send + Sync + 'static>),
|
||||
#[error("zk aes error: {0}")]
|
||||
Aes(ZkAesCtrError),
|
||||
#[error("plaintext is missing")]
|
||||
MissingPlaintext,
|
||||
#[error("plaintext reference is already set")]
|
||||
PlaintextRefAlreadySet,
|
||||
#[error("ciphertext was not decoded")]
|
||||
NotDecoded,
|
||||
#[error("ciphertext does not match expected")]
|
||||
InvalidCiphertext,
|
||||
}
|
||||
@@ -1,238 +0,0 @@
|
||||
//! Encoding commitment protocol.
|
||||
|
||||
use std::ops::Range;
|
||||
|
||||
use mpz_common::Context;
|
||||
use mpz_memory_core::{
|
||||
binary::U8,
|
||||
correlated::{Delta, Key, Mac},
|
||||
Vector,
|
||||
};
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serio::{stream::IoStreamExt, SinkExt};
|
||||
use tlsn_core::{
|
||||
hash::HashAlgorithm,
|
||||
transcript::{
|
||||
encoding::{
|
||||
new_encoder, Encoder, EncoderSecret, EncodingCommitment, EncodingProvider,
|
||||
EncodingProviderError, EncodingTree, EncodingTreeError,
|
||||
},
|
||||
Direction, Idx,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::transcript::TranscriptRefs;
|
||||
|
||||
/// Number of encoding bytes per byte of plaintext.
|
||||
const ENCODING_SIZE: usize = 128;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
struct Encodings {
|
||||
sent: Vec<u8>,
|
||||
recv: Vec<u8>,
|
||||
}
|
||||
|
||||
/// Transfers the encodings using the provided delta and keys.
|
||||
///
|
||||
/// The keys must be consistent with the global delta used in the encodings.
|
||||
pub async fn transfer<'a>(
|
||||
ctx: &mut Context,
|
||||
refs: &TranscriptRefs,
|
||||
delta: &Delta,
|
||||
f: impl Fn(Vector<U8>) -> &'a [Key],
|
||||
) -> Result<EncodingCommitment, EncodingError> {
|
||||
let secret = EncoderSecret::new(rand::rng().random(), delta.as_block().to_bytes());
|
||||
let encoder = new_encoder(&secret);
|
||||
|
||||
let sent_keys: Vec<u8> = refs
|
||||
.sent()
|
||||
.iter()
|
||||
.copied()
|
||||
.flat_map(&f)
|
||||
.flat_map(|key| key.as_block().as_bytes())
|
||||
.copied()
|
||||
.collect();
|
||||
let recv_keys: Vec<u8> = refs
|
||||
.recv()
|
||||
.iter()
|
||||
.copied()
|
||||
.flat_map(&f)
|
||||
.flat_map(|key| key.as_block().as_bytes())
|
||||
.copied()
|
||||
.collect();
|
||||
|
||||
assert_eq!(sent_keys.len() % ENCODING_SIZE, 0);
|
||||
assert_eq!(recv_keys.len() % ENCODING_SIZE, 0);
|
||||
|
||||
let mut sent_encoding = Vec::with_capacity(sent_keys.len());
|
||||
let mut recv_encoding = Vec::with_capacity(recv_keys.len());
|
||||
|
||||
encoder.encode_range(
|
||||
Direction::Sent,
|
||||
0..sent_keys.len() / ENCODING_SIZE,
|
||||
&mut sent_encoding,
|
||||
);
|
||||
encoder.encode_range(
|
||||
Direction::Received,
|
||||
0..recv_keys.len() / ENCODING_SIZE,
|
||||
&mut recv_encoding,
|
||||
);
|
||||
|
||||
sent_encoding
|
||||
.iter_mut()
|
||||
.zip(sent_keys)
|
||||
.for_each(|(enc, key)| *enc ^= key);
|
||||
recv_encoding
|
||||
.iter_mut()
|
||||
.zip(recv_keys)
|
||||
.for_each(|(enc, key)| *enc ^= key);
|
||||
|
||||
ctx.io_mut()
|
||||
.send(Encodings {
|
||||
sent: sent_encoding,
|
||||
recv: recv_encoding,
|
||||
})
|
||||
.await?;
|
||||
|
||||
let root = ctx.io_mut().expect_next().await?;
|
||||
ctx.io_mut().send(secret.clone()).await?;
|
||||
|
||||
Ok(EncodingCommitment {
|
||||
root,
|
||||
secret: secret.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Receives the encodings using the provided MACs.
|
||||
///
|
||||
/// The MACs must be consistent with the global delta used in the encodings.
|
||||
pub async fn receive<'a>(
|
||||
ctx: &mut Context,
|
||||
hasher: &(dyn HashAlgorithm + Send + Sync),
|
||||
refs: &TranscriptRefs,
|
||||
f: impl Fn(Vector<U8>) -> &'a [Mac],
|
||||
idxs: impl IntoIterator<Item = &(Direction, Idx)>,
|
||||
) -> Result<(EncodingCommitment, EncodingTree), EncodingError> {
|
||||
let Encodings { mut sent, mut recv } = ctx.io_mut().expect_next().await?;
|
||||
|
||||
let sent_macs: Vec<u8> = refs
|
||||
.sent()
|
||||
.iter()
|
||||
.copied()
|
||||
.flat_map(&f)
|
||||
.flat_map(|mac| mac.as_bytes())
|
||||
.copied()
|
||||
.collect();
|
||||
let recv_macs: Vec<u8> = refs
|
||||
.recv()
|
||||
.iter()
|
||||
.copied()
|
||||
.flat_map(&f)
|
||||
.flat_map(|mac| mac.as_bytes())
|
||||
.copied()
|
||||
.collect();
|
||||
|
||||
assert_eq!(sent_macs.len() % ENCODING_SIZE, 0);
|
||||
assert_eq!(recv_macs.len() % ENCODING_SIZE, 0);
|
||||
|
||||
if sent.len() != sent_macs.len() {
|
||||
return Err(ErrorRepr::IncorrectMacCount {
|
||||
direction: Direction::Sent,
|
||||
expected: sent_macs.len(),
|
||||
got: sent.len(),
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
if recv.len() != recv_macs.len() {
|
||||
return Err(ErrorRepr::IncorrectMacCount {
|
||||
direction: Direction::Received,
|
||||
expected: recv_macs.len(),
|
||||
got: recv.len(),
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
sent.iter_mut()
|
||||
.zip(sent_macs)
|
||||
.for_each(|(enc, mac)| *enc ^= mac);
|
||||
recv.iter_mut()
|
||||
.zip(recv_macs)
|
||||
.for_each(|(enc, mac)| *enc ^= mac);
|
||||
|
||||
let provider = Provider { sent, recv };
|
||||
|
||||
let tree = EncodingTree::new(hasher, idxs, &provider)?;
|
||||
let root = tree.root();
|
||||
|
||||
ctx.io_mut().send(root.clone()).await?;
|
||||
let secret = ctx.io_mut().expect_next().await?;
|
||||
|
||||
let commitment = EncodingCommitment { root, secret };
|
||||
|
||||
Ok((commitment, tree))
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Provider {
|
||||
sent: Vec<u8>,
|
||||
recv: Vec<u8>,
|
||||
}
|
||||
|
||||
impl EncodingProvider for Provider {
|
||||
fn provide_encoding(
|
||||
&self,
|
||||
direction: Direction,
|
||||
range: Range<usize>,
|
||||
dest: &mut Vec<u8>,
|
||||
) -> Result<(), EncodingProviderError> {
|
||||
let encodings = match direction {
|
||||
Direction::Sent => &self.sent,
|
||||
Direction::Received => &self.recv,
|
||||
};
|
||||
|
||||
let start = range.start * ENCODING_SIZE;
|
||||
let end = range.end * ENCODING_SIZE;
|
||||
|
||||
if end > encodings.len() {
|
||||
return Err(EncodingProviderError);
|
||||
}
|
||||
|
||||
dest.extend_from_slice(&encodings[start..end]);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Encoding protocol error.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error(transparent)]
|
||||
pub struct EncodingError(#[from] ErrorRepr);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("encoding protocol error: {0}")]
|
||||
enum ErrorRepr {
|
||||
#[error("I/O error: {0}")]
|
||||
Io(std::io::Error),
|
||||
#[error("incorrect MAC count for {direction}: expected {expected}, got {got}")]
|
||||
IncorrectMacCount {
|
||||
direction: Direction,
|
||||
expected: usize,
|
||||
got: usize,
|
||||
},
|
||||
#[error("encoding tree error: {0}")]
|
||||
EncodingTree(EncodingTreeError),
|
||||
}
|
||||
|
||||
impl From<std::io::Error> for EncodingError {
|
||||
fn from(value: std::io::Error) -> Self {
|
||||
Self(ErrorRepr::Io(value))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<EncodingTreeError> for EncodingError {
|
||||
fn from(value: EncodingTreeError) -> Self {
|
||||
Self(ErrorRepr::EncodingTree(value))
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
//! Message types.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use tlsn_core::connection::{ServerCertData, ServerName};
|
||||
|
||||
/// Message sent from Prover to Verifier to prove the server identity.
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct ServerIdentityProof {
|
||||
/// Server name.
|
||||
pub name: ServerName,
|
||||
/// Server identity data.
|
||||
pub data: ServerCertData,
|
||||
}
|
||||
@@ -1,304 +0,0 @@
|
||||
//! TLS transcript.
|
||||
|
||||
use mpz_memory_core::{
|
||||
binary::{Binary, U8},
|
||||
MemoryExt, Vector,
|
||||
};
|
||||
use mpz_vm_core::{Vm, VmError};
|
||||
use rangeset::Intersection;
|
||||
use tls_core::msgs::enums::ContentType;
|
||||
use tlsn_core::transcript::{Direction, Idx, PartialTranscript, Transcript};
|
||||
|
||||
/// A transcript of sent and received TLS records.
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct TlsTranscript {
|
||||
/// Records sent by the prover.
|
||||
pub sent: Vec<Record>,
|
||||
/// Records received by the prover.
|
||||
pub recv: Vec<Record>,
|
||||
}
|
||||
|
||||
impl TlsTranscript {
|
||||
/// Returns the application data transcript.
|
||||
pub fn to_transcript(&self) -> Result<Transcript, IncompleteTranscript> {
|
||||
let mut sent = Vec::new();
|
||||
let mut recv = Vec::new();
|
||||
|
||||
for record in self
|
||||
.sent
|
||||
.iter()
|
||||
.filter(|record| record.typ == ContentType::ApplicationData)
|
||||
{
|
||||
let plaintext = record
|
||||
.plaintext
|
||||
.as_ref()
|
||||
.ok_or(IncompleteTranscript {})?
|
||||
.clone();
|
||||
sent.extend_from_slice(&plaintext);
|
||||
}
|
||||
|
||||
for record in self
|
||||
.recv
|
||||
.iter()
|
||||
.filter(|record| record.typ == ContentType::ApplicationData)
|
||||
{
|
||||
let plaintext = record
|
||||
.plaintext
|
||||
.as_ref()
|
||||
.ok_or(IncompleteTranscript {})?
|
||||
.clone();
|
||||
recv.extend_from_slice(&plaintext);
|
||||
}
|
||||
|
||||
Ok(Transcript::new(sent, recv))
|
||||
}
|
||||
|
||||
/// Returns the application data transcript references.
|
||||
pub fn to_transcript_refs(&self) -> Result<TranscriptRefs, IncompleteTranscript> {
|
||||
let mut sent = Vec::new();
|
||||
let mut recv = Vec::new();
|
||||
|
||||
for record in self
|
||||
.sent
|
||||
.iter()
|
||||
.filter(|record| record.typ == ContentType::ApplicationData)
|
||||
{
|
||||
let plaintext_ref = record
|
||||
.plaintext_ref
|
||||
.as_ref()
|
||||
.ok_or(IncompleteTranscript {})?;
|
||||
sent.push(*plaintext_ref);
|
||||
}
|
||||
|
||||
for record in self
|
||||
.recv
|
||||
.iter()
|
||||
.filter(|record| record.typ == ContentType::ApplicationData)
|
||||
{
|
||||
let plaintext_ref = record
|
||||
.plaintext_ref
|
||||
.as_ref()
|
||||
.ok_or(IncompleteTranscript {})?;
|
||||
recv.push(*plaintext_ref);
|
||||
}
|
||||
|
||||
Ok(TranscriptRefs { sent, recv })
|
||||
}
|
||||
}
|
||||
|
||||
/// A TLS record.
|
||||
#[derive(Clone)]
|
||||
pub struct Record {
|
||||
/// Sequence number.
|
||||
pub seq: u64,
|
||||
/// Content type.
|
||||
pub typ: ContentType,
|
||||
/// Plaintext.
|
||||
pub plaintext: Option<Vec<u8>>,
|
||||
/// VM reference to the plaintext.
|
||||
pub plaintext_ref: Option<Vector<U8>>,
|
||||
/// Explicit nonce.
|
||||
pub explicit_nonce: Vec<u8>,
|
||||
/// Ciphertext.
|
||||
pub ciphertext: Vec<u8>,
|
||||
}
|
||||
|
||||
opaque_debug::implement!(Record);
|
||||
|
||||
/// References to the application plaintext in the transcript.
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct TranscriptRefs {
|
||||
sent: Vec<Vector<U8>>,
|
||||
recv: Vec<Vector<U8>>,
|
||||
}
|
||||
|
||||
impl TranscriptRefs {
|
||||
/// Returns the sent plaintext references.
|
||||
pub fn sent(&self) -> &[Vector<U8>] {
|
||||
&self.sent
|
||||
}
|
||||
|
||||
/// Returns the received plaintext references.
|
||||
pub fn recv(&self) -> &[Vector<U8>] {
|
||||
&self.recv
|
||||
}
|
||||
|
||||
/// Returns VM references for the given direction and index, or
|
||||
/// `None` if the index is out of bounds.
|
||||
pub fn get(&self, direction: Direction, idx: &Idx) -> Option<Vec<Vector<U8>>> {
|
||||
if idx.is_empty() {
|
||||
return Some(Vec::new());
|
||||
}
|
||||
|
||||
let refs = match direction {
|
||||
Direction::Sent => &self.sent,
|
||||
Direction::Received => &self.recv,
|
||||
};
|
||||
|
||||
// Computes the transcript range for each reference.
|
||||
let mut start = 0;
|
||||
let mut slice_iter = refs.iter().map(move |slice| {
|
||||
let out = (slice, start..start + slice.len());
|
||||
start += slice.len();
|
||||
out
|
||||
});
|
||||
|
||||
let mut slices = Vec::new();
|
||||
let (mut slice, mut slice_range) = slice_iter.next()?;
|
||||
for range in idx.iter_ranges() {
|
||||
loop {
|
||||
if let Some(intersection) = slice_range.intersection(&range) {
|
||||
let start = intersection.start - slice_range.start;
|
||||
let end = intersection.end - slice_range.start;
|
||||
slices.push(slice.get(start..end).expect("range should be in bounds"));
|
||||
}
|
||||
|
||||
// Proceed to the next range if the current slice extends beyond it. Otherwise, proceed
|
||||
// to the next slice.
|
||||
if range.end <= slice_range.end {
|
||||
break;
|
||||
} else {
|
||||
(slice, slice_range) = slice_iter.next()?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Some(slices)
|
||||
}
|
||||
}
|
||||
|
||||
/// Error for [`TlsTranscript::to_transcript`] and [`TlsTranscript::to_transcript_refs`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("not all application plaintext was committed to in the TLS transcript")]
|
||||
pub struct IncompleteTranscript {}
|
||||
|
||||
/// Decodes the transcript.
|
||||
pub fn decode_transcript(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
sent: &Idx,
|
||||
recv: &Idx,
|
||||
refs: &TranscriptRefs,
|
||||
) -> Result<(), VmError> {
|
||||
let sent_refs = refs.get(Direction::Sent, sent).expect("index is in bounds");
|
||||
let recv_refs = refs
|
||||
.get(Direction::Received, recv)
|
||||
.expect("index is in bounds");
|
||||
|
||||
for slice in sent_refs.into_iter().chain(recv_refs) {
|
||||
// Drop the future, we don't need it.
|
||||
drop(vm.decode(slice)?);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Verifies a partial transcript.
|
||||
pub fn verify_transcript(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
transcript: &PartialTranscript,
|
||||
refs: &TranscriptRefs,
|
||||
) -> Result<(), InconsistentTranscript> {
|
||||
let sent_refs = refs
|
||||
.get(Direction::Sent, transcript.sent_authed())
|
||||
.expect("index is in bounds");
|
||||
let recv_refs = refs
|
||||
.get(Direction::Received, transcript.received_authed())
|
||||
.expect("index is in bounds");
|
||||
|
||||
let mut authenticated_data = Vec::new();
|
||||
for data in sent_refs.into_iter().chain(recv_refs) {
|
||||
let plaintext = vm
|
||||
.get(data)
|
||||
.expect("reference is valid")
|
||||
.expect("plaintext is decoded");
|
||||
authenticated_data.extend_from_slice(&plaintext);
|
||||
}
|
||||
|
||||
let mut purported_data = Vec::with_capacity(authenticated_data.len());
|
||||
for range in transcript.sent_authed().iter_ranges() {
|
||||
purported_data.extend_from_slice(&transcript.sent_unsafe()[range]);
|
||||
}
|
||||
|
||||
for range in transcript.received_authed().iter_ranges() {
|
||||
purported_data.extend_from_slice(&transcript.received_unsafe()[range]);
|
||||
}
|
||||
|
||||
if purported_data != authenticated_data {
|
||||
return Err(InconsistentTranscript {});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Error for [`verify_transcript`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("inconsistent transcript")]
|
||||
pub struct InconsistentTranscript {}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::TranscriptRefs;
|
||||
use mpz_memory_core::{binary::U8, FromRaw, Slice, Vector};
|
||||
use rangeset::RangeSet;
|
||||
use std::ops::Range;
|
||||
use tlsn_core::transcript::{Direction, Idx};
|
||||
|
||||
// TRANSCRIPT_REFS:
|
||||
//
|
||||
// 48..96 -> 6 slots
|
||||
// 112..176 -> 8 slots
|
||||
// 240..288 -> 6 slots
|
||||
// 352..392 -> 5 slots
|
||||
// 440..480 -> 5 slots
|
||||
const TRANSCRIPT_REFS: &[Range<usize>] = &[48..96, 112..176, 240..288, 352..392, 440..480];
|
||||
|
||||
const IDXS: &[Range<usize>] = &[0..4, 5..10, 14..16, 16..28];
|
||||
|
||||
// 1. Take slots 0..4, 4 slots -> 48..80 (4)
|
||||
// 2. Take slots 5..10, 5 slots -> 88..96 (1) + 112..144 (4)
|
||||
// 3. Take slots 14..16, 2 slots -> 240..256 (2)
|
||||
// 4. Take slots 16..28, 12 slots -> 256..288 (4) + 352..392 (5) + 440..464 (3)
|
||||
//
|
||||
// 5. Merge slots 240..256 and 256..288 => 240..288 and get EXPECTED_REFS
|
||||
const EXPECTED_REFS: &[Range<usize>] =
|
||||
&[48..80, 88..96, 112..144, 240..288, 352..392, 440..464];
|
||||
|
||||
#[test]
|
||||
fn test_transcript_refs_get() {
|
||||
let transcript_refs: Vec<Vector<U8>> = TRANSCRIPT_REFS
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(|range| Vector::from_raw(Slice::from_range_unchecked(range)))
|
||||
.collect();
|
||||
|
||||
let transcript_refs = TranscriptRefs {
|
||||
sent: transcript_refs.clone(),
|
||||
recv: transcript_refs,
|
||||
};
|
||||
|
||||
let vm_refs = transcript_refs
|
||||
.get(Direction::Sent, &idx_fixture())
|
||||
.unwrap();
|
||||
|
||||
let expected_refs: Vec<Vector<U8>> = EXPECTED_REFS
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(|range| Vector::from_raw(Slice::from_range_unchecked(range)))
|
||||
.collect();
|
||||
|
||||
assert_eq!(
|
||||
vm_refs.len(),
|
||||
expected_refs.len(),
|
||||
"Length of actual and expected refs are not equal"
|
||||
);
|
||||
|
||||
for (&expected, actual) in expected_refs.iter().zip(vm_refs) {
|
||||
assert_eq!(expected, actual);
|
||||
}
|
||||
}
|
||||
|
||||
fn idx_fixture() -> Idx {
|
||||
let set = RangeSet::from(IDXS);
|
||||
Idx::builder().union(&set).build()
|
||||
}
|
||||
}
|
||||
@@ -1,210 +0,0 @@
|
||||
//! Zero-knowledge AES-CTR encryption.
|
||||
|
||||
use cipher::{
|
||||
aes::{Aes128, AesError},
|
||||
Cipher, CipherError, Keystream,
|
||||
};
|
||||
use mpz_memory_core::{
|
||||
binary::{Binary, U8},
|
||||
Array, Vector,
|
||||
};
|
||||
use mpz_vm_core::{prelude::*, Vm};
|
||||
|
||||
use crate::Role;
|
||||
|
||||
type Nonce = Array<U8, 8>;
|
||||
type Ctr = Array<U8, 4>;
|
||||
type Block = Array<U8, 16>;
|
||||
|
||||
const START_CTR: u32 = 2;
|
||||
|
||||
/// ZK AES-CTR encryption.
|
||||
#[derive(Debug)]
|
||||
pub struct ZkAesCtr {
|
||||
role: Role,
|
||||
aes: Aes128,
|
||||
state: State,
|
||||
}
|
||||
|
||||
impl ZkAesCtr {
|
||||
/// Creates a new ZK AES-CTR instance.
|
||||
pub fn new(role: Role) -> Self {
|
||||
Self {
|
||||
role,
|
||||
aes: Aes128::default(),
|
||||
state: State::Init,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the role.
|
||||
pub fn role(&self) -> &Role {
|
||||
&self.role
|
||||
}
|
||||
|
||||
/// Allocates `len` bytes for encryption.
|
||||
pub fn alloc(&mut self, vm: &mut dyn Vm<Binary>, len: usize) -> Result<(), ZkAesCtrError> {
|
||||
let State::Init = self.state.take() else {
|
||||
Err(ErrorRepr::State {
|
||||
reason: "must be in init state to allocate",
|
||||
})?
|
||||
};
|
||||
|
||||
// Round up to the nearest block size.
|
||||
let len = 16 * len.div_ceil(16);
|
||||
|
||||
let input = vm.alloc_vec::<U8>(len).map_err(ZkAesCtrError::vm)?;
|
||||
let keystream = self.aes.alloc_keystream(vm, len)?;
|
||||
|
||||
match self.role {
|
||||
Role::Prover => vm.mark_private(input).map_err(ZkAesCtrError::vm)?,
|
||||
Role::Verifier => vm.mark_blind(input).map_err(ZkAesCtrError::vm)?,
|
||||
}
|
||||
|
||||
self.state = State::Ready { input, keystream };
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sets the key and IV for the cipher.
|
||||
pub fn set_key(&mut self, key: Array<U8, 16>, iv: Array<U8, 4>) {
|
||||
self.aes.set_key(key);
|
||||
self.aes.set_iv(iv);
|
||||
}
|
||||
|
||||
/// Proves the encryption of `len` bytes.
|
||||
///
|
||||
/// Here we only assign certain values in the VM but the actual proving
|
||||
/// happens later when the plaintext is assigned and the VM is executed.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `vm` - Virtual machine.
|
||||
/// * `explicit_nonce` - Explicit nonce.
|
||||
/// * `len` - Length of the plaintext in bytes.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// A VM reference to the plaintext and the ciphertext.
|
||||
pub fn encrypt(
|
||||
&mut self,
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
explicit_nonce: Vec<u8>,
|
||||
len: usize,
|
||||
) -> Result<(Vector<U8>, Vector<U8>), ZkAesCtrError> {
|
||||
let State::Ready { input, keystream } = &mut self.state else {
|
||||
Err(ErrorRepr::State {
|
||||
reason: "must be in ready state to encrypt",
|
||||
})?
|
||||
};
|
||||
|
||||
let explicit_nonce: [u8; 8] =
|
||||
explicit_nonce
|
||||
.try_into()
|
||||
.map_err(|explicit_nonce: Vec<_>| ErrorRepr::ExplicitNonceLength {
|
||||
expected: 8,
|
||||
actual: explicit_nonce.len(),
|
||||
})?;
|
||||
|
||||
let block_count = len.div_ceil(16);
|
||||
let padded_len = block_count * 16;
|
||||
let padding_len = padded_len - len;
|
||||
|
||||
if padded_len > input.len() {
|
||||
Err(ErrorRepr::InsufficientPreprocessing {
|
||||
expected: padded_len,
|
||||
actual: input.len(),
|
||||
})?
|
||||
}
|
||||
|
||||
let mut input = input.split_off(input.len() - padded_len);
|
||||
let keystream = keystream.consume(padded_len)?;
|
||||
let mut output = keystream.apply(vm, input)?;
|
||||
|
||||
// Assign counter block inputs.
|
||||
let mut ctr = START_CTR..;
|
||||
keystream.assign(vm, explicit_nonce, move || {
|
||||
ctr.next().expect("range is unbounded").to_be_bytes()
|
||||
})?;
|
||||
|
||||
// Assign zeroes to the padding.
|
||||
if padding_len > 0 {
|
||||
let padding = input.split_off(input.len() - padding_len);
|
||||
// To simplify the implementation, we don't mark the padding as public, which is why only
|
||||
// the prover assigns it.
|
||||
if let Role::Prover = self.role {
|
||||
vm.assign(padding, vec![0; padding_len])
|
||||
.map_err(ZkAesCtrError::vm)?;
|
||||
}
|
||||
vm.commit(padding).map_err(ZkAesCtrError::vm)?;
|
||||
output.truncate(len);
|
||||
}
|
||||
|
||||
Ok((input, output))
|
||||
}
|
||||
}
|
||||
|
||||
enum State {
|
||||
Init,
|
||||
Ready {
|
||||
input: Vector<U8>,
|
||||
keystream: Keystream<Nonce, Ctr, Block>,
|
||||
},
|
||||
Error,
|
||||
}
|
||||
|
||||
impl State {
|
||||
fn take(&mut self) -> Self {
|
||||
std::mem::replace(self, State::Error)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for State {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
State::Init => write!(f, "Init"),
|
||||
State::Ready { .. } => write!(f, "Ready"),
|
||||
State::Error => write!(f, "Error"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Error for [`ZkAesCtr`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error(transparent)]
|
||||
pub struct ZkAesCtrError(#[from] ErrorRepr);
|
||||
|
||||
impl ZkAesCtrError {
|
||||
fn vm<E>(err: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
|
||||
{
|
||||
Self(ErrorRepr::Vm(err.into()))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("zk aes error")]
|
||||
enum ErrorRepr {
|
||||
#[error("invalid state: {reason}")]
|
||||
State { reason: &'static str },
|
||||
#[error("cipher error: {0}")]
|
||||
Cipher(Box<dyn std::error::Error + Send + Sync + 'static>),
|
||||
#[error("vm error: {0}")]
|
||||
Vm(Box<dyn std::error::Error + Send + Sync + 'static>),
|
||||
#[error("invalid explicit nonce length: expected {expected}, got {actual}")]
|
||||
ExplicitNonceLength { expected: usize, actual: usize },
|
||||
#[error("insufficient preprocessing: expected {expected}, got {actual}")]
|
||||
InsufficientPreprocessing { expected: usize, actual: usize },
|
||||
}
|
||||
|
||||
impl From<AesError> for ZkAesCtrError {
|
||||
fn from(err: AesError) -> Self {
|
||||
Self(ErrorRepr::Cipher(Box::new(err)))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CipherError> for ZkAesCtrError {
|
||||
fn from(err: CipherError) -> Self {
|
||||
Self(ErrorRepr::Cipher(Box::new(err)))
|
||||
}
|
||||
}
|
||||
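A minimal standalone sketch of the block arithmetic used by the encryption routine above (plain Rust, independent of this crate; block_layout is a hypothetical helper, not part of the API): the plaintext length is padded up to whole 16-byte AES blocks, and one counter block of keystream is consumed per block.

fn block_layout(len: usize) -> (usize, usize, usize) {
    // Number of AES-CTR blocks needed for `len` plaintext bytes.
    let block_count = len.div_ceil(16);
    // Keystream bytes actually consumed.
    let padded_len = block_count * 16;
    // Trailing bytes which the prover assigns as zero padding.
    let padding_len = padded_len - len;
    (block_count, padded_len, padding_len)
}

fn main() {
    // A 20-byte record needs 2 blocks, 32 keystream bytes and 12 padding bytes.
    assert_eq!(block_layout(20), (2, 32, 12));
    // An exact multiple of the block size needs no padding.
    assert_eq!(block_layout(32), (2, 32, 0));
}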
@@ -5,7 +5,7 @@ description = "This crate provides implementations of ciphers for two parties"
keywords = ["tls", "mpc", "2pc", "aes"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.11"
version = "0.1.0-alpha.13"
edition = "2021"

[lints]

@@ -36,7 +36,7 @@ impl Display for AesError {
}

if let Some(source) = &self.source {
write!(f, " caused by: {}", source)?;
write!(f, " caused by: {source}")?;
}

Ok(())

@@ -1,6 +1,6 @@
[package]
name = "tlsn-deap"
version = "0.1.0-alpha.11"
version = "0.1.0-alpha.13"
edition = "2021"

[lints]

@@ -391,7 +391,7 @@ mod tests {
memory::{binary::U8, correlated::Delta, Array},
prelude::*,
};
use mpz_zk::{Prover, Verifier};
use mpz_zk::{Prover, ProverConfig, Verifier, VerifierConfig};
use rand::{rngs::StdRng, SeedableRng};

use super::*;
@@ -408,8 +408,8 @@ mod tests {

let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(rcot_recv);
let verifier = Verifier::new(delta_zk, rcot_send);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);

let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);
@@ -488,8 +488,8 @@ mod tests {

let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(rcot_recv);
let verifier = Verifier::new(delta_zk, rcot_send);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);

let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);
@@ -574,8 +574,8 @@ mod tests {

let gb = Garbler::new(cot_send, [1u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(rcot_recv);
let verifier = Verifier::new(delta_zk, rcot_send);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);

let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);

@@ -5,7 +5,7 @@ description = "A 2PC implementation of TLS HMAC-SHA256 PRF"
keywords = ["tls", "mpc", "2pc", "hmac", "sha256"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.11"
version = "0.1.0-alpha.13"
edition = "2021"

[lints]

@@ -56,7 +56,7 @@ impl fmt::Display for PrfError {
}

if let Some(ref source) = self.source {
write!(f, " caused by: {}", source)?;
write!(f, " caused by: {source}")?;
}

Ok(())

@@ -40,7 +40,6 @@ enum PrfState {
inner_partial: [u32; 8],
a_output: DecodeFutureTyped<BitVec, [u8; 32]>,
},
FinishLastP,
Done,
}

@@ -137,16 +136,18 @@ impl PrfFunction {
assign_inner_local(vm, p.inner_local, *inner_partial, &msg)?;

if *iter == self.iterations {
self.state = PrfState::FinishLastP;
self.state = PrfState::Done;
} else {
self.state = PrfState::ComputeA {
iter: *iter + 1,
inner_partial: *inner_partial,
msg: output.to_vec(),
}
};
};
// We recurse, so that this PHash and the next AHash could
// be computed in a single VM execute call.
self.flush(vm)?;
}
}
PrfState::FinishLastP => self.state = PrfState::Done,
_ => (),
}
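For reference, the ComputeA/FinishLastP/Done transitions above step through the A(i) and P_hash iterations of the TLS 1.2 PRF (RFC 5246). Below is a plain, non-MPC sketch of P_SHA256 using the hmac and sha2 crates, for orientation only; it is not how this 2PC implementation evaluates the PRF.

use hmac::{Hmac, Mac};
use sha2::Sha256;

fn hmac_sha256(key: &[u8], data: &[u8]) -> Vec<u8> {
    let mut mac = Hmac::<Sha256>::new_from_slice(key).expect("HMAC accepts any key length");
    mac.update(data);
    mac.finalize().into_bytes().to_vec()
}

// P_SHA256(secret, seed): A(0) = seed, A(i) = HMAC(secret, A(i-1)),
// output = HMAC(secret, A(1) || seed) || HMAC(secret, A(2) || seed) || ...
fn p_sha256(secret: &[u8], seed: &[u8], out_len: usize) -> Vec<u8> {
    let mut out = Vec::with_capacity(out_len);
    let mut a = hmac_sha256(secret, seed); // A(1)
    while out.len() < out_len {
        let mut msg = a.clone();
        msg.extend_from_slice(seed);
        out.extend_from_slice(&hmac_sha256(secret, &msg)); // next P_hash block
        a = hmac_sha256(secret, &a); // next A(i)
    }
    out.truncate(out_len);
    out
}

fn main() {
    // E.g. the TLS 1.2 master secret is PRF(pre_master_secret, "master secret",
    // client_random || server_random) truncated to 48 bytes (illustrative inputs).
    let seed = [b"master secret".as_slice(), &[1u8; 64]].concat();
    assert_eq!(p_sha256(&[0u8; 48], &seed, 48).len(), 48);
}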
@@ -5,7 +5,7 @@ description = "Implementation of the 3-party key-exchange protocol"
keywords = ["tls", "mpc", "2pc", "pms", "key-exchange"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.11"
version = "0.1.0-alpha.13"
edition = "2021"

[lints]

@@ -5,7 +5,7 @@ description = "Core types for TLSNotary"
keywords = ["tls", "mpc", "2pc", "types"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.11"
version = "0.1.0-alpha.13"
edition = "2021"

[lints]
@@ -13,7 +13,13 @@ workspace = true

[features]
default = []
fixtures = ["dep:hex", "dep:tlsn-data-fixtures"]
fixtures = [
"dep:hex",
"dep:tlsn-data-fixtures",
"dep:aead",
"dep:aes-gcm",
"dep:generic-array",
]

[dependencies]
tlsn-data-fixtures = { workspace = true, optional = true }
@@ -21,13 +27,13 @@ tlsn-tls-core = { workspace = true, features = ["serde"] }
tlsn-utils = { workspace = true }
rangeset = { workspace = true, features = ["serde"] }

bcs = { workspace = true }
aead = { workspace = true, features = ["alloc"], optional = true }
aes-gcm = { workspace = true, optional = true }
generic-array = { workspace = true, optional = true }
bimap = { version = "0.6", features = ["serde"] }
blake3 = { workspace = true }
hex = { workspace = true, optional = true }
k256 = { workspace = true }
opaque-debug = { workspace = true }
p256 = { workspace = true, features = ["serde"] }
rand = { workspace = true }
rand_core = { workspace = true }
rand_chacha = { workspace = true }
@@ -36,21 +42,20 @@ rstest = { workspace = true, optional = true }
serde = { workspace = true }
sha2 = { workspace = true }
thiserror = { workspace = true }
tiny-keccak = { version = "2.0", features = ["keccak"] }
tiny-keccak = { workspace = true, features = ["keccak"] }
web-time = { workspace = true }
webpki-roots = { workspace = true }
rustls-webpki = { workspace = true, features = ["ring"] }
rustls-pki-types = { workspace = true }
itybity = { workspace = true }
zeroize = { workspace = true, features = ["zeroize_derive"] }

[dev-dependencies]
alloy-primitives = { version = "0.8.22", default-features = false }
alloy-signer = { version = "0.12", default-features = false }
alloy-signer-local = { version = "0.12", default-features = false }
aead = { workspace = true, features = ["alloc"] }
aes-gcm = { workspace = true }
generic-array = { workspace = true }
bincode = { workspace = true }
hex = { workspace = true }
rstest = { workspace = true }
tlsn-data-fixtures = { workspace = true }
rand06-compat = { workspace = true }

[[test]]
name = "api"
required-features = ["fixtures"]
webpki-root-certs = { workspace = true }
@@ -1,44 +1,12 @@
//! TLS connection types.
//!
//! ## Commitment
//!
//! During the TLS handshake the Notary receives the Server's ephemeral public
//! key, and this key serves as a binding commitment to the identity of the
//! Server. The ephemeral key itself does not reveal the Server's identity, but
//! it is bound to it via a signature created using the Server's
//! X.509 certificate.
//!
//! A Prover can withhold the Server's signature and certificate chain from the
//! Notary to improve privacy and censorship resistance.
//!
//! ## Proving the Server's identity
//!
//! A Prover can prove the Server's identity to a Verifier by sending a
//! [`ServerIdentityProof`]. This proof contains all the information required to
//! establish the link between the TLS connection and the Server's X.509
//! certificate. A Verifier checks the Server's certificate against their own
//! trust anchors, the same way a typical TLS client would.

mod commit;
mod proof;

use std::fmt;

use rustls_pki_types as webpki_types;
use serde::{Deserialize, Serialize};
use tls_core::{
msgs::{
codec::Codec,
enums::NamedGroup,
handshake::{DigitallySignedStruct, ServerECDHParams},
},
verify::ServerCertVerifier as _,
};
use web_time::{Duration, UNIX_EPOCH};
use tls_core::msgs::{codec::Codec, enums::NamedGroup, handshake::ServerECDHParams};

use crate::{hash::impl_domain_separator, CryptoProvider};

pub use commit::{ServerCertCommitment, ServerCertOpening};
pub use proof::{ServerIdentityProof, ServerIdentityProofError};
use crate::webpki::{CertificateDer, ServerCertVerifier, ServerCertVerifierError};

/// TLS version.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
@@ -62,40 +30,82 @@ impl TryFrom<tls_core::msgs::enums::ProtocolVersion> for TlsVersion {
}
}

/// Server's name, a.k.a. the DNS name.
/// Server's name.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct ServerName(String);
pub enum ServerName {
/// DNS name.
Dns(DnsName),
}

impl ServerName {
/// Creates a new server name.
pub fn new(name: String) -> Self {
Self(name)
}

/// Returns the name as a string.
pub fn as_str(&self) -> &str {
&self.0
}
}

impl From<&str> for ServerName {
fn from(name: &str) -> Self {
Self(name.to_string())
}
}

impl AsRef<str> for ServerName {
fn as_ref(&self) -> &str {
&self.0
pub(crate) fn to_webpki(&self) -> webpki_types::ServerName<'static> {
match self {
ServerName::Dns(name) => webpki_types::ServerName::DnsName(
webpki_types::DnsName::try_from(name.0.as_str())
.expect("name was validated")
.to_owned(),
),
}
}
}

impl fmt::Display for ServerName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ServerName::Dns(name) => write!(f, "{name}"),
}
}
}

/// DNS name.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(try_from = "String")]
pub struct DnsName(String);

impl DnsName {
/// Returns the DNS name as a string.
pub fn as_str(&self) -> &str {
self.0.as_str()
}
}

impl fmt::Display for DnsName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}

impl AsRef<str> for DnsName {
fn as_ref(&self) -> &str {
&self.0
}
}

/// Error returned when a DNS name is invalid.
#[derive(Debug, thiserror::Error)]
#[error("invalid DNS name")]
pub struct InvalidDnsNameError {}

impl TryFrom<&str> for DnsName {
type Error = InvalidDnsNameError;

fn try_from(value: &str) -> Result<Self, Self::Error> {
// Borrow validation from rustls
match webpki_types::DnsName::try_from_str(value) {
Ok(_) => Ok(DnsName(value.to_string())),
Err(_) => Err(InvalidDnsNameError {}),
}
}
}

impl TryFrom<String> for DnsName {
type Error = InvalidDnsNameError;

fn try_from(value: String) -> Result<Self, Self::Error> {
Self::try_from(value.as_str())
}
}
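A short usage sketch of the two types above (hypothetical snippet; it assumes ServerName and DnsName are reachable at tlsn_core::connection): construction goes through the webpki-backed validation, so malformed names are rejected up front.

use tlsn_core::connection::{DnsName, ServerName};

fn main() {
    // Validation is delegated to webpki's DNS name parser.
    let dns = DnsName::try_from("tlsnotary.org").expect("valid DNS name");
    assert!(DnsName::try_from("not a valid name").is_err());

    let name = ServerName::Dns(dns);
    assert_eq!(name.to_string(), "tlsnotary.org");
}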
/// Type of a public key.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
@@ -106,84 +116,84 @@ pub enum KeyType {
|
||||
SECP256R1 = 0x0017,
|
||||
}
|
||||
|
||||
/// Signature scheme on the key exchange parameters.
|
||||
/// Signature algorithm used on the key exchange parameters.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
#[allow(non_camel_case_types, missing_docs)]
|
||||
pub enum SignatureScheme {
|
||||
RSA_PKCS1_SHA1 = 0x0201,
|
||||
ECDSA_SHA1_Legacy = 0x0203,
|
||||
RSA_PKCS1_SHA256 = 0x0401,
|
||||
ECDSA_NISTP256_SHA256 = 0x0403,
|
||||
RSA_PKCS1_SHA384 = 0x0501,
|
||||
ECDSA_NISTP384_SHA384 = 0x0503,
|
||||
RSA_PKCS1_SHA512 = 0x0601,
|
||||
ECDSA_NISTP521_SHA512 = 0x0603,
|
||||
RSA_PSS_SHA256 = 0x0804,
|
||||
RSA_PSS_SHA384 = 0x0805,
|
||||
RSA_PSS_SHA512 = 0x0806,
|
||||
ED25519 = 0x0807,
|
||||
pub enum SignatureAlgorithm {
|
||||
ECDSA_NISTP256_SHA256,
|
||||
ECDSA_NISTP256_SHA384,
|
||||
ECDSA_NISTP384_SHA256,
|
||||
ECDSA_NISTP384_SHA384,
|
||||
ED25519,
|
||||
RSA_PKCS1_2048_8192_SHA256,
|
||||
RSA_PKCS1_2048_8192_SHA384,
|
||||
RSA_PKCS1_2048_8192_SHA512,
|
||||
RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
|
||||
RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
|
||||
RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
|
||||
}
|
||||
|
||||
impl TryFrom<tls_core::msgs::enums::SignatureScheme> for SignatureScheme {
|
||||
type Error = &'static str;
|
||||
|
||||
fn try_from(value: tls_core::msgs::enums::SignatureScheme) -> Result<Self, Self::Error> {
|
||||
use tls_core::msgs::enums::SignatureScheme as Core;
|
||||
use SignatureScheme::*;
|
||||
Ok(match value {
|
||||
Core::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
|
||||
Core::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
|
||||
Core::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
|
||||
Core::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
|
||||
Core::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
|
||||
Core::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
|
||||
Core::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
|
||||
Core::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
|
||||
Core::RSA_PSS_SHA256 => RSA_PSS_SHA256,
|
||||
Core::RSA_PSS_SHA384 => RSA_PSS_SHA384,
|
||||
Core::RSA_PSS_SHA512 => RSA_PSS_SHA512,
|
||||
Core::ED25519 => ED25519,
|
||||
_ => return Err("unsupported signature scheme"),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SignatureScheme> for tls_core::msgs::enums::SignatureScheme {
|
||||
fn from(value: SignatureScheme) -> Self {
|
||||
use tls_core::msgs::enums::SignatureScheme::*;
|
||||
match value {
|
||||
SignatureScheme::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
|
||||
SignatureScheme::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
|
||||
SignatureScheme::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
|
||||
SignatureScheme::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
|
||||
SignatureScheme::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
|
||||
SignatureScheme::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
|
||||
SignatureScheme::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
|
||||
SignatureScheme::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
|
||||
SignatureScheme::RSA_PSS_SHA256 => RSA_PSS_SHA256,
|
||||
SignatureScheme::RSA_PSS_SHA384 => RSA_PSS_SHA384,
|
||||
SignatureScheme::RSA_PSS_SHA512 => RSA_PSS_SHA512,
|
||||
SignatureScheme::ED25519 => ED25519,
|
||||
impl fmt::Display for SignatureAlgorithm {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
SignatureAlgorithm::ECDSA_NISTP256_SHA256 => write!(f, "ECDSA_NISTP256_SHA256"),
|
||||
SignatureAlgorithm::ECDSA_NISTP256_SHA384 => write!(f, "ECDSA_NISTP256_SHA384"),
|
||||
SignatureAlgorithm::ECDSA_NISTP384_SHA256 => write!(f, "ECDSA_NISTP384_SHA256"),
|
||||
SignatureAlgorithm::ECDSA_NISTP384_SHA384 => write!(f, "ECDSA_NISTP384_SHA384"),
|
||||
SignatureAlgorithm::ED25519 => write!(f, "ED25519"),
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => {
|
||||
write!(f, "RSA_PKCS1_2048_8192_SHA256")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => {
|
||||
write!(f, "RSA_PKCS1_2048_8192_SHA384")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => {
|
||||
write!(f, "RSA_PKCS1_2048_8192_SHA512")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
|
||||
write!(f, "RSA_PSS_2048_8192_SHA256_LEGACY_KEY")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
|
||||
write!(f, "RSA_PSS_2048_8192_SHA384_LEGACY_KEY")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
|
||||
write!(f, "RSA_PSS_2048_8192_SHA512_LEGACY_KEY")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// X.509 certificate, DER encoded.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Certificate(pub Vec<u8>);
|
||||
|
||||
impl From<tls_core::key::Certificate> for Certificate {
|
||||
fn from(cert: tls_core::key::Certificate) -> Self {
|
||||
Self(cert.0)
|
||||
impl From<tls_core::verify::SignatureAlgorithm> for SignatureAlgorithm {
|
||||
fn from(value: tls_core::verify::SignatureAlgorithm) -> Self {
|
||||
use tls_core::verify::SignatureAlgorithm as Core;
|
||||
match value {
|
||||
Core::ECDSA_NISTP256_SHA256 => SignatureAlgorithm::ECDSA_NISTP256_SHA256,
|
||||
Core::ECDSA_NISTP256_SHA384 => SignatureAlgorithm::ECDSA_NISTP256_SHA384,
|
||||
Core::ECDSA_NISTP384_SHA256 => SignatureAlgorithm::ECDSA_NISTP384_SHA256,
|
||||
Core::ECDSA_NISTP384_SHA384 => SignatureAlgorithm::ECDSA_NISTP384_SHA384,
|
||||
Core::ED25519 => SignatureAlgorithm::ED25519,
|
||||
Core::RSA_PKCS1_2048_8192_SHA256 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
|
||||
Core::RSA_PKCS1_2048_8192_SHA384 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384,
|
||||
Core::RSA_PKCS1_2048_8192_SHA512 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512,
|
||||
Core::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
|
||||
}
|
||||
Core::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
|
||||
}
|
||||
Core::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Server's signature of the key exchange parameters.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ServerSignature {
|
||||
/// Signature scheme.
|
||||
pub scheme: SignatureScheme,
|
||||
/// Signature algorithm.
|
||||
pub alg: SignatureAlgorithm,
|
||||
/// Signature data.
|
||||
pub sig: Vec<u8>,
|
||||
}
|
||||
@@ -198,8 +208,6 @@ pub struct ServerEphemKey {
|
||||
pub key: Vec<u8>,
|
||||
}
|
||||
|
||||
impl_domain_separator!(ServerEphemKey);
|
||||
|
||||
impl ServerEphemKey {
|
||||
/// Encodes the key exchange parameters as in TLS.
|
||||
pub(crate) fn kx_params(&self) -> Vec<u8> {
|
||||
@@ -240,8 +248,6 @@ pub struct ConnectionInfo {
|
||||
pub transcript_length: TranscriptLength,
|
||||
}
|
||||
|
||||
impl_domain_separator!(ConnectionInfo);
|
||||
|
||||
/// Transcript length information.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct TranscriptLength {
|
||||
@@ -251,9 +257,9 @@ pub struct TranscriptLength {
|
||||
pub received: u32,
|
||||
}
|
||||
|
||||
/// TLS 1.2 handshake data.
|
||||
/// TLS 1.2 certificate binding.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct HandshakeDataV1_2 {
|
||||
pub struct CertBindingV1_2 {
|
||||
/// Client random.
|
||||
pub client_random: [u8; 32],
|
||||
/// Server random.
|
||||
@@ -262,87 +268,80 @@ pub struct HandshakeDataV1_2 {
|
||||
pub server_ephemeral_key: ServerEphemKey,
|
||||
}
|
||||
|
||||
/// TLS handshake data.
|
||||
/// TLS certificate binding.
|
||||
///
|
||||
/// This is the data that the server signs using its public key in the
|
||||
/// certificate it presents during the TLS handshake. This provides a binding
|
||||
/// between the server's identity and the ephemeral keys used to authenticate
|
||||
/// the TLS session.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
#[non_exhaustive]
|
||||
pub enum HandshakeData {
|
||||
/// TLS 1.2 handshake data.
|
||||
V1_2(HandshakeDataV1_2),
|
||||
pub enum CertBinding {
|
||||
/// TLS 1.2 certificate binding.
|
||||
V1_2(CertBindingV1_2),
|
||||
}
|
||||
|
||||
impl_domain_separator!(HandshakeData);
|
||||
|
||||
/// Server certificate and handshake data.
|
||||
/// Verify data from the TLS handshake finished messages.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ServerCertData {
|
||||
/// Certificate chain.
|
||||
pub certs: Vec<Certificate>,
|
||||
/// Server signature of the key exchange parameters.
|
||||
pub sig: ServerSignature,
|
||||
/// TLS handshake data.
|
||||
pub handshake: HandshakeData,
|
||||
pub struct VerifyData {
|
||||
/// Client finished verify data.
|
||||
pub client_finished: Vec<u8>,
|
||||
/// Server finished verify data.
|
||||
pub server_finished: Vec<u8>,
|
||||
}
|
||||
|
||||
impl ServerCertData {
|
||||
/// Verifies the server certificate data.
|
||||
/// TLS handshake data.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct HandshakeData {
|
||||
/// Server certificate chain.
|
||||
pub certs: Vec<CertificateDer>,
|
||||
/// Server certificate signature over the binding message.
|
||||
pub sig: ServerSignature,
|
||||
/// Certificate binding.
|
||||
pub binding: CertBinding,
|
||||
}
|
||||
|
||||
impl HandshakeData {
|
||||
/// Verifies the handshake data.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `provider` - The crypto provider to use for verification.
|
||||
/// * `verifier` - Cerificate verifier.
|
||||
/// * `time` - The time of the connection.
|
||||
/// * `server_ephemeral_key` - The server's ephemeral key.
|
||||
/// * `server_name` - The server name.
|
||||
pub fn verify_with_provider(
|
||||
pub fn verify(
|
||||
&self,
|
||||
provider: &CryptoProvider,
|
||||
verifier: &ServerCertVerifier,
|
||||
time: u64,
|
||||
server_ephemeral_key: &ServerEphemKey,
|
||||
server_name: &ServerName,
|
||||
) -> Result<(), CertificateVerificationError> {
|
||||
) -> Result<(), HandshakeVerificationError> {
|
||||
#[allow(irrefutable_let_patterns)]
|
||||
let HandshakeData::V1_2(HandshakeDataV1_2 {
|
||||
let CertBinding::V1_2(CertBindingV1_2 {
|
||||
client_random,
|
||||
server_random,
|
||||
server_ephemeral_key: expected_server_ephemeral_key,
|
||||
}) = &self.handshake
|
||||
}) = &self.binding
|
||||
else {
|
||||
unreachable!("only TLS 1.2 is implemented")
|
||||
};
|
||||
|
||||
if server_ephemeral_key != expected_server_ephemeral_key {
|
||||
return Err(CertificateVerificationError::InvalidServerEphemeralKey);
|
||||
return Err(HandshakeVerificationError::InvalidServerEphemeralKey);
|
||||
}
|
||||
|
||||
// Verify server name.
|
||||
let server_name = tls_core::dns::ServerName::try_from(server_name.as_ref())
|
||||
.map_err(|_| CertificateVerificationError::InvalidIdentity(server_name.clone()))?;
|
||||
|
||||
// Verify server certificate.
|
||||
let cert_chain = self
|
||||
let (end_entity, intermediates) = self
|
||||
.certs
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|cert| tls_core::key::Certificate(cert.0))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let (end_entity, intermediates) = cert_chain
|
||||
.split_first()
|
||||
.ok_or(CertificateVerificationError::MissingCerts)?;
|
||||
.ok_or(HandshakeVerificationError::MissingCerts)?;
|
||||
|
||||
// Verify the end entity cert is valid for the provided server name
|
||||
// and that it chains to at least one of the roots we trust.
|
||||
provider
|
||||
.cert
|
||||
.verify_server_cert(
|
||||
end_entity,
|
||||
intermediates,
|
||||
&server_name,
|
||||
&mut [].into_iter(),
|
||||
&[],
|
||||
UNIX_EPOCH + Duration::from_secs(time),
|
||||
)
|
||||
.map_err(|_| CertificateVerificationError::InvalidCert)?;
|
||||
verifier
|
||||
.verify_server_cert(end_entity, intermediates, server_name, time)
|
||||
.map_err(HandshakeVerificationError::ServerCert)?;
|
||||
|
||||
// Verify the signature matches the certificate and key exchange parameters.
|
||||
let mut message = Vec::new();
|
||||
@@ -350,12 +349,34 @@ impl ServerCertData {
|
||||
message.extend_from_slice(server_random);
|
||||
message.extend_from_slice(&server_ephemeral_key.kx_params());
|
||||
|
||||
let dss = DigitallySignedStruct::new(self.sig.scheme.into(), self.sig.sig.clone());
|
||||
use webpki::ring as alg;
|
||||
let sig_alg = match self.sig.alg {
|
||||
SignatureAlgorithm::ECDSA_NISTP256_SHA256 => alg::ECDSA_P256_SHA256,
|
||||
SignatureAlgorithm::ECDSA_NISTP256_SHA384 => alg::ECDSA_P256_SHA384,
|
||||
SignatureAlgorithm::ECDSA_NISTP384_SHA256 => alg::ECDSA_P384_SHA256,
|
||||
SignatureAlgorithm::ECDSA_NISTP384_SHA384 => alg::ECDSA_P384_SHA384,
|
||||
SignatureAlgorithm::ED25519 => alg::ED25519,
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => alg::RSA_PKCS1_2048_8192_SHA256,
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => alg::RSA_PKCS1_2048_8192_SHA384,
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => alg::RSA_PKCS1_2048_8192_SHA512,
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
|
||||
alg::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
|
||||
alg::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
|
||||
alg::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
|
||||
}
|
||||
};
|
||||
|
||||
provider
|
||||
.cert
|
||||
.verify_tls12_signature(&message, end_entity, &dss)
|
||||
.map_err(|_| CertificateVerificationError::InvalidServerSignature)?;
|
||||
let end_entity = webpki_types::CertificateDer::from(end_entity.0.as_slice());
|
||||
let end_entity = webpki::EndEntityCert::try_from(&end_entity)
|
||||
.map_err(|_| HandshakeVerificationError::InvalidEndEntityCertificate)?;
|
||||
|
||||
end_entity
|
||||
.verify_signature(sig_alg, &message, &self.sig.sig)
|
||||
.map_err(|_| HandshakeVerificationError::InvalidServerSignature)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -364,55 +385,49 @@ impl ServerCertData {
|
||||
/// Errors that can occur when verifying a certificate chain or signature.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[allow(missing_docs)]
|
||||
pub enum CertificateVerificationError {
|
||||
#[error("invalid server identity: {0}")]
|
||||
InvalidIdentity(ServerName),
|
||||
pub enum HandshakeVerificationError {
|
||||
#[error("invalid end entity certificate")]
|
||||
InvalidEndEntityCertificate,
|
||||
#[error("missing server certificates")]
|
||||
MissingCerts,
|
||||
#[error("invalid server certificate")]
|
||||
InvalidCert,
|
||||
#[error("invalid server signature")]
|
||||
InvalidServerSignature,
|
||||
#[error("invalid server ephemeral key")]
|
||||
InvalidServerEphemeralKey,
|
||||
#[error("server certificate verification failed: {0}")]
|
||||
ServerCert(ServerCertVerifierError),
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{
|
||||
fixtures::ConnectionFixture, provider::default_cert_verifier, transcript::Transcript,
|
||||
};
|
||||
use crate::{fixtures::ConnectionFixture, transcript::Transcript, webpki::RootCertStore};
|
||||
|
||||
use hex::FromHex;
|
||||
use rstest::*;
|
||||
use tls_core::verify::WebPkiVerifier;
|
||||
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
|
||||
|
||||
#[fixture]
|
||||
#[once]
|
||||
fn crypto_provider() -> CryptoProvider {
|
||||
let mut store = default_cert_verifier().root_store().clone();
|
||||
fn verifier() -> ServerCertVerifier {
|
||||
let mut root_store = RootCertStore {
|
||||
roots: webpki_root_certs::TLS_SERVER_ROOT_CERTS
|
||||
.iter()
|
||||
.map(|c| CertificateDer(c.to_vec()))
|
||||
.collect(),
|
||||
};
|
||||
|
||||
// Add a cert which is no longer included in the Mozilla root store.
|
||||
let cert = tls_core::key::Certificate(
|
||||
root_store.roots.push(
|
||||
appliedzkp()
|
||||
.server_cert_data
|
||||
.certs
|
||||
.last()
|
||||
.expect("chain is valid")
|
||||
.0
|
||||
.clone(),
|
||||
);
|
||||
|
||||
store.add(&cert).unwrap();
|
||||
|
||||
CryptoProvider {
|
||||
hash: Default::default(),
|
||||
cert: WebPkiVerifier::new(store, None),
|
||||
signer: Default::default(),
|
||||
signature: Default::default(),
|
||||
}
|
||||
ServerCertVerifier::new(&root_store).unwrap()
|
||||
}
|
||||
|
||||
fn tlsnotary() -> ConnectionFixture {
|
||||
@@ -428,7 +443,7 @@ mod tests {
|
||||
#[case::tlsnotary(tlsnotary())]
|
||||
#[case::appliedzkp(appliedzkp())]
|
||||
fn test_verify_cert_chain_sucess_ca_implicit(
|
||||
crypto_provider: &CryptoProvider,
|
||||
verifier: &ServerCertVerifier,
|
||||
#[case] mut data: ConnectionFixture,
|
||||
) {
|
||||
// Remove the CA cert
|
||||
@@ -436,11 +451,11 @@ mod tests {
|
||||
|
||||
assert!(data
|
||||
.server_cert_data
|
||||
.verify_with_provider(
|
||||
crypto_provider,
|
||||
.verify(
|
||||
verifier,
|
||||
data.connection_info.time,
|
||||
data.server_ephemeral_key(),
|
||||
&ServerName::from(data.server_name.as_ref()),
|
||||
&data.server_name,
|
||||
)
|
||||
.is_ok());
|
||||
}
|
||||
@@ -451,16 +466,16 @@ mod tests {
|
||||
#[case::tlsnotary(tlsnotary())]
|
||||
#[case::appliedzkp(appliedzkp())]
|
||||
fn test_verify_cert_chain_success_ca_explicit(
|
||||
crypto_provider: &CryptoProvider,
|
||||
verifier: &ServerCertVerifier,
|
||||
#[case] data: ConnectionFixture,
|
||||
) {
|
||||
assert!(data
|
||||
.server_cert_data
|
||||
.verify_with_provider(
|
||||
crypto_provider,
|
||||
.verify(
|
||||
verifier,
|
||||
data.connection_info.time,
|
||||
data.server_ephemeral_key(),
|
||||
&ServerName::from(data.server_name.as_ref()),
|
||||
&data.server_name,
|
||||
)
|
||||
.is_ok());
|
||||
}
|
||||
@@ -470,22 +485,22 @@ mod tests {
|
||||
#[case::tlsnotary(tlsnotary())]
|
||||
#[case::appliedzkp(appliedzkp())]
|
||||
fn test_verify_cert_chain_fail_bad_time(
|
||||
crypto_provider: &CryptoProvider,
|
||||
verifier: &ServerCertVerifier,
|
||||
#[case] data: ConnectionFixture,
|
||||
) {
|
||||
// unix time when the cert chain was NOT valid
|
||||
let bad_time: u64 = 1571465711;
|
||||
|
||||
let err = data.server_cert_data.verify_with_provider(
|
||||
crypto_provider,
|
||||
let err = data.server_cert_data.verify(
|
||||
verifier,
|
||||
bad_time,
|
||||
data.server_ephemeral_key(),
|
||||
&ServerName::from(data.server_name.as_ref()),
|
||||
&data.server_name,
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
err.unwrap_err(),
|
||||
CertificateVerificationError::InvalidCert
|
||||
HandshakeVerificationError::ServerCert(_)
|
||||
));
|
||||
}
|
||||
|
||||
@@ -494,7 +509,7 @@ mod tests {
|
||||
#[case::tlsnotary(tlsnotary())]
|
||||
#[case::appliedzkp(appliedzkp())]
|
||||
fn test_verify_cert_chain_fail_no_interm_cert(
|
||||
crypto_provider: &CryptoProvider,
|
||||
verifier: &ServerCertVerifier,
|
||||
#[case] mut data: ConnectionFixture,
|
||||
) {
|
||||
// Remove the CA cert
|
||||
@@ -502,16 +517,16 @@ mod tests {
|
||||
// Remove the intermediate cert
|
||||
data.server_cert_data.certs.pop();
|
||||
|
||||
let err = data.server_cert_data.verify_with_provider(
|
||||
crypto_provider,
|
||||
let err = data.server_cert_data.verify(
|
||||
verifier,
|
||||
data.connection_info.time,
|
||||
data.server_ephemeral_key(),
|
||||
&ServerName::from(data.server_name.as_ref()),
|
||||
&data.server_name,
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
err.unwrap_err(),
|
||||
CertificateVerificationError::InvalidCert
|
||||
HandshakeVerificationError::ServerCert(_)
|
||||
));
|
||||
}
|
||||
|
||||
@@ -521,22 +536,22 @@ mod tests {
|
||||
#[case::tlsnotary(tlsnotary())]
|
||||
#[case::appliedzkp(appliedzkp())]
|
||||
fn test_verify_cert_chain_fail_no_interm_cert_with_ca_cert(
|
||||
crypto_provider: &CryptoProvider,
|
||||
verifier: &ServerCertVerifier,
|
||||
#[case] mut data: ConnectionFixture,
|
||||
) {
|
||||
// Remove the intermediate cert
|
||||
data.server_cert_data.certs.remove(1);
|
||||
|
||||
let err = data.server_cert_data.verify_with_provider(
|
||||
crypto_provider,
|
||||
let err = data.server_cert_data.verify(
|
||||
verifier,
|
||||
data.connection_info.time,
|
||||
data.server_ephemeral_key(),
|
||||
&ServerName::from(data.server_name.as_ref()),
|
||||
&data.server_name,
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
err.unwrap_err(),
|
||||
CertificateVerificationError::InvalidCert
|
||||
HandshakeVerificationError::ServerCert(_)
|
||||
));
|
||||
}
|
||||
|
||||
@@ -545,24 +560,24 @@ mod tests {
|
||||
#[case::tlsnotary(tlsnotary())]
|
||||
#[case::appliedzkp(appliedzkp())]
|
||||
fn test_verify_cert_chain_fail_bad_ee_cert(
|
||||
crypto_provider: &CryptoProvider,
|
||||
verifier: &ServerCertVerifier,
|
||||
#[case] mut data: ConnectionFixture,
|
||||
) {
|
||||
let ee: &[u8] = include_bytes!("./fixtures/data/unknown/ee.der");
|
||||
|
||||
// Change the end entity cert
|
||||
data.server_cert_data.certs[0] = Certificate(ee.to_vec());
|
||||
data.server_cert_data.certs[0] = CertificateDer(ee.to_vec());
|
||||
|
||||
let err = data.server_cert_data.verify_with_provider(
|
||||
crypto_provider,
|
||||
let err = data.server_cert_data.verify(
|
||||
verifier,
|
||||
data.connection_info.time,
|
||||
data.server_ephemeral_key(),
|
||||
&ServerName::from(data.server_name.as_ref()),
|
||||
&data.server_name,
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
err.unwrap_err(),
|
||||
CertificateVerificationError::InvalidCert
|
||||
HandshakeVerificationError::ServerCert(_)
|
||||
));
|
||||
}
|
||||
|
||||
@@ -571,23 +586,23 @@ mod tests {
|
||||
#[case::tlsnotary(tlsnotary())]
|
||||
#[case::appliedzkp(appliedzkp())]
|
||||
fn test_verify_sig_ke_params_fail_bad_client_random(
|
||||
crypto_provider: &CryptoProvider,
|
||||
verifier: &ServerCertVerifier,
|
||||
#[case] mut data: ConnectionFixture,
|
||||
) {
|
||||
let HandshakeData::V1_2(HandshakeDataV1_2 { client_random, .. }) =
|
||||
&mut data.server_cert_data.handshake;
|
||||
let CertBinding::V1_2(CertBindingV1_2 { client_random, .. }) =
|
||||
&mut data.server_cert_data.binding;
|
||||
client_random[31] = client_random[31].wrapping_add(1);
|
||||
|
||||
let err = data.server_cert_data.verify_with_provider(
|
||||
crypto_provider,
|
||||
let err = data.server_cert_data.verify(
|
||||
verifier,
|
||||
data.connection_info.time,
|
||||
data.server_ephemeral_key(),
|
||||
&ServerName::from(data.server_name.as_ref()),
|
||||
&data.server_name,
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
err.unwrap_err(),
|
||||
CertificateVerificationError::InvalidServerSignature
|
||||
HandshakeVerificationError::InvalidServerSignature
|
||||
));
|
||||
}
|
||||
|
||||
@@ -596,21 +611,21 @@ mod tests {
|
||||
#[case::tlsnotary(tlsnotary())]
|
||||
#[case::appliedzkp(appliedzkp())]
|
||||
fn test_verify_sig_ke_params_fail_bad_sig(
|
||||
crypto_provider: &CryptoProvider,
|
||||
verifier: &ServerCertVerifier,
|
||||
#[case] mut data: ConnectionFixture,
|
||||
) {
|
||||
data.server_cert_data.sig.sig[31] = data.server_cert_data.sig.sig[31].wrapping_add(1);
|
||||
|
||||
let err = data.server_cert_data.verify_with_provider(
|
||||
crypto_provider,
|
||||
let err = data.server_cert_data.verify(
|
||||
verifier,
|
||||
data.connection_info.time,
|
||||
data.server_ephemeral_key(),
|
||||
&ServerName::from(data.server_name.as_ref()),
|
||||
&data.server_name,
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
err.unwrap_err(),
|
||||
CertificateVerificationError::InvalidServerSignature
|
||||
HandshakeVerificationError::InvalidServerSignature
|
||||
));
|
||||
}
|
||||
|
||||
@@ -619,13 +634,13 @@ mod tests {
|
||||
#[case::tlsnotary(tlsnotary())]
|
||||
#[case::appliedzkp(appliedzkp())]
|
||||
fn test_check_dns_name_present_in_cert_fail_bad_host(
|
||||
crypto_provider: &CryptoProvider,
|
||||
verifier: &ServerCertVerifier,
|
||||
#[case] data: ConnectionFixture,
|
||||
) {
|
||||
let bad_name = ServerName::from("badhost.com");
|
||||
let bad_name = ServerName::Dns(DnsName::try_from("badhost.com").unwrap());
|
||||
|
||||
let err = data.server_cert_data.verify_with_provider(
|
||||
crypto_provider,
|
||||
let err = data.server_cert_data.verify(
|
||||
verifier,
|
||||
data.connection_info.time,
|
||||
data.server_ephemeral_key(),
|
||||
&bad_name,
|
||||
@@ -633,7 +648,7 @@ mod tests {
|
||||
|
||||
assert!(matches!(
|
||||
err.unwrap_err(),
|
||||
CertificateVerificationError::InvalidCert
|
||||
HandshakeVerificationError::ServerCert(_)
|
||||
));
|
||||
}
|
||||
|
||||
@@ -641,25 +656,22 @@ mod tests {
|
||||
#[rstest]
|
||||
#[case::tlsnotary(tlsnotary())]
|
||||
#[case::appliedzkp(appliedzkp())]
|
||||
fn test_invalid_ephemeral_key(
|
||||
crypto_provider: &CryptoProvider,
|
||||
#[case] data: ConnectionFixture,
|
||||
) {
|
||||
fn test_invalid_ephemeral_key(verifier: &ServerCertVerifier, #[case] data: ConnectionFixture) {
|
||||
let wrong_ephemeral_key = ServerEphemKey {
|
||||
typ: KeyType::SECP256R1,
|
||||
key: Vec::<u8>::from_hex(include_bytes!("./fixtures/data/unknown/pubkey")).unwrap(),
|
||||
};
|
||||
|
||||
let err = data.server_cert_data.verify_with_provider(
|
||||
crypto_provider,
|
||||
let err = data.server_cert_data.verify(
|
||||
verifier,
|
||||
data.connection_info.time,
|
||||
&wrong_ephemeral_key,
|
||||
&ServerName::from(data.server_name.as_ref()),
|
||||
&data.server_name,
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
err.unwrap_err(),
|
||||
CertificateVerificationError::InvalidServerEphemeralKey
|
||||
HandshakeVerificationError::InvalidServerEphemeralKey
|
||||
));
|
||||
}
|
||||
|
||||
@@ -668,22 +680,22 @@ mod tests {
|
||||
#[case::tlsnotary(tlsnotary())]
|
||||
#[case::appliedzkp(appliedzkp())]
|
||||
fn test_verify_cert_chain_fail_no_cert(
|
||||
crypto_provider: &CryptoProvider,
|
||||
verifier: &ServerCertVerifier,
|
||||
#[case] mut data: ConnectionFixture,
|
||||
) {
|
||||
// Empty certs
|
||||
data.server_cert_data.certs = Vec::new();
|
||||
|
||||
let err = data.server_cert_data.verify_with_provider(
|
||||
crypto_provider,
|
||||
let err = data.server_cert_data.verify(
|
||||
verifier,
|
||||
data.connection_info.time,
|
||||
data.server_ephemeral_key(),
|
||||
&ServerName::from(data.server_name.as_ref()),
|
||||
&data.server_name,
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
err.unwrap_err(),
|
||||
CertificateVerificationError::MissingCerts
|
||||
HandshakeVerificationError::MissingCerts
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
//! Types for committing details of a connection.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
connection::ServerCertData,
|
||||
hash::{impl_domain_separator, Blinded, HashAlgorithm, HashAlgorithmExt, TypedHash},
|
||||
};
|
||||
|
||||
/// Opens a [`ServerCertCommitment`].
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct ServerCertOpening(Blinded<ServerCertData>);
|
||||
|
||||
impl_domain_separator!(ServerCertOpening);
|
||||
|
||||
opaque_debug::implement!(ServerCertOpening);
|
||||
|
||||
impl ServerCertOpening {
|
||||
pub(crate) fn new(data: ServerCertData) -> Self {
|
||||
Self(Blinded::new(data))
|
||||
}
|
||||
|
||||
pub(crate) fn commit(&self, hasher: &dyn HashAlgorithm) -> ServerCertCommitment {
|
||||
ServerCertCommitment(TypedHash {
|
||||
alg: hasher.id(),
|
||||
value: hasher.hash_separated(self),
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the server identity data.
|
||||
pub fn data(&self) -> &ServerCertData {
|
||||
self.0.data()
|
||||
}
|
||||
}
|
||||
|
||||
/// Commitment to a server certificate.
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct ServerCertCommitment(pub(crate) TypedHash);
|
||||
|
||||
impl_domain_separator!(ServerCertCommitment);
|
||||
@@ -1,103 +0,0 @@
|
||||
//! Types for proving details of a connection.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
connection::{
|
||||
commit::{ServerCertCommitment, ServerCertOpening},
|
||||
CertificateVerificationError, ServerEphemKey, ServerName,
|
||||
},
|
||||
hash::{HashAlgorithmExt, HashProviderError},
|
||||
CryptoProvider,
|
||||
};
|
||||
|
||||
/// TLS server identity proof.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ServerIdentityProof {
|
||||
name: ServerName,
|
||||
opening: ServerCertOpening,
|
||||
}
|
||||
|
||||
impl ServerIdentityProof {
|
||||
pub(crate) fn new(name: ServerName, opening: ServerCertOpening) -> Self {
|
||||
Self { name, opening }
|
||||
}
|
||||
|
||||
/// Verifies the server identity proof.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `provider` - The crypto provider to use for verification.
|
||||
/// * `time` - The time of the connection.
|
||||
/// * `server_ephemeral_key` - The server's ephemeral key.
|
||||
/// * `commitment` - Commitment to the server certificate.
|
||||
pub fn verify_with_provider(
|
||||
self,
|
||||
provider: &CryptoProvider,
|
||||
time: u64,
|
||||
server_ephemeral_key: &ServerEphemKey,
|
||||
commitment: &ServerCertCommitment,
|
||||
) -> Result<ServerName, ServerIdentityProofError> {
|
||||
let hasher = provider.hash.get(&commitment.0.alg)?;
|
||||
|
||||
if commitment.0.value != hasher.hash_separated(&self.opening) {
|
||||
return Err(ServerIdentityProofError {
|
||||
kind: ErrorKind::Commitment,
|
||||
message: "certificate opening does not match commitment".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
// Verify certificate and identity.
|
||||
self.opening.data().verify_with_provider(
|
||||
provider,
|
||||
time,
|
||||
server_ephemeral_key,
|
||||
&self.name,
|
||||
)?;
|
||||
|
||||
Ok(self.name)
|
||||
}
|
||||
}
|
||||
|
||||
/// Error for [`ServerIdentityProof`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("server identity proof error: {kind}: {message}")]
|
||||
pub struct ServerIdentityProofError {
|
||||
kind: ErrorKind,
|
||||
message: String,
|
||||
}
|
||||
|
||||
impl From<HashProviderError> for ServerIdentityProofError {
|
||||
fn from(err: HashProviderError) -> Self {
|
||||
Self {
|
||||
kind: ErrorKind::Provider,
|
||||
message: err.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CertificateVerificationError> for ServerIdentityProofError {
|
||||
fn from(err: CertificateVerificationError) -> Self {
|
||||
Self {
|
||||
kind: ErrorKind::Certificate,
|
||||
message: err.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum ErrorKind {
|
||||
Provider,
|
||||
Commitment,
|
||||
Certificate,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for ErrorKind {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
ErrorKind::Provider => write!(f, "provider"),
|
||||
ErrorKind::Commitment => write!(f, "commitment"),
|
||||
ErrorKind::Certificate => write!(f, "certificate"),
|
||||
}
|
||||
}
|
||||
}
|
||||
16 crates/core/src/display.rs Normal file
@@ -0,0 +1,16 @@
use rangeset::RangeSet;

pub(crate) struct FmtRangeSet<'a>(pub &'a RangeSet<usize>);

impl<'a> std::fmt::Display for FmtRangeSet<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("{")?;
for range in self.0.iter_ranges() {
write!(f, "{}..{}", range.start, range.end)?;
if range.end < self.0.end().unwrap_or(0) {
f.write_str(", ")?;
}
}
f.write_str("}")
}
}
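FmtRangeSet above renders a RangeSet<usize> as a brace-delimited list of half-open ranges. The following standalone sketch mirrors that output format over a plain slice of ranges (hypothetical helper; it does not depend on the rangeset crate):

use std::ops::Range;

fn fmt_ranges(ranges: &[Range<usize>]) -> String {
    let mut out = String::from("{");
    for (i, r) in ranges.iter().enumerate() {
        out.push_str(&format!("{}..{}", r.start, r.end));
        if i + 1 < ranges.len() {
            out.push_str(", ");
        }
    }
    out.push('}');
    out
}

fn main() {
    // The same rendering `FmtRangeSet` produces for the ranges 0..5 and 10..15.
    assert_eq!(fmt_ranges(&[0..5, 10..15]), "{0..5, 10..15}");
    assert_eq!(fmt_ranges(&[]), "{}");
}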
@@ -1,26 +1,23 @@
|
||||
//! Fixtures for testing
|
||||
|
||||
mod provider;
|
||||
pub mod transcript;
|
||||
|
||||
pub use provider::FixtureEncodingProvider;
|
||||
|
||||
use hex::FromHex;
|
||||
use p256::ecdsa::SigningKey;
|
||||
|
||||
use crate::{
|
||||
attestation::{Attestation, AttestationConfig, Extension},
|
||||
connection::{
|
||||
Certificate, ConnectionInfo, HandshakeData, HandshakeDataV1_2, KeyType, ServerCertData,
|
||||
ServerEphemKey, ServerName, ServerSignature, SignatureScheme, TlsVersion, TranscriptLength,
|
||||
CertBinding, CertBindingV1_2, ConnectionInfo, DnsName, HandshakeData, KeyType,
|
||||
ServerEphemKey, ServerName, ServerSignature, SignatureAlgorithm, TlsVersion,
|
||||
TranscriptLength,
|
||||
},
|
||||
hash::HashAlgorithm,
|
||||
request::{Request, RequestConfig},
|
||||
signing::SignatureAlgId,
|
||||
transcript::{
|
||||
encoding::{EncoderSecret, EncodingProvider, EncodingTree},
|
||||
Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment,
|
||||
encoding::{EncoderSecret, EncodingProvider},
|
||||
Transcript,
|
||||
},
|
||||
CryptoProvider,
|
||||
webpki::CertificateDer,
|
||||
};
|
||||
|
||||
/// A fixture containing various TLS connection data.
|
||||
@@ -29,33 +26,35 @@ use crate::{
|
||||
pub struct ConnectionFixture {
|
||||
pub server_name: ServerName,
|
||||
pub connection_info: ConnectionInfo,
|
||||
pub server_cert_data: ServerCertData,
|
||||
pub server_cert_data: HandshakeData,
|
||||
}
|
||||
|
||||
impl ConnectionFixture {
|
||||
/// Returns a connection fixture for tlsnotary.org.
|
||||
pub fn tlsnotary(transcript_length: TranscriptLength) -> Self {
|
||||
ConnectionFixture {
|
||||
server_name: ServerName::new("tlsnotary.org".to_string()),
|
||||
server_name: ServerName::Dns(DnsName::try_from("tlsnotary.org").unwrap()),
|
||||
connection_info: ConnectionInfo {
|
||||
time: 1671637529,
|
||||
version: TlsVersion::V1_2,
|
||||
transcript_length,
|
||||
},
|
||||
server_cert_data: ServerCertData {
|
||||
server_cert_data: HandshakeData {
|
||||
certs: vec![
|
||||
Certificate(include_bytes!("fixtures/data/tlsnotary.org/ee.der").to_vec()),
|
||||
Certificate(include_bytes!("fixtures/data/tlsnotary.org/inter.der").to_vec()),
|
||||
Certificate(include_bytes!("fixtures/data/tlsnotary.org/ca.der").to_vec()),
|
||||
CertificateDer(include_bytes!("fixtures/data/tlsnotary.org/ee.der").to_vec()),
|
||||
CertificateDer(
|
||||
include_bytes!("fixtures/data/tlsnotary.org/inter.der").to_vec(),
|
||||
),
|
||||
CertificateDer(include_bytes!("fixtures/data/tlsnotary.org/ca.der").to_vec()),
|
||||
],
|
||||
sig: ServerSignature {
|
||||
scheme: SignatureScheme::RSA_PKCS1_SHA256,
|
||||
alg: SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
|
||||
sig: Vec::<u8>::from_hex(include_bytes!(
|
||||
"fixtures/data/tlsnotary.org/signature"
|
||||
))
|
||||
.unwrap(),
|
||||
},
|
||||
handshake: HandshakeData::V1_2(HandshakeDataV1_2 {
|
||||
binding: CertBinding::V1_2(CertBindingV1_2 {
|
||||
client_random: <[u8; 32]>::from_hex(include_bytes!(
|
||||
"fixtures/data/tlsnotary.org/client_random"
|
||||
))
|
||||
@@ -79,26 +78,28 @@ impl ConnectionFixture {
|
||||
/// Returns a connection fixture for appliedzkp.org.
|
||||
pub fn appliedzkp(transcript_length: TranscriptLength) -> Self {
|
||||
ConnectionFixture {
|
||||
server_name: ServerName::new("appliedzkp.org".to_string()),
|
||||
server_name: ServerName::Dns(DnsName::try_from("appliedzkp.org").unwrap()),
|
||||
connection_info: ConnectionInfo {
|
||||
time: 1671637529,
|
||||
version: TlsVersion::V1_2,
|
||||
transcript_length,
|
||||
},
|
||||
server_cert_data: ServerCertData {
|
||||
server_cert_data: HandshakeData {
|
||||
certs: vec![
|
||||
Certificate(include_bytes!("fixtures/data/appliedzkp.org/ee.der").to_vec()),
|
||||
Certificate(include_bytes!("fixtures/data/appliedzkp.org/inter.der").to_vec()),
|
||||
Certificate(include_bytes!("fixtures/data/appliedzkp.org/ca.der").to_vec()),
|
||||
CertificateDer(include_bytes!("fixtures/data/appliedzkp.org/ee.der").to_vec()),
|
||||
CertificateDer(
|
||||
include_bytes!("fixtures/data/appliedzkp.org/inter.der").to_vec(),
|
||||
),
|
||||
CertificateDer(include_bytes!("fixtures/data/appliedzkp.org/ca.der").to_vec()),
|
||||
],
|
||||
sig: ServerSignature {
|
||||
scheme: SignatureScheme::ECDSA_NISTP256_SHA256,
|
||||
alg: SignatureAlgorithm::ECDSA_NISTP256_SHA256,
|
||||
sig: Vec::<u8>::from_hex(include_bytes!(
|
||||
"fixtures/data/appliedzkp.org/signature"
|
||||
))
|
||||
.unwrap(),
|
||||
},
|
||||
handshake: HandshakeData::V1_2(HandshakeDataV1_2 {
|
||||
binding: CertBinding::V1_2(CertBindingV1_2 {
|
||||
client_random: <[u8; 32]>::from_hex(include_bytes!(
|
||||
"fixtures/data/appliedzkp.org/client_random"
|
||||
))
|
||||
@@ -121,10 +122,10 @@ impl ConnectionFixture {
|
||||
|
||||
/// Returns the server_ephemeral_key fixture.
|
||||
pub fn server_ephemeral_key(&self) -> &ServerEphemKey {
|
||||
let HandshakeData::V1_2(HandshakeDataV1_2 {
|
||||
let CertBinding::V1_2(CertBindingV1_2 {
|
||||
server_ephemeral_key,
|
||||
..
|
||||
}) = &self.server_cert_data.handshake;
|
||||
}) = &self.server_cert_data.binding;
|
||||
server_ephemeral_key
|
||||
}
|
||||
}
|
||||
@@ -152,112 +153,3 @@ pub fn encoder_secret_tampered_seed() -> EncoderSecret {
|
||||
seed[0] += 1;
|
||||
EncoderSecret::new(seed, DELTA)
|
||||
}
|
||||
|
||||
/// Returns a notary signing key fixture.
|
||||
pub fn notary_signing_key() -> SigningKey {
|
||||
SigningKey::from_slice(&[1; 32]).unwrap()
|
||||
}
|
||||
|
||||
/// A Request fixture used for testing.
|
||||
#[allow(missing_docs)]
|
||||
pub struct RequestFixture {
|
||||
pub encoding_tree: EncodingTree,
|
||||
pub request: Request,
|
||||
}
|
||||
|
||||
/// Returns a request fixture for testing.
|
||||
pub fn request_fixture(
|
||||
transcript: Transcript,
|
||||
encodings_provider: impl EncodingProvider,
|
||||
connection: ConnectionFixture,
|
||||
encoding_hasher: impl HashAlgorithm,
|
||||
extensions: Vec<Extension>,
|
||||
) -> RequestFixture {
|
||||
let provider = CryptoProvider::default();
|
||||
let (sent_len, recv_len) = transcript.len();
|
||||
|
||||
let ConnectionFixture {
|
||||
server_name,
|
||||
server_cert_data,
|
||||
..
|
||||
} = connection;
|
||||
|
||||
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript);
|
||||
transcript_commitment_builder
|
||||
.commit_sent(&(0..sent_len))
|
||||
.unwrap()
|
||||
.commit_recv(&(0..recv_len))
|
||||
.unwrap();
|
||||
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
|
||||
|
||||
// Prover constructs encoding tree.
|
||||
let encoding_tree = EncodingTree::new(
|
||||
&encoding_hasher,
|
||||
transcripts_commitment_config.iter_encoding(),
|
||||
&encodings_provider,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let mut builder = RequestConfig::builder();
|
||||
|
||||
for extension in extensions {
|
||||
builder.extension(extension);
|
||||
}
|
||||
|
||||
let request_config = builder.build().unwrap();
|
||||
|
||||
let mut request_builder = Request::builder(&request_config);
|
||||
request_builder
|
||||
.server_name(server_name)
|
||||
.server_cert_data(server_cert_data)
|
||||
.transcript(transcript);
|
||||
|
||||
let (request, _) = request_builder.build(&provider).unwrap();
|
||||
|
||||
RequestFixture {
|
||||
encoding_tree,
|
||||
request,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an attestation fixture for testing.
|
||||
pub fn attestation_fixture(
|
||||
request: Request,
|
||||
connection: ConnectionFixture,
|
||||
signature_alg: SignatureAlgId,
|
||||
transcript_commitments: &[TranscriptCommitment],
|
||||
) -> Attestation {
|
||||
let ConnectionFixture {
|
||||
connection_info,
|
||||
server_cert_data,
|
||||
..
|
||||
} = connection;
|
||||
|
||||
let HandshakeData::V1_2(HandshakeDataV1_2 {
|
||||
server_ephemeral_key,
|
||||
..
|
||||
}) = server_cert_data.handshake;
|
||||
|
||||
let mut provider = CryptoProvider::default();
|
||||
match signature_alg {
|
||||
SignatureAlgId::SECP256K1 => provider.signer.set_secp256k1(&[42u8; 32]).unwrap(),
|
||||
SignatureAlgId::SECP256R1 => provider.signer.set_secp256r1(&[42u8; 32]).unwrap(),
|
||||
_ => unimplemented!(),
|
||||
};
|
||||
|
||||
let attestation_config = AttestationConfig::builder()
|
||||
.supported_signature_algs([signature_alg])
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let mut attestation_builder = Attestation::builder(&attestation_config)
|
||||
.accept_request(request)
|
||||
.unwrap();
|
||||
|
||||
attestation_builder
|
||||
.connection_info(connection_info)
|
||||
.server_ephemeral_key(server_ephemeral_key)
|
||||
.transcript_commitments(transcript_commitments.to_vec());
|
||||
|
||||
attestation_builder.build(&provider).unwrap()
|
||||
}
|
||||
|
||||
199 crates/core/src/fixtures/transcript.rs Normal file
@@ -0,0 +1,199 @@
|
||||
//! Transcript fixtures for testing.
|
||||
|
||||
use aead::Payload as AeadPayload;
|
||||
use aes_gcm::{aead::Aead, Aes128Gcm, NewAead};
|
||||
use generic_array::GenericArray;
|
||||
use rand::{rngs::StdRng, Rng, SeedableRng};
|
||||
use tls_core::msgs::{
|
||||
base::Payload,
|
||||
codec::Codec,
|
||||
enums::{ContentType, HandshakeType, ProtocolVersion},
|
||||
handshake::{HandshakeMessagePayload, HandshakePayload},
|
||||
message::{OpaqueMessage, PlainMessage},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
connection::{TranscriptLength, VerifyData},
|
||||
fixtures::ConnectionFixture,
|
||||
transcript::{Record, TlsTranscript},
|
||||
};
|
||||
|
||||
/// The key used for encryption of the sent and received transcript.
|
||||
pub const KEY: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
|
||||
|
||||
/// The iv used for encryption of the sent and received transcript.
|
||||
pub const IV: [u8; 4] = [1, 3, 3, 7];
|
||||
|
||||
/// The record size in bytes.
|
||||
pub const RECORD_SIZE: usize = 512;
|
||||
|
||||
/// Creates a transript fixture for testing.
|
||||
pub fn transcript_fixture(sent: &[u8], recv: &[u8]) -> TlsTranscript {
|
||||
TranscriptGenerator::new(KEY, IV).generate(sent, recv)
|
||||
}
|
||||
|
||||
struct TranscriptGenerator {
|
||||
key: [u8; 16],
|
||||
iv: [u8; 4],
|
||||
}
|
||||
|
||||
impl TranscriptGenerator {
|
||||
fn new(key: [u8; 16], iv: [u8; 4]) -> Self {
|
||||
Self { key, iv }
|
||||
}
|
||||
|
||||
fn generate(&self, sent: &[u8], recv: &[u8]) -> TlsTranscript {
|
||||
let mut rng = StdRng::from_seed([1; 32]);
|
||||
|
||||
let transcript_len = TranscriptLength {
|
||||
sent: sent.len() as u32,
|
||||
received: recv.len() as u32,
|
||||
};
|
||||
let tlsn = ConnectionFixture::tlsnotary(transcript_len);
|
||||
|
||||
let time = tlsn.connection_info.time;
|
||||
let version = tlsn.connection_info.version;
|
||||
let server_cert_chain = tlsn.server_cert_data.certs;
|
||||
let server_signature = tlsn.server_cert_data.sig;
|
||||
let cert_binding = tlsn.server_cert_data.binding;
|
||||
|
||||
let cf_vd: [u8; 12] = rng.random();
|
||||
let sf_vd: [u8; 12] = rng.random();
|
||||
|
||||
let verify_data = VerifyData {
|
||||
client_finished: cf_vd.to_vec(),
|
||||
server_finished: sf_vd.to_vec(),
|
||||
};
|
||||
|
||||
let sent = self.gen_records(cf_vd, sent);
|
||||
let recv = self.gen_records(sf_vd, recv);
|
||||
|
||||
TlsTranscript::new(
|
||||
time,
|
||||
version,
|
||||
Some(server_cert_chain),
|
||||
Some(server_signature),
|
||||
cert_binding,
|
||||
verify_data,
|
||||
sent,
|
||||
recv,
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn gen_records(&self, vd: [u8; 12], plaintext: &[u8]) -> Vec<Record> {
|
||||
let mut records = Vec::new();
|
||||
|
||||
let handshake = self.gen_handshake(vd);
|
||||
records.push(handshake);
|
||||
|
||||
for (seq, msg) in (1_u64..).zip(plaintext.chunks(RECORD_SIZE)) {
|
||||
let record = self.gen_app_data(seq, msg);
|
||||
records.push(record);
|
||||
}
|
||||
|
||||
records
|
||||
}
|
||||
|
||||
fn gen_app_data(&self, seq: u64, plaintext: &[u8]) -> Record {
|
||||
assert!(
|
||||
plaintext.len() <= 1 << 14,
|
||||
"plaintext len per record must be smaller than 2^14 bytes"
|
||||
);
|
||||
|
||||
let explicit_nonce: [u8; 8] = seq.to_be_bytes();
|
||||
let msg = PlainMessage {
|
||||
typ: ContentType::ApplicationData,
|
||||
version: ProtocolVersion::TLSv1_2,
|
||||
payload: Payload::new(plaintext),
|
||||
};
|
||||
let opaque = aes_gcm_encrypt(self.key, self.iv, seq, explicit_nonce, &msg);
|
||||
|
||||
let mut payload = opaque.payload.0;
|
||||
let mut ciphertext = payload.split_off(8);
|
||||
let tag = ciphertext.split_off(ciphertext.len() - 16);
|
||||
|
||||
Record {
|
||||
seq,
|
||||
typ: ContentType::ApplicationData,
|
||||
plaintext: Some(plaintext.to_vec()),
|
||||
explicit_nonce: explicit_nonce.to_vec(),
|
||||
ciphertext,
|
||||
tag: Some(tag),
|
||||
}
|
||||
}
|
||||
|
||||
fn gen_handshake(&self, vd: [u8; 12]) -> Record {
|
||||
let seq = 0_u64;
|
||||
let explicit_nonce = seq.to_be_bytes();
|
||||
|
||||
let mut plaintext = Vec::new();
|
||||
|
||||
let payload = Payload(vd.to_vec());
|
||||
let hs_payload = HandshakePayload::Finished(payload);
|
||||
let handshake_message = HandshakeMessagePayload {
|
||||
typ: HandshakeType::Finished,
|
||||
payload: hs_payload,
|
||||
};
|
||||
handshake_message.encode(&mut plaintext);
|
||||
|
||||
let msg = PlainMessage {
|
||||
typ: ContentType::Handshake,
|
||||
version: ProtocolVersion::TLSv1_2,
|
||||
payload: Payload::new(plaintext.clone()),
|
||||
};
|
||||
|
||||
let opaque = aes_gcm_encrypt(self.key, self.iv, seq, explicit_nonce, &msg);
|
||||
let mut payload = opaque.payload.0;
|
||||
let mut ciphertext = payload.split_off(8);
|
||||
let tag = ciphertext.split_off(ciphertext.len() - 16);
|
||||
|
||||
Record {
|
||||
seq,
|
||||
typ: ContentType::Handshake,
|
||||
plaintext: Some(plaintext),
|
||||
explicit_nonce: explicit_nonce.to_vec(),
|
||||
ciphertext,
|
||||
tag: Some(tag),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn aes_gcm_encrypt(
|
||||
key: [u8; 16],
|
||||
iv: [u8; 4],
|
||||
seq: u64,
|
||||
explicit_nonce: [u8; 8],
|
||||
msg: &PlainMessage,
|
||||
) -> OpaqueMessage {
|
||||
let mut aad = [0u8; 13];
|
||||
|
||||
aad[..8].copy_from_slice(&seq.to_be_bytes());
|
||||
aad[8] = msg.typ.get_u8();
|
||||
aad[9..11].copy_from_slice(&msg.version.get_u16().to_be_bytes());
|
||||
aad[11..13].copy_from_slice(&(msg.payload.0.len() as u16).to_be_bytes());
|
||||
let payload = AeadPayload {
|
||||
msg: &msg.payload.0,
|
||||
aad: &aad,
|
||||
};
|
||||
|
||||
let mut nonce = [0u8; 12];
|
||||
nonce[..4].copy_from_slice(&iv);
|
||||
nonce[4..].copy_from_slice(&explicit_nonce);
|
||||
let nonce = GenericArray::from_slice(&nonce);
|
||||
let cipher = Aes128Gcm::new_from_slice(&key).unwrap();
|
||||
|
||||
// ciphertext will have the authentication tag appended
|
||||
let ciphertext = cipher.encrypt(nonce, payload).unwrap();
|
||||
|
||||
// prepend the explicit nonce
|
||||
let mut nonce_ct_mac = vec![0u8; 0];
|
||||
nonce_ct_mac.extend(explicit_nonce.iter());
|
||||
nonce_ct_mac.extend(ciphertext.iter());
|
||||
|
||||
OpaqueMessage {
|
||||
typ: msg.typ,
|
||||
version: msg.version,
|
||||
payload: Payload::new(nonce_ct_mac),
|
||||
}
|
||||
}
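// Illustrative helper (a sketch, not used above): the TLS 1.2 AES-GCM record
// payload built by `aes_gcm_encrypt` is laid out as an 8-byte explicit nonce,
// followed by the ciphertext, followed by a 16-byte authentication tag. This
// mirrors the `split_off` logic in `gen_app_data` and `gen_handshake`.
#[allow(dead_code)]
fn split_record_payload(mut payload: Vec<u8>) -> (Vec<u8>, Vec<u8>, Vec<u8>) {
    assert!(payload.len() >= 8 + 16, "payload too short for nonce and tag");
    // Everything after the first 8 bytes.
    let mut ciphertext = payload.split_off(8);
    // The remaining first 8 bytes are the explicit nonce.
    let explicit_nonce = payload;
    // The last 16 bytes are the GCM authentication tag.
    let tag = ciphertext.split_off(ciphertext.len() - 16);
    (explicit_nonce, ciphertext, tag)
}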
|
||||
@@ -5,11 +5,6 @@ use std::{collections::HashMap, fmt::Display};
|
||||
use rand::{distr::StandardUniform, prelude::Distribution};
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
|
||||
use crate::serialize::CanonicalSerialize;
|
||||
|
||||
pub(crate) const DEFAULT_SUPPORTED_HASH_ALGS: &[HashAlgId] =
|
||||
&[HashAlgId::SHA256, HashAlgId::BLAKE3, HashAlgId::KECCAK256];
|
||||
|
||||
/// Maximum length of a hash value.
|
||||
const MAX_LEN: usize = 64;
|
||||
|
||||
@@ -196,6 +191,11 @@ impl Hash {
|
||||
len: value.len(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a byte slice of the hash value.
|
||||
pub fn as_bytes(&self) -> &[u8] {
|
||||
&self.value[..self.len]
|
||||
}
|
||||
}
|
||||
|
||||
impl rs_merkle::Hash for Hash {
|
||||
@@ -238,19 +238,6 @@ pub trait HashAlgorithm {
|
||||
fn hash_prefixed(&self, prefix: &[u8], data: &[u8]) -> Hash;
|
||||
}
|
||||
|
||||
pub(crate) trait HashAlgorithmExt: HashAlgorithm {
|
||||
#[allow(dead_code)]
|
||||
fn hash_canonical<T: CanonicalSerialize>(&self, data: &T) -> Hash {
|
||||
self.hash(&data.serialize())
|
||||
}
|
||||
|
||||
fn hash_separated<T: DomainSeparator + CanonicalSerialize>(&self, data: &T) -> Hash {
|
||||
self.hash_prefixed(data.domain(), &data.serialize())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: HashAlgorithm + ?Sized> HashAlgorithmExt for T {}
|
||||
|
||||
/// A hash blinder.
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct Blinder([u8; 16]);
|
||||
@@ -274,52 +261,26 @@ impl Distribution<Blinder> for StandardUniform {
|
||||
|
||||
/// A blinded pre-image of a hash.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub(crate) struct Blinded<T> {
|
||||
pub struct Blinded<T> {
|
||||
data: T,
|
||||
blinder: Blinder,
|
||||
}
|
||||
|
||||
impl<T> Blinded<T> {
|
||||
/// Creates a new blinded pre-image.
|
||||
pub(crate) fn new(data: T) -> Self {
|
||||
pub fn new(data: T) -> Self {
|
||||
Self {
|
||||
data,
|
||||
blinder: rand::random(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn data(&self) -> &T {
|
||||
/// Returns the data.
|
||||
pub fn data(&self) -> &T {
|
||||
&self.data
|
||||
}
|
||||
}
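// A brief sketch of the `Blinded` API shown above (the data value is
// hypothetical): the blinder is sampled randomly on construction, so blinding
// the same data twice yields independently blinded pre-images, while `data()`
// still exposes the original value.
#[allow(dead_code)]
fn blinded_sketch() {
    let blinded = Blinded::new(b"secret".to_vec());
    assert_eq!(blinded.data(), &b"secret".to_vec());
}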
|
||||
|
||||
/// A type with a domain separator which is used during hashing to mitigate type
|
||||
/// confusion attacks.
|
||||
pub(crate) trait DomainSeparator {
|
||||
/// Returns the domain separator for the type.
|
||||
fn domain(&self) -> &[u8];
|
||||
}
|
||||
|
||||
macro_rules! impl_domain_separator {
|
||||
($type:ty) => {
|
||||
impl $crate::hash::DomainSeparator for $type {
|
||||
fn domain(&self) -> &[u8] {
|
||||
use std::sync::LazyLock;
|
||||
|
||||
// Computes a 16-byte hash of the type's name to use as a domain separator.
|
||||
static DOMAIN: LazyLock<[u8; 16]> = LazyLock::new(|| {
|
||||
let domain: [u8; 32] = blake3::hash(stringify!($type).as_bytes()).into();
|
||||
domain[..16].try_into().unwrap()
|
||||
});
|
||||
|
||||
&*DOMAIN
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub(crate) use impl_domain_separator;
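// Sketch of what the macro above computes for a hypothetical type `Foo`: the
// domain separator is the first 16 bytes of the BLAKE3 hash of the stringified
// type name, so distinct types hash under distinct prefixes even when their
// serialized bytes are identical.
#[allow(dead_code)]
fn domain_for_foo() -> [u8; 16] {
    let full: [u8; 32] = blake3::hash(stringify!(Foo).as_bytes()).into();
    full[..16].try_into().expect("slice is exactly 16 bytes")
}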
|
||||
|
||||
mod sha2 {
|
||||
use ::sha2::Digest;
|
||||
|
||||
|
||||
@@ -1,220 +1,41 @@
|
||||
//! TLSNotary core library.
|
||||
//!
|
||||
//! # Introduction
|
||||
//!
|
||||
//! This library provides core functionality for the TLSNotary **attestation**
|
||||
//! protocol, including some more general types which are useful outside
|
||||
//! of attestations.
|
||||
//!
|
||||
//! Once the MPC-TLS protocol has been completed the Prover holds a collection
|
||||
//! of commitments pertaining to the TLS connection. Most importantly, the
|
||||
//! Prover is committed to the [`ServerName`](crate::connection::ServerName),
|
||||
//! and the [`Transcript`](crate::transcript::Transcript) of application data.
|
||||
//! Subsequently, the Prover can request an
|
||||
//! [`Attestation`](crate::attestation::Attestation) from the Notary who will
|
||||
//! include the commitments as well as any additional information which may be
|
||||
//! useful to an attestation Verifier.
|
||||
//!
|
||||
//! Holding an attestation, the Prover can construct a
|
||||
//! [`Presentation`](crate::presentation::Presentation) which facilitates
|
||||
//! selectively disclosing various aspects of the TLS connection to a Verifier.
|
||||
//! If the Verifier trusts the Notary, or more specifically the verifying key of
|
||||
//! the attestation, then the Verifier can trust the authenticity of the
|
||||
//! information disclosed in the presentation.
|
||||
//!
|
||||
//! **Be sure to check out the various submodules for more information.**
|
||||
//!
|
||||
//! # Committing to the transcript
|
||||
//!
|
||||
//! The MPC-TLS protocol produces commitments to the entire transcript of
|
||||
//! application data. However, we may want to disclose only a subset of the data
|
||||
//! in a presentation. Prior to attestation, the Prover has the opportunity to
|
||||
//! slice and dice the commitments into smaller sections which can be
|
||||
//! selectively disclosed. Additionally, the Prover may want to use different
|
||||
//! commitment schemes depending on the context in which they expect to disclose it.
|
||||
//!
|
||||
//! The primary API for this process is the
|
||||
//! [`TranscriptCommitConfigBuilder`](crate::transcript::TranscriptCommitConfigBuilder)
|
||||
//! which is used to build up a configuration.
|
||||
//!
|
||||
//! Currently, only the
|
||||
//! [`Encoding`](crate::transcript::TranscriptCommitmentKind::Encoding)
|
||||
//! commitment kind is supported. In the future you will be able to acquire hash
|
||||
//! commitments directly to the transcript data.
|
||||
//!
|
||||
//! ```no_run
|
||||
//! # use tlsn_core::transcript::{TranscriptCommitConfigBuilder, Transcript, Direction};
|
||||
//! # use tlsn_core::hash::HashAlgId;
|
||||
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
//! # let transcript: Transcript = unimplemented!();
|
||||
//! let (sent_len, recv_len) = transcript.len();
|
||||
//!
|
||||
//! // Create a new configuration builder.
|
||||
//! let mut builder = TranscriptCommitConfigBuilder::new(&transcript);
|
||||
//!
|
||||
//! // Specify all the transcript commitments we want to make.
|
||||
//! builder
|
||||
//! // Use BLAKE3 for encoding commitments.
|
||||
//! .encoding_hash_alg(HashAlgId::BLAKE3)
|
||||
//! // Commit to all sent data.
|
||||
//! .commit_sent(&(0..sent_len))?
|
||||
//! // Commit to the first 10 bytes of sent data.
|
||||
//! .commit_sent(&(0..10))?
|
||||
//! // Skip some bytes so they can be omitted in the presentation.
|
||||
//! .commit_sent(&(20..sent_len))?
|
||||
//! // Commit to all received data.
|
||||
//! .commit_recv(&(0..recv_len))?;
|
||||
//!
|
||||
//! let config = builder.build()?;
|
||||
//! # Ok(())
|
||||
//! # }
|
||||
//! ```
|
||||
//!
|
||||
//! # Requesting an attestation
|
||||
//!
|
||||
//! The first step in the attestation protocol is for the Prover to make a
|
||||
//! [`Request`](crate::request::Request), which can be configured using the
|
||||
//! associated [builder](crate::request::RequestConfigBuilder). With it the
|
||||
//! Prover can configure some of the details of the attestation, such as which
|
||||
//! cryptographic algorithms are used (if the Notary supports them).
|
||||
//!
|
||||
//! The Prover may also request for extensions to be added to the attestation,
|
||||
//! see [here](crate::attestation#extensions) for more information.
|
||||
//!
|
||||
//! Upon being issued an attestation, the Prover will also hold a corresponding
|
||||
//! [`Secrets`] which contains all private information. This pair can be stored
|
||||
//! and used later to construct a
|
||||
//! [`Presentation`](crate::presentation::Presentation), [see
|
||||
//! below](#constructing-a-presentation).
|
||||
//!
|
||||
//! # Issuing an attestation
|
||||
//!
|
||||
//! Upon receiving a request, the Notary can issue an
|
||||
//! [`Attestation`](crate::attestation::Attestation) which can be configured
|
||||
//! using the associated
|
||||
//! [builder](crate::attestation::AttestationConfigBuilder).
|
||||
//!
|
||||
//! The Notary's [`CryptoProvider`] must be configured with an appropriate
|
||||
//! signing key for attestations. See
|
||||
//! [`SignerProvider`](crate::signing::SignerProvider) for more information.
|
||||
//!
|
||||
//! # Constructing a presentation
|
||||
//!
|
||||
//! A Prover can use an [`Attestation`](crate::attestation::Attestation) and the
|
||||
//! corresponding [`Secrets`] to construct a verifiable
|
||||
//! [`Presentation`](crate::presentation::Presentation).
|
||||
//!
|
||||
//! ```no_run
|
||||
//! # use tlsn_core::presentation::Presentation;
|
||||
//! # use tlsn_core::attestation::Attestation;
|
||||
//! # use tlsn_core::transcript::{TranscriptCommitmentKind, Direction};
|
||||
//! # use tlsn_core::{Secrets, CryptoProvider};
|
||||
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
//! # let attestation: Attestation = unimplemented!();
|
||||
//! # let secrets: Secrets = unimplemented!();
|
||||
//! # let crypto_provider: CryptoProvider = unimplemented!();
|
||||
//! let (_sent_len, recv_len) = secrets.transcript().len();
|
||||
//!
|
||||
//! // First, we decide which application data we would like to disclose.
|
||||
//! let mut builder = secrets.transcript_proof_builder();
|
||||
//!
|
||||
//! builder
|
||||
//! // Use transcript encoding commitments.
|
||||
//! .commitment_kinds(&[TranscriptCommitmentKind::Encoding])
|
||||
//! // Disclose the first 10 bytes of the sent data.
|
||||
//! .reveal(&(0..10), Direction::Sent)?
|
||||
//! // Disclose all of the received data.
|
||||
//! .reveal(&(0..recv_len), Direction::Received)?;
|
||||
//!
|
||||
//! let transcript_proof = builder.build()?;
|
||||
//!
|
||||
//! // In most cases we will also disclose the server identity.
|
||||
//! let identity_proof = secrets.identity_proof();
|
||||
//!
|
||||
//! // Now we can construct the presentation.
|
||||
//! let mut builder = attestation.presentation_builder(&crypto_provider);
|
||||
//!
|
||||
//! builder
|
||||
//! .identity_proof(identity_proof)
|
||||
//! .transcript_proof(transcript_proof);
|
||||
//!
|
||||
//! // Finally, we build the presentation. Send it to a verifier!
|
||||
//! let presentation: Presentation = builder.build()?;
|
||||
//! # Ok(())
|
||||
//! # }
|
||||
//! ```
|
||||
//!
|
||||
//! # Verifying a presentation
|
||||
//!
|
||||
//! Verifying a presentation is as simple as checking that the Verifier trusts the
//! verifying key and then calling
|
||||
//! [`Presentation::verify`](crate::presentation::Presentation::verify).
|
||||
//!
|
||||
//! ```no_run
|
||||
//! # use tlsn_core::presentation::{Presentation, PresentationOutput};
|
||||
//! # use tlsn_core::signing::VerifyingKey;
|
||||
//! # use tlsn_core::CryptoProvider;
|
||||
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
//! # let presentation: Presentation = unimplemented!();
|
||||
//! # let trusted_key: VerifyingKey = unimplemented!();
|
||||
//! # let crypto_provider: CryptoProvider = unimplemented!();
|
||||
//! // Assert that we trust the verifying key.
|
||||
//! assert_eq!(presentation.verifying_key(), &trusted_key);
|
||||
//!
|
||||
//! let PresentationOutput {
|
||||
//! attestation,
|
||||
//! server_name,
|
||||
//! connection_info,
|
||||
//! transcript,
|
||||
//! ..
|
||||
//! } = presentation.verify(&crypto_provider)?;
|
||||
//! # Ok(())
|
||||
//! # }
|
||||
//! ```
|
||||
|
||||
#![deny(missing_docs, unreachable_pub, unused_must_use)]
|
||||
#![deny(clippy::all)]
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
pub mod attestation;
|
||||
pub mod connection;
|
||||
#[cfg(any(test, feature = "fixtures"))]
|
||||
pub mod fixtures;
|
||||
pub mod hash;
|
||||
pub(crate) mod merkle;
|
||||
pub mod presentation;
|
||||
mod provider;
|
||||
pub mod request;
|
||||
mod secrets;
|
||||
pub(crate) mod serialize;
|
||||
pub mod signing;
|
||||
pub mod merkle;
|
||||
pub mod transcript;
|
||||
pub mod webpki;
|
||||
pub use rangeset;
|
||||
pub(crate) mod display;
|
||||
|
||||
pub use provider::CryptoProvider;
|
||||
pub use secrets::Secrets;
|
||||
|
||||
use rangeset::ToRangeSet;
|
||||
use rangeset::{RangeSet, ToRangeSet, UnionMut};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
connection::{ServerCertData, ServerName},
|
||||
connection::{HandshakeData, ServerName},
|
||||
transcript::{
|
||||
Direction, Idx, PartialTranscript, Transcript, TranscriptCommitConfig,
|
||||
encoding::EncoderSecret, Direction, PartialTranscript, Transcript, TranscriptCommitConfig,
|
||||
TranscriptCommitRequest, TranscriptCommitment, TranscriptSecret,
|
||||
},
|
||||
};
|
||||
|
||||
/// Configuration to prove information to the verifier.
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ProveConfig {
|
||||
server_identity: bool,
|
||||
transcript: Option<PartialTranscript>,
|
||||
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
|
||||
transcript_commit: Option<TranscriptCommitConfig>,
|
||||
}
|
||||
|
||||
impl ProveConfig {
|
||||
/// Creates a new builder.
|
||||
pub fn builder(transcript: &Transcript) -> ProveConfigBuilder {
|
||||
pub fn builder(transcript: &Transcript) -> ProveConfigBuilder<'_> {
|
||||
ProveConfigBuilder::new(transcript)
|
||||
}
|
||||
|
||||
@@ -223,9 +44,9 @@ impl ProveConfig {
|
||||
self.server_identity
|
||||
}
|
||||
|
||||
/// Returns the transcript to be proven.
|
||||
pub fn transcript(&self) -> Option<&PartialTranscript> {
|
||||
self.transcript.as_ref()
|
||||
/// Returns the ranges of the transcript to be revealed.
|
||||
pub fn reveal(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
|
||||
self.reveal.as_ref()
|
||||
}
|
||||
|
||||
/// Returns the transcript commitment configuration.
|
||||
@@ -239,8 +60,7 @@ impl ProveConfig {
|
||||
pub struct ProveConfigBuilder<'a> {
|
||||
transcript: &'a Transcript,
|
||||
server_identity: bool,
|
||||
reveal_sent: Idx,
|
||||
reveal_recv: Idx,
|
||||
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
|
||||
transcript_commit: Option<TranscriptCommitConfig>,
|
||||
}
|
||||
|
||||
@@ -250,8 +70,7 @@ impl<'a> ProveConfigBuilder<'a> {
|
||||
Self {
|
||||
transcript,
|
||||
server_identity: false,
|
||||
reveal_sent: Idx::default(),
|
||||
reveal_recv: Idx::default(),
|
||||
reveal: None,
|
||||
transcript_commit: None,
|
||||
}
|
||||
}
|
||||
@@ -274,22 +93,24 @@ impl<'a> ProveConfigBuilder<'a> {
|
||||
direction: Direction,
|
||||
ranges: &dyn ToRangeSet<usize>,
|
||||
) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
let idx = Idx::new(ranges.to_range_set());
|
||||
let idx = ranges.to_range_set();
|
||||
|
||||
if idx.end() > self.transcript.len_of_direction(direction) {
|
||||
if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
|
||||
return Err(ProveConfigBuilderError(
|
||||
ProveConfigBuilderErrorRepr::IndexOutOfBounds {
|
||||
direction,
|
||||
actual: idx.end(),
|
||||
actual: idx.end().unwrap_or(0),
|
||||
len: self.transcript.len_of_direction(direction),
|
||||
},
|
||||
));
|
||||
}
|
||||
|
||||
let (sent, recv) = self.reveal.get_or_insert_default();
|
||||
match direction {
|
||||
Direction::Sent => self.reveal_sent.union_mut(&idx),
|
||||
Direction::Received => self.reveal_recv.union_mut(&idx),
|
||||
Direction::Sent => sent.union_mut(&idx),
|
||||
Direction::Received => recv.union_mut(&idx),
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
@@ -301,6 +122,14 @@ impl<'a> ProveConfigBuilder<'a> {
|
||||
self.reveal(Direction::Sent, ranges)
|
||||
}
|
||||
|
||||
/// Reveals all of the sent data transcript.
|
||||
pub fn reveal_sent_all(&mut self) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
let len = self.transcript.len_of_direction(Direction::Sent);
|
||||
let (sent, _) = self.reveal.get_or_insert_default();
|
||||
sent.union_mut(&(0..len));
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Reveals the given ranges of the received data transcript.
|
||||
pub fn reveal_recv(
|
||||
&mut self,
|
||||
@@ -309,20 +138,19 @@ impl<'a> ProveConfigBuilder<'a> {
|
||||
self.reveal(Direction::Received, ranges)
|
||||
}
|
||||
|
||||
/// Reveals all of the received data transcript.
|
||||
pub fn reveal_recv_all(&mut self) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
let len = self.transcript.len_of_direction(Direction::Received);
|
||||
let (_, recv) = self.reveal.get_or_insert_default();
|
||||
recv.union_mut(&(0..len));
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Builds the configuration.
|
||||
pub fn build(self) -> Result<ProveConfig, ProveConfigBuilderError> {
|
||||
let transcript = if !self.reveal_sent.is_empty() || !self.reveal_recv.is_empty() {
|
||||
Some(
|
||||
self.transcript
|
||||
.to_partial(self.reveal_sent, self.reveal_recv),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(ProveConfig {
|
||||
server_identity: self.server_identity,
|
||||
transcript,
|
||||
reveal: self.reveal,
|
||||
transcript_commit: self.transcript_commit,
|
||||
})
|
||||
}
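// Hedged usage sketch of the builder above (the ranges are hypothetical and
// assume the transcript has at least 10 sent bytes).
#[allow(dead_code)]
fn prove_config_sketch(
    transcript: &Transcript,
) -> Result<ProveConfig, ProveConfigBuilderError> {
    let mut builder = ProveConfig::builder(transcript);
    builder
        // Reveal the first 10 bytes of sent data...
        .reveal_sent(&(0..10))?
        // ...and all of the received data.
        .reveal_recv_all()?;
    builder.build()
}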
|
||||
@@ -344,7 +172,7 @@ enum ProveConfigBuilderErrorRepr {
|
||||
}
|
||||
|
||||
/// Configuration to verify information from the prover.
|
||||
#[derive(Debug, Default, Clone)]
|
||||
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
|
||||
pub struct VerifyConfig {}
|
||||
|
||||
impl VerifyConfig {
|
||||
@@ -378,12 +206,12 @@ pub struct VerifyConfigBuilderError(#[from] VerifyConfigBuilderErrorRepr);
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
enum VerifyConfigBuilderErrorRepr {}
|
||||
|
||||
/// Payload sent to the verifier.
|
||||
/// Request to prove statements about the connection.
|
||||
#[doc(hidden)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct ProvePayload {
|
||||
/// Server identity data.
|
||||
pub server_identity: Option<(ServerName, ServerCertData)>,
|
||||
pub struct ProveRequest {
|
||||
/// Handshake data.
|
||||
pub handshake: Option<(ServerName, HandshakeData)>,
|
||||
/// Transcript data.
|
||||
pub transcript: Option<PartialTranscript>,
|
||||
/// Transcript commitment configuration.
|
||||
@@ -391,6 +219,7 @@ pub struct ProvePayload {
|
||||
}
|
||||
|
||||
/// Prover output.
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct ProverOutput {
|
||||
/// Transcript commitments.
|
||||
pub transcript_commitments: Vec<TranscriptCommitment>,
|
||||
@@ -401,11 +230,14 @@ pub struct ProverOutput {
|
||||
opaque_debug::implement!(ProverOutput);
|
||||
|
||||
/// Verifier output.
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct VerifierOutput {
|
||||
/// Server identity.
|
||||
pub server_name: Option<ServerName>,
|
||||
/// Transcript data.
|
||||
pub transcript: Option<PartialTranscript>,
|
||||
/// Encoding commitment secret.
|
||||
pub encoder_secret: Option<EncoderSecret>,
|
||||
/// Transcript commitments.
|
||||
pub transcript_commitments: Vec<TranscriptCommitment>,
|
||||
}
|
||||
|
||||
@@ -5,10 +5,10 @@ use utils::iter::DuplicateCheck;
|
||||
|
||||
use crate::hash::{Hash, HashAlgId, HashAlgorithm, TypedHash};
|
||||
|
||||
/// Errors that can occur during operations with Merkle tree and Merkle proof.
|
||||
/// Merkle tree error.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("merkle error: {0}")]
|
||||
pub(crate) struct MerkleError(String);
|
||||
pub struct MerkleError(String);
|
||||
|
||||
impl MerkleError {
|
||||
fn new(msg: impl Into<String>) -> Self {
|
||||
@@ -16,8 +16,9 @@ impl MerkleError {
|
||||
}
|
||||
}
|
||||
|
||||
/// Merkle proof.
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub(crate) struct MerkleProof {
|
||||
pub struct MerkleProof {
|
||||
alg: HashAlgId,
|
||||
leaf_count: usize,
|
||||
proof: rs_merkle::MerkleProof<Hash>,
|
||||
@@ -33,7 +34,7 @@ impl MerkleProof {
|
||||
///
|
||||
/// - If the length of `leaf_indices` and `leaf_hashes` does not match.
|
||||
/// - If `leaf_indices` contains duplicates.
|
||||
pub(crate) fn verify(
|
||||
pub fn verify(
|
||||
&self,
|
||||
hasher: &dyn HashAlgorithm,
|
||||
root: &TypedHash,
|
||||
@@ -80,25 +81,29 @@ impl rs_merkle::Hasher for RsMerkleHasher<'_> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Merkle tree.
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub(crate) struct MerkleTree {
|
||||
pub struct MerkleTree {
|
||||
alg: HashAlgId,
|
||||
tree: rs_merkle::MerkleTree<Hash>,
|
||||
}
|
||||
|
||||
impl MerkleTree {
|
||||
pub(crate) fn new(alg: HashAlgId) -> Self {
|
||||
/// Creates a new Merkle tree.
|
||||
pub fn new(alg: HashAlgId) -> Self {
|
||||
Self {
|
||||
alg,
|
||||
tree: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn algorithm(&self) -> HashAlgId {
|
||||
/// Returns the hash algorithm used to create the tree.
|
||||
pub fn algorithm(&self) -> HashAlgId {
|
||||
self.alg
|
||||
}
|
||||
|
||||
pub(crate) fn root(&self) -> TypedHash {
|
||||
/// Returns the root of the tree.
|
||||
pub fn root(&self) -> TypedHash {
|
||||
TypedHash {
|
||||
alg: self.alg,
|
||||
value: self.tree.root().expect("tree should not be empty"),
|
||||
@@ -111,7 +116,7 @@ impl MerkleTree {
|
||||
///
|
||||
/// - If the provided hasher is not the same as the one used to create the
|
||||
/// tree.
|
||||
pub(crate) fn insert(&mut self, hasher: &dyn HashAlgorithm, mut leaves: Vec<Hash>) {
|
||||
pub fn insert(&mut self, hasher: &dyn HashAlgorithm, mut leaves: Vec<Hash>) {
|
||||
assert_eq!(self.alg, hasher.id(), "hash algorithm mismatch");
|
||||
|
||||
self.tree.append(&mut leaves);
|
||||
@@ -124,7 +129,7 @@ impl MerkleTree {
|
||||
///
|
||||
/// - If the provided indices are not unique and sorted.
|
||||
/// - If the provided indices are out of bounds.
|
||||
pub(crate) fn proof(&self, indices: &[usize]) -> MerkleProof {
|
||||
pub fn proof(&self, indices: &[usize]) -> MerkleProof {
|
||||
assert!(
|
||||
indices.windows(2).all(|w| w[0] < w[1]),
|
||||
"indices must be unique and sorted"
|
||||
@@ -145,7 +150,7 @@ impl MerkleTree {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::hash::{impl_domain_separator, Blake3, HashAlgorithmExt, Keccak256, Sha256};
|
||||
use crate::hash::{Blake3, Keccak256, Sha256};
|
||||
|
||||
use super::*;
|
||||
use rstest::*;
|
||||
@@ -153,12 +158,10 @@ mod test {
|
||||
#[derive(Serialize)]
|
||||
struct T(u64);
|
||||
|
||||
impl_domain_separator!(T);
|
||||
|
||||
fn leaves<H: HashAlgorithm>(hasher: &H, leaves: impl IntoIterator<Item = T>) -> Vec<Hash> {
|
||||
leaves
|
||||
.into_iter()
|
||||
.map(|x| hasher.hash_canonical(&x))
|
||||
.map(|x| hasher.hash(&x.0.to_be_bytes()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
/// Canonical serialization of TLSNotary types.
|
||||
///
|
||||
/// This trait is used to serialize types into a canonical byte representation.
|
||||
pub(crate) trait CanonicalSerialize {
|
||||
/// Serializes the type.
|
||||
fn serialize(&self) -> Vec<u8>;
|
||||
}
|
||||
|
||||
impl<T> CanonicalSerialize for T
|
||||
where
|
||||
T: serde::Serialize,
|
||||
{
|
||||
fn serialize(&self) -> Vec<u8> {
|
||||
// For now we use BCS for serialization. In future releases we will want to
|
||||
// consider this further, particularly with respect to EVM compatibility.
|
||||
bcs::to_bytes(self).unwrap()
|
||||
}
|
||||
}
|
||||
@@ -11,35 +11,22 @@
|
||||
//! recovered by parsing the application data and relating it to the bytes
|
||||
//! in the transcript.
|
||||
//!
|
||||
//! ## Commitments
|
||||
//!
|
||||
//! During the attestation process a Prover can generate multiple commitments to
|
||||
//! various parts of the transcript. These commitments are inserted into the
|
||||
//! attestation body and can be used by the Verifier to verify transcript proofs
|
||||
//! later.
|
||||
//!
|
||||
//! To configure the transcript commitments, use the
|
||||
//! [`TranscriptCommitConfigBuilder`].
|
||||
//!
|
||||
//! ## Selective Disclosure
|
||||
//!
|
||||
//! Using a [`TranscriptProof`] a Prover can selectively disclose parts of a
|
||||
//! transcript to a Verifier in the form of a [`PartialTranscript`]. A Verifier
|
||||
//! always learns the length of the transcript, but sensitive data can be
|
||||
//! withheld.
|
||||
//!
|
||||
//! To create a proof, use the [`TranscriptProofBuilder`] which is returned by
|
||||
//! [`Secrets::transcript_proof_builder`](crate::Secrets::transcript_proof_builder).
|
||||
|
||||
mod commit;
|
||||
#[doc(hidden)]
|
||||
pub mod encoding;
|
||||
pub mod hash;
|
||||
mod proof;
|
||||
mod tls;
|
||||
|
||||
use std::{fmt, ops::Range};
|
||||
|
||||
use rangeset::{Difference, IndexRanges, RangeSet, Subset, ToRangeSet, Union, UnionMut};
|
||||
use rangeset::{Difference, IndexRanges, RangeSet, Union};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::connection::TranscriptLength;
|
||||
@@ -51,6 +38,8 @@ pub use commit::{
|
||||
pub use proof::{
|
||||
TranscriptProof, TranscriptProofBuilder, TranscriptProofBuilderError, TranscriptProofError,
|
||||
};
|
||||
pub use tls::{Record, TlsTranscript};
|
||||
pub use tls_core::msgs::enums::ContentType;
|
||||
|
||||
/// A transcript contains the plaintext of all application data communicated
|
||||
/// between the Prover and the Server.
|
||||
@@ -107,18 +96,18 @@ impl Transcript {
|
||||
|
||||
/// Returns the subsequence of the transcript with the provided index,
|
||||
/// returning `None` if the index is out of bounds.
|
||||
pub fn get(&self, direction: Direction, idx: &Idx) -> Option<Subsequence> {
|
||||
pub fn get(&self, direction: Direction, idx: &RangeSet<usize>) -> Option<Subsequence> {
|
||||
let data = match direction {
|
||||
Direction::Sent => &self.sent,
|
||||
Direction::Received => &self.received,
|
||||
};
|
||||
|
||||
if idx.end() > data.len() {
|
||||
if idx.end().unwrap_or(0) > data.len() {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(
|
||||
Subsequence::new(idx.clone(), data.index_ranges(&idx.0))
|
||||
Subsequence::new(idx.clone(), data.index_ranges(idx))
|
||||
.expect("data is same length as index"),
|
||||
)
|
||||
}
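// Hedged sketch of the updated `get` API (the transcript contents are
// hypothetical): indices are now plain `RangeSet<usize>` values.
#[allow(dead_code)]
fn get_sketch() {
    let transcript = Transcript::new(b"hello".to_vec(), b"world!".to_vec());

    // An in-bounds range yields the selected subsequence.
    assert!(transcript
        .get(Direction::Sent, &RangeSet::from(0..2))
        .is_some());

    // An out-of-bounds range yields `None`.
    assert!(transcript
        .get(Direction::Received, &RangeSet::from(0..100))
        .is_none());
}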
|
||||
@@ -133,7 +122,11 @@ impl Transcript {
|
||||
///
|
||||
/// * `sent_idx` - The indices of the sent data to include.
|
||||
/// * `recv_idx` - The indices of the received data to include.
|
||||
pub fn to_partial(&self, sent_idx: Idx, recv_idx: Idx) -> PartialTranscript {
|
||||
pub fn to_partial(
|
||||
&self,
|
||||
sent_idx: RangeSet<usize>,
|
||||
recv_idx: RangeSet<usize>,
|
||||
) -> PartialTranscript {
|
||||
let mut sent = vec![0; self.sent.len()];
|
||||
let mut received = vec![0; self.received.len()];
|
||||
|
||||
@@ -168,9 +161,9 @@ pub struct PartialTranscript {
|
||||
/// Data received by the Prover from the Server.
|
||||
received: Vec<u8>,
|
||||
/// Index of `sent` which have been authenticated.
|
||||
sent_authed_idx: Idx,
|
||||
sent_authed_idx: RangeSet<usize>,
|
||||
/// Index of `received` which have been authenticated.
|
||||
received_authed_idx: Idx,
|
||||
received_authed_idx: RangeSet<usize>,
|
||||
}
|
||||
|
||||
/// `PartialTranscript` in a compressed form.
|
||||
@@ -182,9 +175,9 @@ pub struct CompressedPartialTranscript {
|
||||
/// Received data which has been authenticated.
|
||||
received_authed: Vec<u8>,
|
||||
/// Index of `sent_authed`.
|
||||
sent_idx: Idx,
|
||||
sent_idx: RangeSet<usize>,
|
||||
/// Index of `received_authed`.
|
||||
recv_idx: Idx,
|
||||
recv_idx: RangeSet<usize>,
|
||||
/// Total bytelength of sent data in the original partial transcript.
|
||||
sent_total: usize,
|
||||
/// Total bytelength of received data in the original partial transcript.
|
||||
@@ -196,10 +189,10 @@ impl From<PartialTranscript> for CompressedPartialTranscript {
|
||||
Self {
|
||||
sent_authed: uncompressed
|
||||
.sent
|
||||
.index_ranges(&uncompressed.sent_authed_idx.0),
|
||||
.index_ranges(&uncompressed.sent_authed_idx),
|
||||
received_authed: uncompressed
|
||||
.received
|
||||
.index_ranges(&uncompressed.received_authed_idx.0),
|
||||
.index_ranges(&uncompressed.received_authed_idx),
|
||||
sent_idx: uncompressed.sent_authed_idx,
|
||||
recv_idx: uncompressed.received_authed_idx,
|
||||
sent_total: uncompressed.sent.len(),
|
||||
@@ -249,8 +242,8 @@ impl PartialTranscript {
|
||||
Self {
|
||||
sent: vec![0; sent_len],
|
||||
received: vec![0; received_len],
|
||||
sent_authed_idx: Idx::default(),
|
||||
received_authed_idx: Idx::default(),
|
||||
sent_authed_idx: RangeSet::default(),
|
||||
received_authed_idx: RangeSet::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -271,10 +264,10 @@ impl PartialTranscript {
|
||||
}
|
||||
|
||||
/// Returns whether the index is in bounds of the transcript.
|
||||
pub fn contains(&self, direction: Direction, idx: &Idx) -> bool {
|
||||
pub fn contains(&self, direction: Direction, idx: &RangeSet<usize>) -> bool {
|
||||
match direction {
|
||||
Direction::Sent => idx.end() <= self.sent.len(),
|
||||
Direction::Received => idx.end() <= self.received.len(),
|
||||
Direction::Sent => idx.end().unwrap_or(0) <= self.sent.len(),
|
||||
Direction::Received => idx.end().unwrap_or(0) <= self.received.len(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -301,23 +294,23 @@ impl PartialTranscript {
|
||||
}
|
||||
|
||||
/// Returns the index of sent data which have been authenticated.
|
||||
pub fn sent_authed(&self) -> &Idx {
|
||||
pub fn sent_authed(&self) -> &RangeSet<usize> {
|
||||
&self.sent_authed_idx
|
||||
}
|
||||
|
||||
/// Returns the index of received data which have been authenticated.
|
||||
pub fn received_authed(&self) -> &Idx {
|
||||
pub fn received_authed(&self) -> &RangeSet<usize> {
|
||||
&self.received_authed_idx
|
||||
}
|
||||
|
||||
/// Returns the index of sent data which haven't been authenticated.
|
||||
pub fn sent_unauthed(&self) -> Idx {
|
||||
Idx(RangeSet::from(0..self.sent.len()).difference(&self.sent_authed_idx.0))
|
||||
pub fn sent_unauthed(&self) -> RangeSet<usize> {
|
||||
(0..self.sent.len()).difference(&self.sent_authed_idx)
|
||||
}
|
||||
|
||||
/// Returns the index of received data which haven't been authenticated.
|
||||
pub fn received_unauthed(&self) -> Idx {
|
||||
Idx(RangeSet::from(0..self.received.len()).difference(&self.received_authed_idx.0))
|
||||
pub fn received_unauthed(&self) -> RangeSet<usize> {
|
||||
(0..self.received.len()).difference(&self.received_authed_idx)
|
||||
}
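// Hedged sketch (hypothetical data): the unauthenticated index is simply the
// complement of the authenticated index within the transcript length.
#[allow(dead_code)]
fn unauthed_sketch() {
    let transcript = Transcript::new(vec![0u8; 8], vec![0u8; 8]);
    let partial = transcript.to_partial(RangeSet::from(2..5), RangeSet::from(0..3));

    assert_eq!(partial.sent_authed(), &RangeSet::from(2..5));
    assert_eq!(partial.sent_unauthed(), RangeSet::from([0..2, 5..8]));
    assert_eq!(partial.received_unauthed(), RangeSet::from(3..8));
}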
|
||||
|
||||
/// Returns an iterator over the authenticated data in the transcript.
|
||||
@@ -327,7 +320,7 @@ impl PartialTranscript {
|
||||
Direction::Received => (&self.received, &self.received_authed_idx),
|
||||
};
|
||||
|
||||
authed.0.iter().map(|i| data[i])
|
||||
authed.iter().map(|i| data[i])
|
||||
}
|
||||
|
||||
/// Unions the authenticated data of this transcript with another.
|
||||
@@ -349,8 +342,7 @@ impl PartialTranscript {
|
||||
|
||||
for range in other
|
||||
.sent_authed_idx
|
||||
.0
|
||||
.difference(&self.sent_authed_idx.0)
|
||||
.difference(&self.sent_authed_idx)
|
||||
.iter_ranges()
|
||||
{
|
||||
self.sent[range.clone()].copy_from_slice(&other.sent[range]);
|
||||
@@ -358,8 +350,7 @@ impl PartialTranscript {
|
||||
|
||||
for range in other
|
||||
.received_authed_idx
|
||||
.0
|
||||
.difference(&self.received_authed_idx.0)
|
||||
.difference(&self.received_authed_idx)
|
||||
.iter_ranges()
|
||||
{
|
||||
self.received[range.clone()].copy_from_slice(&other.received[range]);
|
||||
@@ -411,12 +402,12 @@ impl PartialTranscript {
|
||||
pub fn set_unauthed_range(&mut self, value: u8, direction: Direction, range: Range<usize>) {
|
||||
match direction {
|
||||
Direction::Sent => {
|
||||
for range in range.difference(&self.sent_authed_idx.0).iter_ranges() {
|
||||
for range in range.difference(&self.sent_authed_idx).iter_ranges() {
|
||||
self.sent[range].fill(value);
|
||||
}
|
||||
}
|
||||
Direction::Received => {
|
||||
for range in range.difference(&self.received_authed_idx.0).iter_ranges() {
|
||||
for range in range.difference(&self.received_authed_idx).iter_ranges() {
|
||||
self.received[range].fill(value);
|
||||
}
|
||||
}
|
||||
@@ -445,130 +436,19 @@ impl fmt::Display for Direction {
|
||||
}
|
||||
}
|
||||
|
||||
/// Transcript index.
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
pub struct Idx(RangeSet<usize>);
|
||||
|
||||
impl Idx {
|
||||
/// Creates a new index builder.
|
||||
pub fn builder() -> IdxBuilder {
|
||||
IdxBuilder::default()
|
||||
}
|
||||
|
||||
/// Creates an empty index.
|
||||
pub fn empty() -> Self {
|
||||
Self(RangeSet::default())
|
||||
}
|
||||
|
||||
/// Creates a new transcript index.
|
||||
pub fn new(ranges: impl Into<RangeSet<usize>>) -> Self {
|
||||
Self(ranges.into())
|
||||
}
|
||||
|
||||
/// Returns the start of the index.
|
||||
pub fn start(&self) -> usize {
|
||||
self.0.min().unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Returns the end of the index, non-inclusive.
|
||||
pub fn end(&self) -> usize {
|
||||
self.0.end().unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Returns an iterator over the values in the index.
|
||||
pub fn iter(&self) -> impl Iterator<Item = usize> + '_ {
|
||||
self.0.iter()
|
||||
}
|
||||
|
||||
/// Returns an iterator over the ranges of the index.
|
||||
pub fn iter_ranges(&self) -> impl Iterator<Item = Range<usize>> + '_ {
|
||||
self.0.iter_ranges()
|
||||
}
|
||||
|
||||
/// Returns the number of values in the index.
|
||||
pub fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
/// Returns whether the index is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.0.is_empty()
|
||||
}
|
||||
|
||||
/// Returns the number of disjoint ranges in the index.
|
||||
pub fn count(&self) -> usize {
|
||||
self.0.len_ranges()
|
||||
}
|
||||
|
||||
pub(crate) fn as_range_set(&self) -> &RangeSet<usize> {
|
||||
&self.0
|
||||
}
|
||||
|
||||
/// Returns the union of this index with another.
|
||||
pub(crate) fn union(&self, other: &Idx) -> Idx {
|
||||
Idx(self.0.union(&other.0))
|
||||
}
|
||||
|
||||
/// Unions this index with another.
|
||||
pub(crate) fn union_mut(&mut self, other: &Idx) {
|
||||
self.0.union_mut(&other.0);
|
||||
}
|
||||
|
||||
/// Returns the difference between `self` and `other`.
|
||||
pub(crate) fn difference(&self, other: &Idx) -> Idx {
|
||||
Idx(self.0.difference(&other.0))
|
||||
}
|
||||
|
||||
/// Returns `true` if `self` is a subset of `other`.
|
||||
pub(crate) fn is_subset(&self, other: &Idx) -> bool {
|
||||
self.0.is_subset(&other.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Idx {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str("Idx([")?;
|
||||
let count = self.0.len_ranges();
|
||||
for (i, range) in self.0.iter_ranges().enumerate() {
|
||||
write!(f, "{}..{}", range.start, range.end)?;
|
||||
if i < count - 1 {
|
||||
write!(f, ", ")?;
|
||||
}
|
||||
}
|
||||
f.write_str("])")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for [`Idx`].
|
||||
#[derive(Debug, Default)]
|
||||
pub struct IdxBuilder(RangeSet<usize>);
|
||||
|
||||
impl IdxBuilder {
|
||||
/// Unions ranges.
|
||||
pub fn union(self, ranges: &dyn ToRangeSet<usize>) -> Self {
|
||||
IdxBuilder(self.0.union(&ranges.to_range_set()))
|
||||
}
|
||||
|
||||
/// Builds the index.
|
||||
pub fn build(self) -> Idx {
|
||||
Idx(self.0)
|
||||
}
|
||||
}
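// The `Idx` wrapper and `IdxBuilder` above are replaced by using
// `RangeSet<usize>` directly. A hedged migration sketch (the ranges are
// hypothetical and assume a transcript of at least 10 bytes per direction):
//
//     // before
//     let partial = transcript.to_partial(Idx::new(0..4), Idx::new(7..10));
//     // after
//     let partial = transcript.to_partial(RangeSet::from(0..4), RangeSet::from(7..10));
#[allow(dead_code)]
fn idx_migration_sketch(transcript: &Transcript) -> PartialTranscript {
    transcript.to_partial(RangeSet::from(0..4), RangeSet::from(7..10))
}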
|
||||
|
||||
/// Transcript subsequence.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(try_from = "validation::SubsequenceUnchecked")]
|
||||
pub struct Subsequence {
|
||||
/// Index of the subsequence.
|
||||
idx: Idx,
|
||||
idx: RangeSet<usize>,
|
||||
/// Data of the subsequence.
|
||||
data: Vec<u8>,
|
||||
}
|
||||
|
||||
impl Subsequence {
|
||||
/// Creates a new subsequence.
|
||||
pub fn new(idx: Idx, data: Vec<u8>) -> Result<Self, InvalidSubsequence> {
|
||||
pub fn new(idx: RangeSet<usize>, data: Vec<u8>) -> Result<Self, InvalidSubsequence> {
|
||||
if idx.len() != data.len() {
|
||||
return Err(InvalidSubsequence(
|
||||
"index length does not match data length",
|
||||
@@ -579,7 +459,7 @@ impl Subsequence {
|
||||
}
|
||||
|
||||
/// Returns the index of the subsequence.
|
||||
pub fn index(&self) -> &Idx {
|
||||
pub fn index(&self) -> &RangeSet<usize> {
|
||||
&self.idx
|
||||
}
|
||||
|
||||
@@ -595,7 +475,7 @@ impl Subsequence {
|
||||
}
|
||||
|
||||
/// Returns the inner parts of the subsequence.
|
||||
pub fn into_parts(self) -> (Idx, Vec<u8>) {
|
||||
pub fn into_parts(self) -> (RangeSet<usize>, Vec<u8>) {
|
||||
(self.idx, self.data)
|
||||
}
|
||||
|
||||
@@ -623,7 +503,7 @@ mod validation {
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub(super) struct SubsequenceUnchecked {
|
||||
idx: Idx,
|
||||
idx: RangeSet<usize>,
|
||||
data: Vec<u8>,
|
||||
}
|
||||
|
||||
@@ -645,8 +525,8 @@ mod validation {
|
||||
pub(super) struct CompressedPartialTranscriptUnchecked {
|
||||
sent_authed: Vec<u8>,
|
||||
received_authed: Vec<u8>,
|
||||
sent_idx: Idx,
|
||||
recv_idx: Idx,
|
||||
sent_idx: RangeSet<usize>,
|
||||
recv_idx: RangeSet<usize>,
|
||||
sent_total: usize,
|
||||
recv_total: usize,
|
||||
}
|
||||
@@ -663,8 +543,8 @@ mod validation {
|
||||
));
|
||||
}
|
||||
|
||||
if unchecked.sent_idx.end() > unchecked.sent_total
|
||||
|| unchecked.recv_idx.end() > unchecked.recv_total
|
||||
if unchecked.sent_idx.end().unwrap_or(0) > unchecked.sent_total
|
||||
|| unchecked.recv_idx.end().unwrap_or(0) > unchecked.recv_total
|
||||
{
|
||||
return Err(InvalidCompressedPartialTranscript(
|
||||
"ranges are not in bounds of the data",
|
||||
@@ -693,8 +573,8 @@ mod validation {
|
||||
CompressedPartialTranscriptUnchecked {
|
||||
received_authed: vec![1, 2, 3, 11, 12, 13],
|
||||
sent_authed: vec![4, 5, 6, 14, 15, 16],
|
||||
recv_idx: Idx(RangeSet::new(&[1..4, 11..14])),
|
||||
sent_idx: Idx(RangeSet::new(&[4..7, 14..17])),
|
||||
recv_idx: RangeSet::from([1..4, 11..14]),
|
||||
sent_idx: RangeSet::from([4..7, 14..17]),
|
||||
sent_total: 20,
|
||||
recv_total: 20,
|
||||
}
|
||||
@@ -733,7 +613,6 @@ mod validation {
|
||||
// Change the total to be less than the last range's end bound.
|
||||
let end = partial_transcript
|
||||
.sent_idx
|
||||
.0
|
||||
.iter_ranges()
|
||||
.next_back()
|
||||
.unwrap()
|
||||
@@ -765,31 +644,25 @@ mod tests {
|
||||
|
||||
#[fixture]
|
||||
fn partial_transcript() -> PartialTranscript {
|
||||
transcript().to_partial(
|
||||
Idx::new(RangeSet::new(&[1..4, 6..9])),
|
||||
Idx::new(RangeSet::new(&[2..5, 7..10])),
|
||||
)
|
||||
transcript().to_partial(RangeSet::from([1..4, 6..9]), RangeSet::from([2..5, 7..10]))
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_transcript_get_subsequence(transcript: Transcript) {
|
||||
let subseq = transcript
|
||||
.get(Direction::Received, &Idx(RangeSet::from([0..4, 7..10])))
|
||||
.get(Direction::Received, &RangeSet::from([0..4, 7..10]))
|
||||
.unwrap();
|
||||
assert_eq!(subseq.data, vec![0, 1, 2, 3, 7, 8, 9]);
|
||||
|
||||
let subseq = transcript
|
||||
.get(Direction::Sent, &Idx(RangeSet::from([0..4, 9..12])))
|
||||
.get(Direction::Sent, &RangeSet::from([0..4, 9..12]))
|
||||
.unwrap();
|
||||
assert_eq!(subseq.data, vec![0, 1, 2, 3, 9, 10, 11]);
|
||||
|
||||
let subseq = transcript.get(
|
||||
Direction::Received,
|
||||
&Idx(RangeSet::from([0..4, 7..10, 11..13])),
|
||||
);
|
||||
let subseq = transcript.get(Direction::Received, &RangeSet::from([0..4, 7..10, 11..13]));
|
||||
assert_eq!(subseq, None);
|
||||
|
||||
let subseq = transcript.get(Direction::Sent, &Idx(RangeSet::from([0..4, 7..10, 11..13])));
|
||||
let subseq = transcript.get(Direction::Sent, &RangeSet::from([0..4, 7..10, 11..13]));
|
||||
assert_eq!(subseq, None);
|
||||
}
|
||||
|
||||
@@ -802,7 +675,7 @@ mod tests {
|
||||
|
||||
#[rstest]
|
||||
fn test_transcript_to_partial_success(transcript: Transcript) {
|
||||
let partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..7));
|
||||
let partial = transcript.to_partial(RangeSet::from(0..2), RangeSet::from(3..7));
|
||||
assert_eq!(partial.sent_unsafe(), [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
|
||||
assert_eq!(
|
||||
partial.received_unsafe(),
|
||||
@@ -813,29 +686,30 @@ mod tests {
|
||||
#[rstest]
|
||||
#[should_panic]
|
||||
fn test_transcript_to_partial_failure(transcript: Transcript) {
|
||||
let _ = transcript.to_partial(Idx::new(0..14), Idx::new(3..7));
|
||||
let _ = transcript.to_partial(RangeSet::from(0..14), RangeSet::from(3..7));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_partial_transcript_contains(transcript: Transcript) {
|
||||
let partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..7));
|
||||
assert!(partial.contains(Direction::Sent, &Idx::new([0..5, 7..10])));
|
||||
assert!(!partial.contains(Direction::Received, &Idx::new([4..6, 7..13])))
|
||||
let partial = transcript.to_partial(RangeSet::from(0..2), RangeSet::from(3..7));
|
||||
assert!(partial.contains(Direction::Sent, &RangeSet::from([0..5, 7..10])));
|
||||
assert!(!partial.contains(Direction::Received, &RangeSet::from([4..6, 7..13])))
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_partial_transcript_unauthed(transcript: Transcript) {
|
||||
let partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..7));
|
||||
assert_eq!(partial.sent_unauthed(), Idx::new(2..12));
|
||||
assert_eq!(partial.received_unauthed(), Idx::new([0..3, 7..12]));
|
||||
let partial = transcript.to_partial(RangeSet::from(0..2), RangeSet::from(3..7));
|
||||
assert_eq!(partial.sent_unauthed(), RangeSet::from(2..12));
|
||||
assert_eq!(partial.received_unauthed(), RangeSet::from([0..3, 7..12]));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_partial_transcript_union_success(transcript: Transcript) {
|
||||
// Non overlapping ranges.
|
||||
let mut simple_partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..7));
|
||||
let mut simple_partial = transcript.to_partial(RangeSet::from(0..2), RangeSet::from(3..7));
|
||||
|
||||
let other_simple_partial = transcript.to_partial(Idx::new(3..5), Idx::new(1..2));
|
||||
let other_simple_partial =
|
||||
transcript.to_partial(RangeSet::from(3..5), RangeSet::from(1..2));
|
||||
|
||||
simple_partial.union_transcript(&other_simple_partial);
|
||||
|
||||
@@ -847,12 +721,16 @@ mod tests {
|
||||
simple_partial.received_unsafe(),
|
||||
[0, 1, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0]
|
||||
);
|
||||
assert_eq!(simple_partial.sent_authed(), &Idx::new([0..2, 3..5]));
|
||||
assert_eq!(simple_partial.received_authed(), &Idx::new([1..2, 3..7]));
|
||||
assert_eq!(simple_partial.sent_authed(), &RangeSet::from([0..2, 3..5]));
|
||||
assert_eq!(
|
||||
simple_partial.received_authed(),
|
||||
&RangeSet::from([1..2, 3..7])
|
||||
);
|
||||
|
||||
// Overwrite with another partial transcript.
|
||||
|
||||
let another_simple_partial = transcript.to_partial(Idx::new(1..4), Idx::new(6..9));
|
||||
let another_simple_partial =
|
||||
transcript.to_partial(RangeSet::from(1..4), RangeSet::from(6..9));
|
||||
|
||||
simple_partial.union_transcript(&another_simple_partial);
|
||||
|
||||
@@ -864,13 +742,17 @@ mod tests {
|
||||
simple_partial.received_unsafe(),
|
||||
[0, 1, 0, 3, 4, 5, 6, 7, 8, 0, 0, 0]
|
||||
);
|
||||
assert_eq!(simple_partial.sent_authed(), &Idx::new(0..5));
|
||||
assert_eq!(simple_partial.received_authed(), &Idx::new([1..2, 3..9]));
|
||||
assert_eq!(simple_partial.sent_authed(), &RangeSet::from(0..5));
|
||||
assert_eq!(
|
||||
simple_partial.received_authed(),
|
||||
&RangeSet::from([1..2, 3..9])
|
||||
);
|
||||
|
||||
// Overlapping ranges.
|
||||
let mut overlap_partial = transcript.to_partial(Idx::new(4..6), Idx::new(3..7));
|
||||
let mut overlap_partial = transcript.to_partial(RangeSet::from(4..6), RangeSet::from(3..7));
|
||||
|
||||
let other_overlap_partial = transcript.to_partial(Idx::new(3..5), Idx::new(5..9));
|
||||
let other_overlap_partial =
|
||||
transcript.to_partial(RangeSet::from(3..5), RangeSet::from(5..9));
|
||||
|
||||
overlap_partial.union_transcript(&other_overlap_partial);
|
||||
|
||||
@@ -882,13 +764,16 @@ mod tests {
|
||||
overlap_partial.received_unsafe(),
|
||||
[0, 0, 0, 3, 4, 5, 6, 7, 8, 0, 0, 0]
|
||||
);
|
||||
assert_eq!(overlap_partial.sent_authed(), &Idx::new([3..5, 4..6]));
|
||||
assert_eq!(overlap_partial.received_authed(), &Idx::new([3..7, 5..9]));
|
||||
assert_eq!(overlap_partial.sent_authed(), &RangeSet::from([3..5, 4..6]));
|
||||
assert_eq!(
|
||||
overlap_partial.received_authed(),
|
||||
&RangeSet::from([3..7, 5..9])
|
||||
);
|
||||
|
||||
// Equal ranges.
|
||||
let mut equal_partial = transcript.to_partial(Idx::new(4..6), Idx::new(3..7));
|
||||
let mut equal_partial = transcript.to_partial(RangeSet::from(4..6), RangeSet::from(3..7));
|
||||
|
||||
let other_equal_partial = transcript.to_partial(Idx::new(4..6), Idx::new(3..7));
|
||||
let other_equal_partial = transcript.to_partial(RangeSet::from(4..6), RangeSet::from(3..7));
|
||||
|
||||
equal_partial.union_transcript(&other_equal_partial);
|
||||
|
||||
@@ -900,13 +785,15 @@ mod tests {
|
||||
equal_partial.received_unsafe(),
|
||||
[0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0]
|
||||
);
|
||||
assert_eq!(equal_partial.sent_authed(), &Idx::new(4..6));
|
||||
assert_eq!(equal_partial.received_authed(), &Idx::new(3..7));
|
||||
assert_eq!(equal_partial.sent_authed(), &RangeSet::from(4..6));
|
||||
assert_eq!(equal_partial.received_authed(), &RangeSet::from(3..7));
|
||||
|
||||
// Subset ranges.
|
||||
let mut subset_partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..11));
|
||||
let mut subset_partial =
|
||||
transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..11));
|
||||
|
||||
let other_subset_partial = transcript.to_partial(Idx::new(6..9), Idx::new(5..6));
|
||||
let other_subset_partial =
|
||||
transcript.to_partial(RangeSet::from(6..9), RangeSet::from(5..6));
|
||||
|
||||
subset_partial.union_transcript(&other_subset_partial);
|
||||
|
||||
@@ -918,30 +805,32 @@ mod tests {
|
||||
subset_partial.received_unsafe(),
|
||||
[0, 0, 0, 3, 4, 5, 6, 7, 8, 9, 10, 0]
|
||||
);
|
||||
assert_eq!(subset_partial.sent_authed(), &Idx::new(4..10));
|
||||
assert_eq!(subset_partial.received_authed(), &Idx::new(3..11));
|
||||
assert_eq!(subset_partial.sent_authed(), &RangeSet::from(4..10));
|
||||
assert_eq!(subset_partial.received_authed(), &RangeSet::from(3..11));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[should_panic]
|
||||
fn test_partial_transcript_union_failure(transcript: Transcript) {
|
||||
let mut partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..11));
|
||||
let mut partial = transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..11));
|
||||
|
||||
let other_transcript = Transcript::new(
|
||||
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
|
||||
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
|
||||
);
|
||||
|
||||
let other_partial = other_transcript.to_partial(Idx::new(6..9), Idx::new(5..6));
|
||||
let other_partial = other_transcript.to_partial(RangeSet::from(6..9), RangeSet::from(5..6));
|
||||
|
||||
partial.union_transcript(&other_partial);
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_partial_transcript_union_subseq_success(transcript: Transcript) {
|
||||
let mut partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..11));
|
||||
let sent_seq = Subsequence::new(Idx::new([0..3, 5..7]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
let recv_seq = Subsequence::new(Idx::new([0..4, 5..7]), [0, 1, 2, 3, 5, 6].into()).unwrap();
|
||||
let mut partial = transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..11));
|
||||
let sent_seq =
|
||||
Subsequence::new(RangeSet::from([0..3, 5..7]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
let recv_seq =
|
||||
Subsequence::new(RangeSet::from([0..4, 5..7]), [0, 1, 2, 3, 5, 6].into()).unwrap();
|
||||
|
||||
partial.union_subsequence(Direction::Sent, &sent_seq);
|
||||
partial.union_subsequence(Direction::Received, &recv_seq);
|
||||
@@ -951,30 +840,31 @@ mod tests {
|
||||
partial.received_unsafe(),
|
||||
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0]
|
||||
);
|
||||
assert_eq!(partial.sent_authed(), &Idx::new([0..3, 4..10]));
|
||||
assert_eq!(partial.received_authed(), &Idx::new(0..11));
|
||||
assert_eq!(partial.sent_authed(), &RangeSet::from([0..3, 4..10]));
|
||||
assert_eq!(partial.received_authed(), &RangeSet::from(0..11));
|
||||
|
||||
// Overwrite with another subseq.
|
||||
let other_sent_seq = Subsequence::new(Idx::new(0..3), [3, 2, 1].into()).unwrap();
|
||||
let other_sent_seq = Subsequence::new(RangeSet::from(0..3), [3, 2, 1].into()).unwrap();
|
||||
|
||||
partial.union_subsequence(Direction::Sent, &other_sent_seq);
|
||||
assert_eq!(partial.sent_unsafe(), [3, 2, 1, 0, 4, 5, 6, 7, 8, 9, 0, 0]);
|
||||
assert_eq!(partial.sent_authed(), &Idx::new([0..3, 4..10]));
|
||||
assert_eq!(partial.sent_authed(), &RangeSet::from([0..3, 4..10]));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[should_panic]
|
||||
fn test_partial_transcript_union_subseq_failure(transcript: Transcript) {
|
||||
let mut partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..11));
|
||||
let mut partial = transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..11));
|
||||
|
||||
let sent_seq = Subsequence::new(Idx::new([0..3, 13..15]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
let sent_seq =
|
||||
Subsequence::new(RangeSet::from([0..3, 13..15]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
|
||||
partial.union_subsequence(Direction::Sent, &sent_seq);
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_partial_transcript_set_unauthed_range(transcript: Transcript) {
|
||||
let mut partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..7));
|
||||
let mut partial = transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..7));
|
||||
|
||||
partial.set_unauthed_range(7, Direction::Sent, 2..5);
|
||||
partial.set_unauthed_range(5, Direction::Sent, 0..2);
|
||||
@@ -991,13 +881,13 @@ mod tests {
|
||||
#[rstest]
|
||||
#[should_panic]
|
||||
fn test_subsequence_new_invalid_len() {
|
||||
let _ = Subsequence::new(Idx::new([0..3, 5..8]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
let _ = Subsequence::new(RangeSet::from([0..3, 5..8]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[should_panic]
|
||||
fn test_subsequence_copy_to_invalid_len() {
|
||||
let seq = Subsequence::new(Idx::new([0..3, 5..7]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
let seq = Subsequence::new(RangeSet::from([0..3, 5..7]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
|
||||
let mut data: [u8; 3] = [0, 1, 2];
|
||||
seq.copy_to(&mut data);
|
||||
|
||||
@@ -2,15 +2,15 @@
|
||||
|
||||
use std::{collections::HashSet, fmt};
|
||||
|
||||
use rangeset::ToRangeSet;
|
||||
use rangeset::{ToRangeSet, UnionMut};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
hash::{impl_domain_separator, HashAlgId},
|
||||
hash::HashAlgId,
|
||||
transcript::{
|
||||
encoding::{EncodingCommitment, EncodingTree},
|
||||
hash::{PlaintextHash, PlaintextHashSecret},
|
||||
Direction, Idx, Transcript,
|
||||
Direction, RangeSet, Transcript,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -55,8 +55,6 @@ pub enum TranscriptCommitment {
|
||||
Hash(PlaintextHash),
|
||||
}
|
||||
|
||||
impl_domain_separator!(TranscriptCommitment);
|
||||
|
||||
/// Secret for a transcript commitment.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[non_exhaustive]
|
||||
@@ -67,20 +65,18 @@ pub enum TranscriptSecret {
|
||||
Hash(PlaintextHashSecret),
|
||||
}
|
||||
|
||||
impl_domain_separator!(TranscriptSecret);
|
||||
|
||||
/// Configuration for transcript commitments.
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TranscriptCommitConfig {
|
||||
encoding_hash_alg: HashAlgId,
|
||||
has_encoding: bool,
|
||||
has_hash: bool,
|
||||
commits: Vec<((Direction, Idx), TranscriptCommitmentKind)>,
|
||||
commits: Vec<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
|
||||
}
|
||||
|
||||
impl TranscriptCommitConfig {
|
||||
/// Creates a new commit config builder.
|
||||
pub fn builder(transcript: &Transcript) -> TranscriptCommitConfigBuilder {
|
||||
pub fn builder(transcript: &Transcript) -> TranscriptCommitConfigBuilder<'_> {
|
||||
TranscriptCommitConfigBuilder::new(transcript)
|
||||
}
|
||||
|
||||
@@ -100,7 +96,7 @@ impl TranscriptCommitConfig {
|
||||
}
|
||||
|
||||
/// Returns an iterator over the encoding commitment indices.
|
||||
pub fn iter_encoding(&self) -> impl Iterator<Item = &(Direction, Idx)> {
|
||||
pub fn iter_encoding(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
|
||||
self.commits.iter().filter_map(|(idx, kind)| match kind {
|
||||
TranscriptCommitmentKind::Encoding => Some(idx),
|
||||
_ => None,
|
||||
@@ -108,7 +104,7 @@ impl TranscriptCommitConfig {
|
||||
}
|
||||
|
||||
/// Returns an iterator over the hash commitment indices.
|
||||
pub fn iter_hash(&self) -> impl Iterator<Item = (&(Direction, Idx), &HashAlgId)> {
|
||||
pub fn iter_hash(&self) -> impl Iterator<Item = (&(Direction, RangeSet<usize>), &HashAlgId)> {
|
||||
self.commits.iter().filter_map(|(idx, kind)| match kind {
|
||||
TranscriptCommitmentKind::Hash { alg } => Some((idx, alg)),
|
||||
_ => None,
|
||||
@@ -118,7 +114,19 @@ impl TranscriptCommitConfig {
|
||||
/// Returns a request for the transcript commitments.
|
||||
pub fn to_request(&self) -> TranscriptCommitRequest {
|
||||
TranscriptCommitRequest {
|
||||
encoding: self.has_encoding,
|
||||
encoding: self.has_encoding.then(|| {
|
||||
let mut sent = RangeSet::default();
|
||||
let mut recv = RangeSet::default();
|
||||
|
||||
for (dir, idx) in self.iter_encoding() {
|
||||
match dir {
|
||||
Direction::Sent => sent.union_mut(idx),
|
||||
Direction::Received => recv.union_mut(idx),
|
||||
}
|
||||
}
|
||||
|
||||
(sent, recv)
|
||||
}),
|
||||
hash: self
|
||||
.iter_hash()
|
||||
.map(|((dir, idx), alg)| (*dir, idx.clone(), *alg))
|
||||
@@ -138,7 +146,7 @@ pub struct TranscriptCommitConfigBuilder<'a> {
|
||||
has_encoding: bool,
|
||||
has_hash: bool,
|
||||
default_kind: TranscriptCommitmentKind,
|
||||
commits: HashSet<((Direction, Idx), TranscriptCommitmentKind)>,
|
||||
commits: HashSet<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
|
||||
}
|
||||
|
||||
impl<'a> TranscriptCommitConfigBuilder<'a> {
|
||||
@@ -179,15 +187,15 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
|
||||
direction: Direction,
|
||||
kind: TranscriptCommitmentKind,
|
||||
) -> Result<&mut Self, TranscriptCommitConfigBuilderError> {
|
||||
let idx = Idx::new(ranges.to_range_set());
|
||||
let idx = ranges.to_range_set();
|
||||
|
||||
if idx.end() > self.transcript.len_of_direction(direction) {
|
||||
if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
|
||||
return Err(TranscriptCommitConfigBuilderError::new(
|
||||
ErrorKind::Index,
|
||||
format!(
|
||||
"range is out of bounds of the transcript ({}): {} > {}",
|
||||
direction,
|
||||
idx.end(),
|
||||
idx.end().unwrap_or(0),
|
||||
self.transcript.len_of_direction(direction)
|
||||
),
|
||||
));
|
||||
@@ -283,7 +291,7 @@ impl fmt::Display for TranscriptCommitConfigBuilderError {
|
||||
}
|
||||
|
||||
if let Some(source) = &self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
write!(f, " caused by: {source}")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -293,14 +301,14 @@ impl fmt::Display for TranscriptCommitConfigBuilderError {
|
||||
/// Request to compute transcript commitments.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TranscriptCommitRequest {
|
||||
encoding: bool,
|
||||
hash: Vec<(Direction, Idx, HashAlgId)>,
|
||||
encoding: Option<(RangeSet<usize>, RangeSet<usize>)>,
|
||||
hash: Vec<(Direction, RangeSet<usize>, HashAlgId)>,
|
||||
}
|
||||
|
||||
impl TranscriptCommitRequest {
|
||||
/// Returns `true` if an encoding commitment is requested.
|
||||
pub fn encoding(&self) -> bool {
|
||||
self.encoding
|
||||
pub fn has_encoding(&self) -> bool {
|
||||
self.encoding.is_some()
|
||||
}
|
||||
|
||||
/// Returns `true` if a hash commitment is requested.
|
||||
@@ -309,9 +317,14 @@ impl TranscriptCommitRequest {
|
||||
}
|
||||
|
||||
/// Returns an iterator over the hash commitments.
|
||||
pub fn iter_hash(&self) -> impl Iterator<Item = &(Direction, Idx, HashAlgId)> {
|
||||
pub fn iter_hash(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>, HashAlgId)> {
|
||||
self.hash.iter()
|
||||
}
|
||||
|
||||
/// Returns the ranges of the encoding commitments.
|
||||
pub fn encoding(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
|
||||
self.encoding.as_ref()
|
||||
}
|
||||
}
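A minimal sketch of the range-merging step that `to_request` now performs, using only the `rangeset` operations that appear in the diff above; the local `Direction` enum and the input slice are stand-ins for the crate's own types, not the real builder API:

```rust
use rangeset::{RangeSet, UnionMut};

// Stand-in for the crate's `Direction` type.
#[derive(Clone, Copy)]
enum Direction {
    Sent,
    Received,
}

/// Merges per-commitment range sets into one range set per direction,
/// mirroring how `to_request` builds the encoding field.
fn merge_encoding_ranges(
    commits: &[(Direction, RangeSet<usize>)],
) -> (RangeSet<usize>, RangeSet<usize>) {
    let mut sent = RangeSet::default();
    let mut recv = RangeSet::default();

    for (dir, idx) in commits {
        match dir {
            Direction::Sent => sent.union_mut(idx),
            Direction::Received => recv.union_mut(idx),
        }
    }

    (sent, recv)
}
```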
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -1,7 +1,4 @@
|
||||
//! Transcript encoding commitments and proofs.
|
||||
//!
|
||||
//! This is an internal module that is not intended to be used directly by
|
||||
//! users.
|
||||
|
||||
mod encoder;
|
||||
mod proof;
|
||||
@@ -15,15 +12,11 @@ pub use tree::{EncodingTree, EncodingTreeError};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::hash::{impl_domain_separator, TypedHash};
|
||||
use crate::hash::TypedHash;
|
||||
|
||||
/// Transcript encoding commitment.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct EncodingCommitment {
|
||||
/// Merkle root of the encoding commitments.
|
||||
pub root: TypedHash,
|
||||
/// Seed used to generate the encodings.
|
||||
pub secret: EncoderSecret,
|
||||
}
|
||||
|
||||
impl_domain_separator!(EncodingCommitment);
|
||||
|
||||
@@ -4,21 +4,20 @@ use rangeset::{RangeSet, UnionMut};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
hash::{Blinder, HashProviderError},
|
||||
hash::{Blinder, HashProvider, HashProviderError},
|
||||
merkle::{MerkleError, MerkleProof},
|
||||
transcript::{
|
||||
commit::MAX_TOTAL_COMMITTED_DATA,
|
||||
encoding::{new_encoder, Encoder, EncodingCommitment},
|
||||
Direction, Idx,
|
||||
encoding::{new_encoder, Encoder, EncoderSecret, EncodingCommitment},
|
||||
Direction,
|
||||
},
|
||||
CryptoProvider,
|
||||
};
|
||||
|
||||
/// An opening of a leaf in the encoding tree.
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub(super) struct Opening {
|
||||
pub(super) direction: Direction,
|
||||
pub(super) idx: Idx,
|
||||
pub(super) idx: RangeSet<usize>,
|
||||
pub(super) blinder: Blinder,
|
||||
}
|
||||
|
||||
@@ -42,20 +41,21 @@ impl EncodingProof {
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `provider` - Crypto provider.
|
||||
/// * `provider` - Hash provider.
|
||||
/// * `commitment` - Encoding commitment to verify against.
|
||||
/// * `sent` - Sent data to authenticate.
|
||||
/// * `recv` - Received data to authenticate.
|
||||
pub fn verify_with_provider(
|
||||
&self,
|
||||
provider: &CryptoProvider,
|
||||
provider: &HashProvider,
|
||||
secret: &EncoderSecret,
|
||||
commitment: &EncodingCommitment,
|
||||
sent: &[u8],
|
||||
recv: &[u8],
|
||||
) -> Result<(Idx, Idx), EncodingProofError> {
|
||||
let hasher = provider.hash.get(&commitment.root.alg)?;
|
||||
) -> Result<(RangeSet<usize>, RangeSet<usize>), EncodingProofError> {
|
||||
let hasher = provider.get(&commitment.root.alg)?;
|
||||
|
||||
let encoder = new_encoder(&commitment.secret);
|
||||
let encoder = new_encoder(secret);
|
||||
let Self {
|
||||
inclusion_proof,
|
||||
openings,
|
||||
@@ -90,13 +90,13 @@ impl EncodingProof {
|
||||
};
|
||||
|
||||
// Make sure the ranges are within the bounds of the transcript.
|
||||
if idx.end() > data.len() {
|
||||
if idx.end().unwrap_or(0) > data.len() {
|
||||
return Err(EncodingProofError::new(
|
||||
ErrorKind::Proof,
|
||||
format!(
|
||||
"index out of bounds of the transcript ({}): {} > {}",
|
||||
direction,
|
||||
idx.end(),
|
||||
idx.end().unwrap_or(0),
|
||||
data.len()
|
||||
),
|
||||
));
|
||||
@@ -112,7 +112,7 @@ impl EncodingProof {
|
||||
// present in the merkle tree.
|
||||
leaves.push((*id, hasher.hash(&expected_leaf)));
|
||||
|
||||
auth.union_mut(idx.as_range_set());
|
||||
auth.union_mut(idx);
|
||||
}
|
||||
|
||||
// Verify that the expected hashes are present in the merkle tree.
|
||||
@@ -122,7 +122,7 @@ impl EncodingProof {
|
||||
// data is authentic.
|
||||
inclusion_proof.verify(hasher, &commitment.root, leaves)?;
|
||||
|
||||
Ok((Idx(auth_sent), Idx(auth_recv)))
|
||||
Ok((auth_sent, auth_recv))
|
||||
}
|
||||
}
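A hedged sketch of the new verification call shape, mirroring the tests below; the module paths are assumptions about where these types are re-exported, and the inputs are taken as parameters rather than constructed here:

```rust
use tlsn_core::{
    hash::HashProvider,
    transcript::encoding::{EncoderSecret, EncodingCommitment, EncodingProof},
};

// Assumed helper: verifies a proof against plaintext the verifier already holds.
fn check_encoding_proof(
    proof: &EncodingProof,
    secret: &EncoderSecret,
    commitment: &EncodingCommitment,
    sent: &[u8],
    recv: &[u8],
) {
    // The provider is now a plain `HashProvider`, and the encoder secret is
    // passed explicitly instead of living inside the commitment.
    let (auth_sent, auth_recv) = proof
        .verify_with_provider(&HashProvider::default(), secret, commitment, sent, recv)
        .expect("encoding proof should verify");

    // The returned range sets are the transcript bytes that were authenticated.
    println!("authenticated sent: {auth_sent:?}, received: {auth_recv:?}");
}
```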
|
||||
|
||||
@@ -161,7 +161,7 @@ impl fmt::Display for EncodingProofError {
|
||||
}
|
||||
|
||||
if let Some(source) = &self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
write!(f, " caused by: {source}")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -233,10 +233,7 @@ mod test {
|
||||
use crate::{
|
||||
fixtures::{encoder_secret, encoder_secret_tampered_seed, encoding_provider},
|
||||
hash::Blake3,
|
||||
transcript::{
|
||||
encoding::{EncoderSecret, EncodingTree},
|
||||
Idx, Transcript,
|
||||
},
|
||||
transcript::{encoding::EncodingTree, Transcript},
|
||||
};
|
||||
|
||||
use super::*;
|
||||
@@ -247,21 +244,18 @@ mod test {
|
||||
commitment: EncodingCommitment,
|
||||
}
|
||||
|
||||
fn new_encoding_fixture(secret: EncoderSecret) -> EncodingFixture {
|
||||
fn new_encoding_fixture() -> EncodingFixture {
|
||||
let transcript = Transcript::new(POST_JSON, OK_JSON);
|
||||
|
||||
let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len()));
|
||||
let idx_1 = (Direction::Received, Idx::new(0..OK_JSON.len()));
|
||||
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
|
||||
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len()));
|
||||
|
||||
let provider = encoding_provider(transcript.sent(), transcript.received());
|
||||
let tree = EncodingTree::new(&Blake3::default(), [&idx_0, &idx_1], &provider).unwrap();
|
||||
|
||||
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
|
||||
|
||||
let commitment = EncodingCommitment {
|
||||
root: tree.root(),
|
||||
secret,
|
||||
};
|
||||
let commitment = EncodingCommitment { root: tree.root() };
|
||||
|
||||
EncodingFixture {
|
||||
transcript,
|
||||
@@ -276,11 +270,12 @@ mod test {
|
||||
transcript,
|
||||
proof,
|
||||
commitment,
|
||||
} = new_encoding_fixture(encoder_secret_tampered_seed());
|
||||
} = new_encoding_fixture();
|
||||
|
||||
let err = proof
|
||||
.verify_with_provider(
|
||||
&CryptoProvider::default(),
|
||||
&HashProvider::default(),
|
||||
&encoder_secret_tampered_seed(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
@@ -296,13 +291,19 @@ mod test {
|
||||
transcript,
|
||||
proof,
|
||||
commitment,
|
||||
} = new_encoding_fixture(encoder_secret());
|
||||
} = new_encoding_fixture();
|
||||
|
||||
let sent = &transcript.sent()[transcript.sent().len() - 1..];
|
||||
let recv = &transcript.received()[transcript.received().len() - 2..];
|
||||
|
||||
let err = proof
|
||||
.verify_with_provider(&CryptoProvider::default(), &commitment, sent, recv)
|
||||
.verify_with_provider(
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
sent,
|
||||
recv,
|
||||
)
|
||||
.unwrap_err();
|
||||
|
||||
assert!(matches!(err.kind, ErrorKind::Proof));
|
||||
@@ -314,15 +315,16 @@ mod test {
|
||||
transcript,
|
||||
mut proof,
|
||||
commitment,
|
||||
} = new_encoding_fixture(encoder_secret());
|
||||
} = new_encoding_fixture();
|
||||
|
||||
let Opening { idx, .. } = proof.openings.values_mut().next().unwrap();
|
||||
|
||||
*idx = Idx::new([0..3, 13..15]);
|
||||
*idx = RangeSet::from([0..3, 13..15]);
|
||||
|
||||
let err = proof
|
||||
.verify_with_provider(
|
||||
&CryptoProvider::default(),
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
@@ -338,7 +340,7 @@ mod test {
|
||||
transcript,
|
||||
mut proof,
|
||||
commitment,
|
||||
} = new_encoding_fixture(encoder_secret());
|
||||
} = new_encoding_fixture();
|
||||
|
||||
let Opening { blinder, .. } = proof.openings.values_mut().next().unwrap();
|
||||
|
||||
@@ -346,7 +348,8 @@ mod test {
|
||||
|
||||
let err = proof
|
||||
.verify_with_provider(
|
||||
&CryptoProvider::default(),
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
|
||||
@@ -13,6 +13,7 @@ pub trait EncodingProvider {
|
||||
) -> Result<(), EncodingProviderError>;
|
||||
}
|
||||
|
||||
/// Error for [`EncodingProvider`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("failed to provide encoding")]
|
||||
pub struct EncodingProviderError;
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use bimap::BiMap;
|
||||
use rangeset::{RangeSet, UnionMut};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
@@ -11,7 +12,7 @@ use crate::{
|
||||
proof::{EncodingProof, Opening},
|
||||
EncodingProvider,
|
||||
},
|
||||
Direction, Idx,
|
||||
Direction,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -22,7 +23,7 @@ pub enum EncodingTreeError {
|
||||
#[error("index is out of bounds of the transcript")]
|
||||
OutOfBounds {
|
||||
/// The index.
|
||||
index: Idx,
|
||||
index: RangeSet<usize>,
|
||||
/// The transcript length.
|
||||
transcript_length: usize,
|
||||
},
|
||||
@@ -30,13 +31,13 @@ pub enum EncodingTreeError {
|
||||
#[error("encoding provider is missing an encoding for an index")]
|
||||
MissingEncoding {
|
||||
/// The index which is missing.
|
||||
index: Idx,
|
||||
index: RangeSet<usize>,
|
||||
},
|
||||
/// Index is missing from the tree.
|
||||
#[error("index is missing from the tree")]
|
||||
MissingLeaf {
|
||||
/// The index which is missing.
|
||||
index: Idx,
|
||||
index: RangeSet<usize>,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -49,11 +50,11 @@ pub struct EncodingTree {
|
||||
blinders: Vec<Blinder>,
|
||||
/// Mapping between the index of a leaf and the transcript index it
|
||||
/// corresponds to.
|
||||
idxs: BiMap<usize, (Direction, Idx)>,
|
||||
idxs: BiMap<usize, (Direction, RangeSet<usize>)>,
|
||||
/// Union of all transcript indices in the sent direction.
|
||||
sent_idx: Idx,
|
||||
sent_idx: RangeSet<usize>,
|
||||
/// Union of all transcript indices in the received direction.
|
||||
received_idx: Idx,
|
||||
received_idx: RangeSet<usize>,
|
||||
}
|
||||
|
||||
opaque_debug::implement!(EncodingTree);
|
||||
@@ -68,15 +69,15 @@ impl EncodingTree {
|
||||
/// * `provider` - The encoding provider.
|
||||
pub fn new<'idx>(
|
||||
hasher: &dyn HashAlgorithm,
|
||||
idxs: impl IntoIterator<Item = &'idx (Direction, Idx)>,
|
||||
idxs: impl IntoIterator<Item = &'idx (Direction, RangeSet<usize>)>,
|
||||
provider: &dyn EncodingProvider,
|
||||
) -> Result<Self, EncodingTreeError> {
|
||||
let mut this = Self {
|
||||
tree: MerkleTree::new(hasher.id()),
|
||||
blinders: Vec::new(),
|
||||
idxs: BiMap::new(),
|
||||
sent_idx: Idx::empty(),
|
||||
received_idx: Idx::empty(),
|
||||
sent_idx: RangeSet::default(),
|
||||
received_idx: RangeSet::default(),
|
||||
};
|
||||
|
||||
let mut leaves = Vec::new();
|
||||
@@ -138,7 +139,7 @@ impl EncodingTree {
|
||||
/// * `idxs` - The transcript indices to prove.
|
||||
pub fn proof<'idx>(
|
||||
&self,
|
||||
idxs: impl Iterator<Item = &'idx (Direction, Idx)>,
|
||||
idxs: impl Iterator<Item = &'idx (Direction, RangeSet<usize>)>,
|
||||
) -> Result<EncodingProof, EncodingTreeError> {
|
||||
let mut openings = HashMap::new();
|
||||
for dir_idx in idxs {
|
||||
@@ -171,11 +172,11 @@ impl EncodingTree {
|
||||
}
|
||||
|
||||
/// Returns whether the tree contains the given transcript index.
|
||||
pub fn contains(&self, idx: &(Direction, Idx)) -> bool {
|
||||
pub fn contains(&self, idx: &(Direction, RangeSet<usize>)) -> bool {
|
||||
self.idxs.contains_right(idx)
|
||||
}
|
||||
|
||||
pub(crate) fn idx(&self, direction: Direction) -> &Idx {
|
||||
pub(crate) fn idx(&self, direction: Direction) -> &RangeSet<usize> {
|
||||
match direction {
|
||||
Direction::Sent => &self.sent_idx,
|
||||
Direction::Received => &self.received_idx,
|
||||
@@ -183,7 +184,7 @@ impl EncodingTree {
|
||||
}
|
||||
|
||||
/// Returns the committed transcript indices.
|
||||
pub(crate) fn transcript_indices(&self) -> impl Iterator<Item = &(Direction, Idx)> {
|
||||
pub(crate) fn transcript_indices(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
|
||||
self.idxs.right_values()
|
||||
}
|
||||
}
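A short sketch of committing to both directions of a transcript with the `RangeSet`-based API, modeled on the tests that follow; the crate paths and the availability of `Blake3` as a public hasher are assumptions:

```rust
use rangeset::RangeSet;
use tlsn_core::{
    hash::Blake3,
    transcript::{
        encoding::{EncodingProvider, EncodingTree},
        Direction, Transcript,
    },
};

fn commit_full_transcript(
    transcript: &Transcript,
    provider: &dyn EncodingProvider,
) -> EncodingTree {
    // One committed index per direction, covering the whole transcript.
    let idx_0 = (Direction::Sent, RangeSet::from(0..transcript.sent().len()));
    let idx_1 = (
        Direction::Received,
        RangeSet::from(0..transcript.received().len()),
    );

    // Build the Merkle tree over the encodings of the committed ranges.
    let tree = EncodingTree::new(&Blake3::default(), [&idx_0, &idx_1], provider)
        .expect("encodings should be available for the committed ranges");

    // Later, open any subset of the committed indices as a proof.
    let _proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();

    tree
}
```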
|
||||
@@ -193,15 +194,14 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::{
|
||||
fixtures::{encoder_secret, encoding_provider},
|
||||
hash::Blake3,
|
||||
hash::{Blake3, HashProvider},
|
||||
transcript::{encoding::EncodingCommitment, Transcript},
|
||||
CryptoProvider,
|
||||
};
|
||||
use tlsn_data_fixtures::http::{request::POST_JSON, response::OK_JSON};
|
||||
|
||||
fn new_tree<'seq>(
|
||||
transcript: &Transcript,
|
||||
idxs: impl Iterator<Item = &'seq (Direction, Idx)>,
|
||||
idxs: impl Iterator<Item = &'seq (Direction, RangeSet<usize>)>,
|
||||
) -> Result<EncodingTree, EncodingTreeError> {
|
||||
let provider = encoding_provider(transcript.sent(), transcript.received());
|
||||
|
||||
@@ -212,8 +212,8 @@ mod tests {
|
||||
fn test_encoding_tree() {
|
||||
let transcript = Transcript::new(POST_JSON, OK_JSON);
|
||||
|
||||
let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len()));
|
||||
let idx_1 = (Direction::Received, Idx::new(0..OK_JSON.len()));
|
||||
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
|
||||
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len()));
|
||||
|
||||
let tree = new_tree(&transcript, [&idx_0, &idx_1].into_iter()).unwrap();
|
||||
|
||||
@@ -222,14 +222,12 @@ mod tests {
|
||||
|
||||
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
|
||||
|
||||
let commitment = EncodingCommitment {
|
||||
root: tree.root(),
|
||||
secret: encoder_secret(),
|
||||
};
|
||||
let commitment = EncodingCommitment { root: tree.root() };
|
||||
|
||||
let (auth_sent, auth_recv) = proof
|
||||
.verify_with_provider(
|
||||
&CryptoProvider::default(),
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
@@ -244,10 +242,10 @@ mod tests {
|
||||
fn test_encoding_tree_multiple_ranges() {
|
||||
let transcript = Transcript::new(POST_JSON, OK_JSON);
|
||||
|
||||
let idx_0 = (Direction::Sent, Idx::new(0..1));
|
||||
let idx_1 = (Direction::Sent, Idx::new(1..POST_JSON.len()));
|
||||
let idx_2 = (Direction::Received, Idx::new(0..1));
|
||||
let idx_3 = (Direction::Received, Idx::new(1..OK_JSON.len()));
|
||||
let idx_0 = (Direction::Sent, RangeSet::from(0..1));
|
||||
let idx_1 = (Direction::Sent, RangeSet::from(1..POST_JSON.len()));
|
||||
let idx_2 = (Direction::Received, RangeSet::from(0..1));
|
||||
let idx_3 = (Direction::Received, RangeSet::from(1..OK_JSON.len()));
|
||||
|
||||
let tree = new_tree(&transcript, [&idx_0, &idx_1, &idx_2, &idx_3].into_iter()).unwrap();
|
||||
|
||||
@@ -260,25 +258,23 @@ mod tests {
|
||||
.proof([&idx_0, &idx_1, &idx_2, &idx_3].into_iter())
|
||||
.unwrap();
|
||||
|
||||
let commitment = EncodingCommitment {
|
||||
root: tree.root(),
|
||||
secret: encoder_secret(),
|
||||
};
|
||||
let commitment = EncodingCommitment { root: tree.root() };
|
||||
|
||||
let (auth_sent, auth_recv) = proof
|
||||
.verify_with_provider(
|
||||
&CryptoProvider::default(),
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let mut expected_auth_sent = Idx::default();
|
||||
let mut expected_auth_sent = RangeSet::default();
|
||||
expected_auth_sent.union_mut(&idx_0.1);
|
||||
expected_auth_sent.union_mut(&idx_1.1);
|
||||
|
||||
let mut expected_auth_recv = Idx::default();
|
||||
let mut expected_auth_recv = RangeSet::default();
|
||||
expected_auth_recv.union_mut(&idx_2.1);
|
||||
expected_auth_recv.union_mut(&idx_3.1);
|
||||
|
||||
@@ -290,9 +286,9 @@ mod tests {
|
||||
fn test_encoding_tree_proof_missing_leaf() {
|
||||
let transcript = Transcript::new(POST_JSON, OK_JSON);
|
||||
|
||||
let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len()));
|
||||
let idx_1 = (Direction::Received, Idx::new(0..4));
|
||||
let idx_2 = (Direction::Received, Idx::new(4..OK_JSON.len()));
|
||||
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
|
||||
let idx_1 = (Direction::Received, RangeSet::from(0..4));
|
||||
let idx_2 = (Direction::Received, RangeSet::from(4..OK_JSON.len()));
|
||||
|
||||
let tree = new_tree(&transcript, [&idx_0, &idx_1].into_iter()).unwrap();
|
||||
|
||||
@@ -306,8 +302,8 @@ mod tests {
|
||||
fn test_encoding_tree_out_of_bounds() {
|
||||
let transcript = Transcript::new(POST_JSON, OK_JSON);
|
||||
|
||||
let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len() + 1));
|
||||
let idx_1 = (Direction::Received, Idx::new(0..OK_JSON.len() + 1));
|
||||
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len() + 1));
|
||||
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len() + 1));
|
||||
|
||||
let result = new_tree(&transcript, [&idx_0].into_iter()).unwrap_err();
|
||||
assert!(matches!(result, EncodingTreeError::MissingEncoding { .. }));
|
||||
@@ -322,7 +318,7 @@ mod tests {
|
||||
|
||||
let result = EncodingTree::new(
|
||||
&Blake3::default(),
|
||||
[(Direction::Sent, Idx::new(0..8))].iter(),
|
||||
[(Direction::Sent, RangeSet::from(0..8))].iter(),
|
||||
&provider,
|
||||
)
|
||||
.unwrap_err();
|
||||
|
||||
@@ -3,8 +3,8 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
hash::{impl_domain_separator, Blinder, HashAlgId, HashAlgorithm, TypedHash},
|
||||
transcript::{Direction, Idx},
|
||||
hash::{Blinder, HashAlgId, HashAlgorithm, TypedHash},
|
||||
transcript::{Direction, RangeSet},
|
||||
};
|
||||
|
||||
/// Hashes plaintext with a blinder.
|
||||
@@ -23,20 +23,18 @@ pub struct PlaintextHash {
|
||||
/// Direction of the plaintext.
|
||||
pub direction: Direction,
|
||||
/// Index of plaintext.
|
||||
pub idx: Idx,
|
||||
pub idx: RangeSet<usize>,
|
||||
/// The hash of the data.
|
||||
pub hash: TypedHash,
|
||||
}
|
||||
|
||||
impl_domain_separator!(PlaintextHash);
|
||||
|
||||
/// Secret component of [`PlaintextHash`].
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct PlaintextHashSecret {
|
||||
/// Direction of the plaintext.
|
||||
pub direction: Direction,
|
||||
/// Index of plaintext.
|
||||
pub idx: Idx,
|
||||
pub idx: RangeSet<usize>,
|
||||
/// The algorithm of the hash.
|
||||
pub alg: HashAlgId,
|
||||
/// Blinder for the hash.
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
//! Transcript proofs.
|
||||
|
||||
use rangeset::{Cover, ToRangeSet};
|
||||
use rangeset::{Cover, Difference, Subset, ToRangeSet, UnionMut};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{collections::HashSet, fmt};
|
||||
|
||||
use crate::{
|
||||
connection::TranscriptLength,
|
||||
hash::HashAlgId,
|
||||
display::FmtRangeSet,
|
||||
hash::{HashAlgId, HashProvider},
|
||||
transcript::{
|
||||
commit::{TranscriptCommitment, TranscriptCommitmentKind},
|
||||
encoding::{EncodingProof, EncodingProofError, EncodingTree},
|
||||
encoding::{EncoderSecret, EncodingProof, EncodingProofError, EncodingTree},
|
||||
hash::{hash_plaintext, PlaintextHash, PlaintextHashSecret},
|
||||
Direction, Idx, PartialTranscript, Transcript, TranscriptSecret,
|
||||
Direction, PartialTranscript, RangeSet, Transcript, TranscriptSecret,
|
||||
},
|
||||
CryptoProvider,
|
||||
};
|
||||
|
||||
/// Default commitment kinds in order of preference for building transcript
|
||||
@@ -22,6 +22,9 @@ const DEFAULT_COMMITMENT_KINDS: &[TranscriptCommitmentKind] = &[
|
||||
TranscriptCommitmentKind::Hash {
|
||||
alg: HashAlgId::SHA256,
|
||||
},
|
||||
TranscriptCommitmentKind::Hash {
|
||||
alg: HashAlgId::BLAKE3,
|
||||
},
|
||||
TranscriptCommitmentKind::Encoding,
|
||||
];
|
||||
|
||||
@@ -42,12 +45,13 @@ impl TranscriptProof {
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `provider` - The crypto provider to use for verification.
|
||||
/// * `provider` - The hash provider to use for verification.
|
||||
/// * `attestation_body` - The attestation body to verify against.
|
||||
pub fn verify_with_provider<'a>(
|
||||
self,
|
||||
provider: &CryptoProvider,
|
||||
provider: &HashProvider,
|
||||
length: &TranscriptLength,
|
||||
encoder_secret: Option<&EncoderSecret>,
|
||||
commitments: impl IntoIterator<Item = &'a TranscriptCommitment>,
|
||||
) -> Result<PartialTranscript, TranscriptProofError> {
|
||||
let mut encoding_commitment = None;
|
||||
@@ -78,11 +82,18 @@ impl TranscriptProof {
|
||||
));
|
||||
}
|
||||
|
||||
let mut total_auth_sent = Idx::default();
|
||||
let mut total_auth_recv = Idx::default();
|
||||
let mut total_auth_sent = RangeSet::default();
|
||||
let mut total_auth_recv = RangeSet::default();
|
||||
|
||||
// Verify encoding proof.
|
||||
if let Some(proof) = self.encoding_proof {
|
||||
let secret = encoder_secret.ok_or_else(|| {
|
||||
TranscriptProofError::new(
|
||||
ErrorKind::Encoding,
|
||||
"contains an encoding proof but missing encoder secret",
|
||||
)
|
||||
})?;
|
||||
|
||||
let commitment = encoding_commitment.ok_or_else(|| {
|
||||
TranscriptProofError::new(
|
||||
ErrorKind::Encoding,
|
||||
@@ -92,6 +103,7 @@ impl TranscriptProof {
|
||||
|
||||
let (auth_sent, auth_recv) = proof.verify_with_provider(
|
||||
provider,
|
||||
secret,
|
||||
commitment,
|
||||
self.transcript.sent_unsafe(),
|
||||
self.transcript.received_unsafe(),
|
||||
@@ -109,7 +121,7 @@ impl TranscriptProof {
|
||||
blinder,
|
||||
} in self.hash_secrets
|
||||
{
|
||||
let hasher = provider.hash.get(&alg).map_err(|_| {
|
||||
let hasher = provider.get(&alg).map_err(|_| {
|
||||
TranscriptProofError::new(
|
||||
ErrorKind::Hash,
|
||||
format!("hash opening has unknown algorithm: {alg}"),
|
||||
@@ -121,7 +133,7 @@ impl TranscriptProof {
|
||||
Direction::Received => (self.transcript.received_unsafe(), &mut total_auth_recv),
|
||||
};
|
||||
|
||||
if idx.end() > plaintext.len() {
|
||||
if idx.end().unwrap_or(0) > plaintext.len() {
|
||||
return Err(TranscriptProofError::new(
|
||||
ErrorKind::Hash,
|
||||
"hash opening index is out of bounds",
|
||||
@@ -200,7 +212,7 @@ impl fmt::Display for TranscriptProofError {
|
||||
}
|
||||
|
||||
if let Some(source) = &self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
write!(f, " caused by: {source}")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -216,15 +228,15 @@ impl From<EncodingProofError> for TranscriptProofError {
|
||||
/// Union of ranges to reveal.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
struct QueryIdx {
|
||||
sent: Idx,
|
||||
recv: Idx,
|
||||
sent: RangeSet<usize>,
|
||||
recv: RangeSet<usize>,
|
||||
}
|
||||
|
||||
impl QueryIdx {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
sent: Idx::empty(),
|
||||
recv: Idx::empty(),
|
||||
sent: RangeSet::default(),
|
||||
recv: RangeSet::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -232,7 +244,7 @@ impl QueryIdx {
|
||||
self.sent.is_empty() && self.recv.is_empty()
|
||||
}
|
||||
|
||||
fn union(&mut self, direction: &Direction, other: &Idx) {
|
||||
fn union(&mut self, direction: &Direction, other: &RangeSet<usize>) {
|
||||
match direction {
|
||||
Direction::Sent => self.sent.union_mut(other),
|
||||
Direction::Received => self.recv.union_mut(other),
|
||||
@@ -242,7 +254,12 @@ impl QueryIdx {
|
||||
|
||||
impl std::fmt::Display for QueryIdx {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "sent: {}, received: {}", self.sent, self.recv)
|
||||
write!(
|
||||
f,
|
||||
"sent: {}, received: {}",
|
||||
FmtRangeSet(&self.sent),
|
||||
FmtRangeSet(&self.recv)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -254,19 +271,19 @@ pub struct TranscriptProofBuilder<'a> {
|
||||
transcript: &'a Transcript,
|
||||
encoding_tree: Option<&'a EncodingTree>,
|
||||
hash_secrets: Vec<&'a PlaintextHashSecret>,
|
||||
committed_sent: Idx,
|
||||
committed_recv: Idx,
|
||||
committed_sent: RangeSet<usize>,
|
||||
committed_recv: RangeSet<usize>,
|
||||
query_idx: QueryIdx,
|
||||
}
|
||||
|
||||
impl<'a> TranscriptProofBuilder<'a> {
|
||||
/// Creates a new proof builder.
|
||||
pub(crate) fn new(
|
||||
pub fn new(
|
||||
transcript: &'a Transcript,
|
||||
secrets: impl IntoIterator<Item = &'a TranscriptSecret>,
|
||||
) -> Self {
|
||||
let mut committed_sent = Idx::empty();
|
||||
let mut committed_recv = Idx::empty();
|
||||
let mut committed_sent = RangeSet::default();
|
||||
let mut committed_recv = RangeSet::default();
|
||||
|
||||
let mut encoding_tree = None;
|
||||
let mut hash_secrets = Vec::new();
|
||||
@@ -324,15 +341,15 @@ impl<'a> TranscriptProofBuilder<'a> {
|
||||
ranges: &dyn ToRangeSet<usize>,
|
||||
direction: Direction,
|
||||
) -> Result<&mut Self, TranscriptProofBuilderError> {
|
||||
let idx = Idx::new(ranges.to_range_set());
|
||||
let idx = ranges.to_range_set();
|
||||
|
||||
if idx.end() > self.transcript.len_of_direction(direction) {
|
||||
if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
|
||||
return Err(TranscriptProofBuilderError::new(
|
||||
BuilderErrorKind::Index,
|
||||
format!(
|
||||
"range is out of bounds of the transcript ({}): {} > {}",
|
||||
direction,
|
||||
idx.end(),
|
||||
idx.end().unwrap_or(0),
|
||||
self.transcript.len_of_direction(direction)
|
||||
),
|
||||
));
|
||||
@@ -349,7 +366,10 @@ impl<'a> TranscriptProofBuilder<'a> {
|
||||
let missing = idx.difference(committed);
|
||||
return Err(TranscriptProofBuilderError::new(
|
||||
BuilderErrorKind::MissingCommitment,
|
||||
format!("commitment is missing for ranges in {direction} transcript: {missing}"),
|
||||
format!(
|
||||
"commitment is missing for ranges in {direction} transcript: {}",
|
||||
FmtRangeSet(&missing)
|
||||
),
|
||||
));
|
||||
}
|
||||
Ok(self)
|
||||
@@ -404,25 +424,23 @@ impl<'a> TranscriptProofBuilder<'a> {
|
||||
continue;
|
||||
};
|
||||
|
||||
let (sent_dir_idxs, sent_uncovered) =
|
||||
uncovered_query_idx.sent.as_range_set().cover_by(
|
||||
encoding_tree
|
||||
.transcript_indices()
|
||||
.filter(|(dir, _)| *dir == Direction::Sent),
|
||||
|(_, idx)| &idx.0,
|
||||
);
|
||||
let (sent_dir_idxs, sent_uncovered) = uncovered_query_idx.sent.cover_by(
|
||||
encoding_tree
|
||||
.transcript_indices()
|
||||
.filter(|(dir, _)| *dir == Direction::Sent),
|
||||
|(_, idx)| idx,
|
||||
);
|
||||
// Uncovered ranges will be checked with ranges of the next
|
||||
// preferred commitment kind.
|
||||
uncovered_query_idx.sent = Idx(sent_uncovered);
|
||||
uncovered_query_idx.sent = sent_uncovered;
|
||||
|
||||
let (recv_dir_idxs, recv_uncovered) =
|
||||
uncovered_query_idx.recv.as_range_set().cover_by(
|
||||
encoding_tree
|
||||
.transcript_indices()
|
||||
.filter(|(dir, _)| *dir == Direction::Received),
|
||||
|(_, idx)| &idx.0,
|
||||
);
|
||||
uncovered_query_idx.recv = Idx(recv_uncovered);
|
||||
let (recv_dir_idxs, recv_uncovered) = uncovered_query_idx.recv.cover_by(
|
||||
encoding_tree
|
||||
.transcript_indices()
|
||||
.filter(|(dir, _)| *dir == Direction::Received),
|
||||
|(_, idx)| idx,
|
||||
);
|
||||
uncovered_query_idx.recv = recv_uncovered;
|
||||
|
||||
let dir_idxs = sent_dir_idxs
|
||||
.into_iter()
|
||||
@@ -440,25 +458,23 @@ impl<'a> TranscriptProofBuilder<'a> {
|
||||
}
|
||||
}
|
||||
TranscriptCommitmentKind::Hash { alg } => {
|
||||
let (sent_hashes, sent_uncovered) =
|
||||
uncovered_query_idx.sent.as_range_set().cover_by(
|
||||
self.hash_secrets.iter().filter(|hash| {
|
||||
hash.direction == Direction::Sent && &hash.alg == alg
|
||||
}),
|
||||
|hash| &hash.idx.0,
|
||||
);
|
||||
let (sent_hashes, sent_uncovered) = uncovered_query_idx.sent.cover_by(
|
||||
self.hash_secrets.iter().filter(|hash| {
|
||||
hash.direction == Direction::Sent && &hash.alg == alg
|
||||
}),
|
||||
|hash| &hash.idx,
|
||||
);
|
||||
// Uncovered ranges will be checked with ranges of the next
|
||||
// preferred commitment kind.
|
||||
uncovered_query_idx.sent = Idx(sent_uncovered);
|
||||
uncovered_query_idx.sent = sent_uncovered;
|
||||
|
||||
let (recv_hashes, recv_uncovered) =
|
||||
uncovered_query_idx.recv.as_range_set().cover_by(
|
||||
self.hash_secrets.iter().filter(|hash| {
|
||||
hash.direction == Direction::Received && &hash.alg == alg
|
||||
}),
|
||||
|hash| &hash.idx.0,
|
||||
);
|
||||
uncovered_query_idx.recv = Idx(recv_uncovered);
|
||||
let (recv_hashes, recv_uncovered) = uncovered_query_idx.recv.cover_by(
|
||||
self.hash_secrets.iter().filter(|hash| {
|
||||
hash.direction == Direction::Received && &hash.alg == alg
|
||||
}),
|
||||
|hash| &hash.idx,
|
||||
);
|
||||
uncovered_query_idx.recv = recv_uncovered;
|
||||
|
||||
transcript_proof.hash_secrets.extend(
|
||||
sent_hashes
|
||||
@@ -546,14 +562,13 @@ impl fmt::Display for TranscriptProofBuilderError {
|
||||
BuilderErrorKind::Index => f.write_str("index error")?,
|
||||
BuilderErrorKind::MissingCommitment => f.write_str("commitment error")?,
|
||||
BuilderErrorKind::Cover { uncovered, kinds } => f.write_str(&format!(
|
||||
"unable to cover the following ranges in transcript using available {:?} commitments: {uncovered}",
|
||||
kinds
|
||||
"unable to cover the following ranges in transcript using available {kinds:?} commitments: {uncovered}"
|
||||
))?,
|
||||
BuilderErrorKind::NotSupported => f.write_str("not supported")?,
|
||||
}
|
||||
|
||||
if let Some(source) = &self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
write!(f, " caused by: {source}")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -569,7 +584,7 @@ mod tests {
|
||||
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
|
||||
|
||||
use crate::{
|
||||
fixtures::{encoding_provider, request_fixture, ConnectionFixture, RequestFixture},
|
||||
fixtures::{encoder_secret, encoding_provider},
|
||||
hash::{Blake3, Blinder, HashAlgId},
|
||||
transcript::TranscriptCommitConfigBuilder,
|
||||
};
|
||||
@@ -579,15 +594,13 @@ mod tests {
|
||||
#[rstest]
|
||||
fn test_verify_missing_encoding_commitment_root() {
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let connection = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let RequestFixture { encoding_tree, .. } = request_fixture(
|
||||
transcript.clone(),
|
||||
encoding_provider(GET_WITH_HEADER, OK_JSON),
|
||||
connection.clone(),
|
||||
Blake3::default(),
|
||||
Vec::new(),
|
||||
);
|
||||
let idxs = vec![(Direction::Received, RangeSet::from(0..transcript.len().1))];
|
||||
let encoding_tree = EncodingTree::new(
|
||||
&Blake3::default(),
|
||||
&idxs,
|
||||
&encoding_provider(transcript.sent(), transcript.received()),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let secrets = vec![TranscriptSecret::Encoding(encoding_tree)];
|
||||
let mut builder = TranscriptProofBuilder::new(&transcript, &secrets);
|
||||
@@ -596,9 +609,14 @@ mod tests {
|
||||
|
||||
let transcript_proof = builder.build().unwrap();
|
||||
|
||||
let provider = CryptoProvider::default();
|
||||
let provider = HashProvider::default();
|
||||
let err = transcript_proof
|
||||
.verify_with_provider(&provider, &transcript.length(), &[])
|
||||
.verify_with_provider(
|
||||
&provider,
|
||||
&transcript.length(),
|
||||
Some(&encoder_secret()),
|
||||
&[],
|
||||
)
|
||||
.err()
|
||||
.unwrap();
|
||||
|
||||
@@ -636,16 +654,17 @@ mod tests {
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_reveal_with_hash_commitment() {
|
||||
#[case::sha256(HashAlgId::SHA256)]
|
||||
#[case::blake3(HashAlgId::BLAKE3)]
|
||||
fn test_reveal_with_hash_commitment(#[case] alg: HashAlgId) {
|
||||
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
|
||||
let provider = CryptoProvider::default();
|
||||
let provider = HashProvider::default();
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
|
||||
let direction = Direction::Sent;
|
||||
let idx = Idx::new(0..10);
|
||||
let idx = RangeSet::from(0..10);
|
||||
let blinder: Blinder = rng.random();
|
||||
let alg = HashAlgId::SHA256;
|
||||
let hasher = provider.hash.get(&alg).unwrap();
|
||||
let hasher = provider.get(&alg).unwrap();
|
||||
|
||||
let commitment = PlaintextHash {
|
||||
direction,
|
||||
@@ -671,6 +690,7 @@ mod tests {
|
||||
.verify_with_provider(
|
||||
&provider,
|
||||
&transcript.length(),
|
||||
None,
|
||||
&[TranscriptCommitment::Hash(commitment)],
|
||||
)
|
||||
.unwrap();
|
||||
@@ -682,16 +702,17 @@ mod tests {
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_reveal_with_inconsistent_hash_commitment() {
|
||||
#[case::sha256(HashAlgId::SHA256)]
|
||||
#[case::blake3(HashAlgId::BLAKE3)]
|
||||
fn test_reveal_with_inconsistent_hash_commitment(#[case] alg: HashAlgId) {
|
||||
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
|
||||
let provider = CryptoProvider::default();
|
||||
let provider = HashProvider::default();
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
|
||||
let direction = Direction::Sent;
|
||||
let idx = Idx::new(0..10);
|
||||
let idx = RangeSet::from(0..10);
|
||||
let blinder: Blinder = rng.random();
|
||||
let alg = HashAlgId::SHA256;
|
||||
let hasher = provider.hash.get(&alg).unwrap();
|
||||
let hasher = provider.get(&alg).unwrap();
|
||||
|
||||
let commitment = PlaintextHash {
|
||||
direction,
|
||||
@@ -718,6 +739,7 @@ mod tests {
|
||||
.verify_with_provider(
|
||||
&provider,
|
||||
&transcript.length(),
|
||||
None,
|
||||
&[TranscriptCommitment::Hash(commitment)],
|
||||
)
|
||||
.unwrap_err();
|
||||
@@ -898,13 +920,13 @@ mod tests {
|
||||
match kind {
|
||||
BuilderErrorKind::Cover { uncovered, .. } => {
|
||||
if !uncovered_sent_rangeset.is_empty() {
|
||||
assert_eq!(uncovered.sent, Idx(uncovered_sent_rangeset));
|
||||
assert_eq!(uncovered.sent, uncovered_sent_rangeset);
|
||||
}
|
||||
if !uncovered_recv_rangeset.is_empty() {
|
||||
assert_eq!(uncovered.recv, Idx(uncovered_recv_rangeset));
|
||||
assert_eq!(uncovered.recv, uncovered_recv_rangeset);
|
||||
}
|
||||
}
|
||||
_ => panic!("unexpected error kind: {:?}", kind),
|
||||
_ => panic!("unexpected error kind: {kind:?}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
328
crates/core/src/transcript/tls.rs
Normal file
@@ -0,0 +1,328 @@
|
||||
//! TLS transcript.
|
||||
|
||||
use crate::{
|
||||
connection::{
|
||||
CertBinding, CertBindingV1_2, ServerEphemKey, ServerSignature, TlsVersion, VerifyData,
|
||||
},
|
||||
transcript::{Direction, Transcript},
|
||||
webpki::CertificateDer,
|
||||
};
|
||||
use tls_core::msgs::{
|
||||
alert::AlertMessagePayload,
|
||||
codec::{Codec, Reader},
|
||||
enums::{AlertDescription, ContentType, ProtocolVersion},
|
||||
handshake::{HandshakeMessagePayload, HandshakePayload},
|
||||
};
|
||||
|
||||
/// A transcript of TLS records sent and received by the prover.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct TlsTranscript {
|
||||
time: u64,
|
||||
version: TlsVersion,
|
||||
server_cert_chain: Option<Vec<CertificateDer>>,
|
||||
server_signature: Option<ServerSignature>,
|
||||
certificate_binding: CertBinding,
|
||||
sent: Vec<Record>,
|
||||
recv: Vec<Record>,
|
||||
}
|
||||
|
||||
impl TlsTranscript {
|
||||
/// Creates a new TLS transcript.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
time: u64,
|
||||
version: TlsVersion,
|
||||
server_cert_chain: Option<Vec<CertificateDer>>,
|
||||
server_signature: Option<ServerSignature>,
|
||||
certificate_binding: CertBinding,
|
||||
verify_data: VerifyData,
|
||||
sent: Vec<Record>,
|
||||
recv: Vec<Record>,
|
||||
) -> Result<Self, TlsTranscriptError> {
|
||||
let mut sent_iter = sent.iter();
|
||||
let mut recv_iter = recv.iter();
|
||||
|
||||
// Make sure the client Finished message (carrying the client verify data) was sent first.
|
||||
if let Some(record) = sent_iter.next() {
|
||||
let payload = record
|
||||
.plaintext
|
||||
.as_ref()
|
||||
.ok_or(TlsTranscriptError::validation(
|
||||
"client finished message was hidden from the follower",
|
||||
))?;
|
||||
|
||||
let mut reader = Reader::init(payload);
|
||||
let payload =
|
||||
HandshakeMessagePayload::read_version(&mut reader, ProtocolVersion::TLSv1_2)
|
||||
.ok_or(TlsTranscriptError::validation(
|
||||
"first record sent was not a handshake message",
|
||||
))?;
|
||||
|
||||
let HandshakePayload::Finished(vd) = payload.payload else {
|
||||
return Err(TlsTranscriptError::validation(
|
||||
"first record sent was not a client finished message",
|
||||
));
|
||||
};
|
||||
|
||||
if vd.0 != verify_data.client_finished {
|
||||
return Err(TlsTranscriptError::validation(
|
||||
"inconsistent client finished verify data",
|
||||
));
|
||||
}
|
||||
} else {
|
||||
return Err(TlsTranscriptError::validation(
|
||||
"client finished was not sent",
|
||||
));
|
||||
}
|
||||
|
||||
// Make sure the server Finished message (carrying the server verify data) was received first.
|
||||
if let Some(record) = recv_iter.next() {
|
||||
let payload = record
|
||||
.plaintext
|
||||
.as_ref()
|
||||
.ok_or(TlsTranscriptError::validation(
|
||||
"server finished message was hidden from the follower",
|
||||
))?;
|
||||
|
||||
let mut reader = Reader::init(payload);
|
||||
let payload =
|
||||
HandshakeMessagePayload::read_version(&mut reader, ProtocolVersion::TLSv1_2)
|
||||
.ok_or(TlsTranscriptError::validation(
|
||||
"first record received was not a handshake message",
|
||||
))?;
|
||||
|
||||
let HandshakePayload::Finished(vd) = payload.payload else {
|
||||
return Err(TlsTranscriptError::validation(
|
||||
"first record received was not a server finished message",
|
||||
));
|
||||
};
|
||||
|
||||
if vd.0 != verify_data.server_finished {
|
||||
return Err(TlsTranscriptError::validation(
|
||||
"inconsistent server finished verify data",
|
||||
));
|
||||
}
|
||||
} else {
|
||||
return Err(TlsTranscriptError::validation(
|
||||
"server finished was not received",
|
||||
));
|
||||
}
|
||||
|
||||
// Verify last record sent was either application data or close notify.
|
||||
if let Some(record) = sent_iter.next_back() {
|
||||
match record.typ {
|
||||
ContentType::ApplicationData => {}
|
||||
ContentType::Alert => {
|
||||
// Ensure the alert is a close notify.
|
||||
let payload =
|
||||
record
|
||||
.plaintext
|
||||
.as_ref()
|
||||
.ok_or(TlsTranscriptError::validation(
|
||||
"alert content was hidden from the follower",
|
||||
))?;
|
||||
|
||||
let mut reader = Reader::init(payload);
|
||||
let payload = AlertMessagePayload::read(&mut reader).ok_or(
|
||||
TlsTranscriptError::validation("alert message was malformed"),
|
||||
)?;
|
||||
|
||||
let AlertDescription::CloseNotify = payload.description else {
|
||||
return Err(TlsTranscriptError::validation(
|
||||
"sent alert that is not close notify",
|
||||
));
|
||||
};
|
||||
}
|
||||
typ => {
|
||||
return Err(TlsTranscriptError::validation(format!(
|
||||
"sent unexpected record content type: {typ:?}"
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Verify last record received was either application data or close notify.
|
||||
if let Some(record) = recv_iter.next_back() {
|
||||
match record.typ {
|
||||
ContentType::ApplicationData => {}
|
||||
ContentType::Alert => {
|
||||
// Ensure the alert is a close notify.
|
||||
let payload =
|
||||
record
|
||||
.plaintext
|
||||
.as_ref()
|
||||
.ok_or(TlsTranscriptError::validation(
|
||||
"alert content was hidden from the follower",
|
||||
))?;
|
||||
|
||||
let mut reader = Reader::init(payload);
|
||||
let payload = AlertMessagePayload::read(&mut reader).ok_or(
|
||||
TlsTranscriptError::validation("alert message was malformed"),
|
||||
)?;
|
||||
|
||||
let AlertDescription::CloseNotify = payload.description else {
|
||||
return Err(TlsTranscriptError::validation(
|
||||
"received alert that is not close notify",
|
||||
));
|
||||
};
|
||||
}
|
||||
typ => {
|
||||
return Err(TlsTranscriptError::validation(format!(
|
||||
"received unexpected record content type: {typ:?}"
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure all other records were application data.
|
||||
for record in sent_iter {
|
||||
if record.typ != ContentType::ApplicationData {
|
||||
return Err(TlsTranscriptError::validation(format!(
|
||||
"sent unexpected record content type: {:?}",
|
||||
record.typ
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
for record in recv_iter {
|
||||
if record.typ != ContentType::ApplicationData {
|
||||
return Err(TlsTranscriptError::validation(format!(
|
||||
"received unexpected record content type: {:?}",
|
||||
record.typ
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
time,
|
||||
version,
|
||||
server_cert_chain,
|
||||
server_signature,
|
||||
certificate_binding,
|
||||
sent,
|
||||
recv,
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the start time of the connection.
|
||||
pub fn time(&self) -> u64 {
|
||||
self.time
|
||||
}
|
||||
|
||||
/// Returns the TLS protocol version.
|
||||
pub fn version(&self) -> &TlsVersion {
|
||||
&self.version
|
||||
}
|
||||
|
||||
/// Returns the server certificate chain.
|
||||
pub fn server_cert_chain(&self) -> Option<&[CertificateDer]> {
|
||||
self.server_cert_chain.as_deref()
|
||||
}
|
||||
|
||||
/// Returns the server signature.
|
||||
pub fn server_signature(&self) -> Option<&ServerSignature> {
|
||||
self.server_signature.as_ref()
|
||||
}
|
||||
|
||||
/// Returns the server ephemeral key used in the TLS handshake.
|
||||
pub fn server_ephemeral_key(&self) -> &ServerEphemKey {
|
||||
match &self.certificate_binding {
|
||||
CertBinding::V1_2(CertBindingV1_2 {
|
||||
server_ephemeral_key,
|
||||
..
|
||||
}) => server_ephemeral_key,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the certificate binding data.
|
||||
pub fn certificate_binding(&self) -> &CertBinding {
|
||||
&self.certificate_binding
|
||||
}
|
||||
|
||||
/// Returns the sent records.
|
||||
pub fn sent(&self) -> &[Record] {
|
||||
&self.sent
|
||||
}
|
||||
|
||||
/// Returns the received records.
|
||||
pub fn recv(&self) -> &[Record] {
|
||||
&self.recv
|
||||
}
|
||||
|
||||
/// Returns the application data transcript.
|
||||
pub fn to_transcript(&self) -> Result<Transcript, TlsTranscriptError> {
|
||||
let mut sent = Vec::new();
|
||||
let mut recv = Vec::new();
|
||||
|
||||
for record in self
|
||||
.sent
|
||||
.iter()
|
||||
.filter(|record| record.typ == ContentType::ApplicationData)
|
||||
{
|
||||
let plaintext = record
|
||||
.plaintext
|
||||
.as_ref()
|
||||
.ok_or(ErrorRepr::Incomplete {
|
||||
direction: Direction::Sent,
|
||||
seq: record.seq,
|
||||
})?
|
||||
.clone();
|
||||
sent.extend_from_slice(&plaintext);
|
||||
}
|
||||
|
||||
for record in self
|
||||
.recv
|
||||
.iter()
|
||||
.filter(|record| record.typ == ContentType::ApplicationData)
|
||||
{
|
||||
let plaintext = record
|
||||
.plaintext
|
||||
.as_ref()
|
||||
.ok_or(ErrorRepr::Incomplete {
|
||||
direction: Direction::Received,
|
||||
seq: record.seq,
|
||||
})?
|
||||
.clone();
|
||||
recv.extend_from_slice(&plaintext);
|
||||
}
|
||||
|
||||
Ok(Transcript::new(sent, recv))
|
||||
}
|
||||
}
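A brief sketch of consuming the application-data view of a `TlsTranscript` (the re-export path is an assumption):

```rust
use tlsn_core::transcript::{TlsTranscript, Transcript};

fn application_data(tls: &TlsTranscript) -> Transcript {
    // Fails if the plaintext of any application-data record was withheld,
    // since `to_transcript` requires every such record to be complete.
    let transcript = tls.to_transcript().expect("transcript should be complete");

    println!(
        "sent {} bytes, received {} bytes",
        transcript.sent().len(),
        transcript.received().len()
    );

    transcript
}
```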
|
||||
|
||||
/// A TLS record.
|
||||
#[derive(Clone)]
|
||||
pub struct Record {
|
||||
/// Sequence number.
|
||||
pub seq: u64,
|
||||
/// Content type.
|
||||
pub typ: ContentType,
|
||||
/// Plaintext.
|
||||
pub plaintext: Option<Vec<u8>>,
|
||||
/// Explicit nonce.
|
||||
pub explicit_nonce: Vec<u8>,
|
||||
/// Ciphertext.
|
||||
pub ciphertext: Vec<u8>,
|
||||
/// Tag.
|
||||
pub tag: Option<Vec<u8>>,
|
||||
}
|
||||
|
||||
opaque_debug::implement!(Record);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("TLS transcript error: {0}")]
|
||||
pub struct TlsTranscriptError(#[from] ErrorRepr);
|
||||
|
||||
impl TlsTranscriptError {
|
||||
fn validation(msg: impl Into<String>) -> Self {
|
||||
Self(ErrorRepr::Validation(msg.into()))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
enum ErrorRepr {
|
||||
#[error("validation error: {0}")]
|
||||
Validation(String),
|
||||
#[error("incomplete transcript ({direction}): seq {seq}")]
|
||||
Incomplete { direction: Direction, seq: u64 },
|
||||
}
|
||||
168
crates/core/src/webpki.rs
Normal file
@@ -0,0 +1,168 @@
|
||||
//! Web PKI types.
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use rustls_pki_types::{self as webpki_types, pem::PemObject};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::connection::ServerName;
|
||||
|
||||
/// X.509 certificate, DER encoded.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct CertificateDer(pub Vec<u8>);
|
||||
|
||||
impl CertificateDer {
|
||||
/// Creates a DER-encoded certificate from a PEM-encoded certificate.
|
||||
pub fn from_pem_slice(pem: &[u8]) -> Result<Self, PemError> {
|
||||
let der = webpki_types::CertificateDer::from_pem_slice(pem).map_err(|_| PemError {})?;
|
||||
|
||||
Ok(Self(der.to_vec()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Private key, DER encoded.
|
||||
#[derive(Debug, Clone, zeroize::ZeroizeOnDrop, Serialize, Deserialize)]
|
||||
pub struct PrivateKeyDer(pub Vec<u8>);
|
||||
|
||||
impl PrivateKeyDer {
|
||||
/// Creates a DER-encoded private key from a PEM-encoded private key.
|
||||
pub fn from_pem_slice(pem: &[u8]) -> Result<Self, PemError> {
|
||||
let der = webpki_types::PrivateKeyDer::from_pem_slice(pem).map_err(|_| PemError {})?;
|
||||
|
||||
Ok(Self(der.secret_der().to_vec()))
|
||||
}
|
||||
}
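For instance, a PEM-encoded certificate and key could be converted with these helpers (a hypothetical caller, with the inputs supplied as byte slices):

```rust
use tlsn_core::webpki::{CertificateDer, PrivateKeyDer};

fn load_identity(cert_pem: &[u8], key_pem: &[u8]) -> (CertificateDer, PrivateKeyDer) {
    // Both helpers parse PEM input and keep only the DER bytes.
    let cert = CertificateDer::from_pem_slice(cert_pem).expect("valid certificate PEM");
    let key = PrivateKeyDer::from_pem_slice(key_pem).expect("valid private key PEM");
    (cert, key)
}
```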
|
||||
|
||||
/// PEM parsing error.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("failed to parse PEM object")]
|
||||
pub struct PemError {}
|
||||
|
||||
/// Root certificate store.
|
||||
///
|
||||
/// This stores root certificates which are used to verify end-entity
|
||||
/// certificates presented by a TLS server.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct RootCertStore {
|
||||
/// Unvalidated DER-encoded X.509 root certificates.
|
||||
pub roots: Vec<CertificateDer>,
|
||||
}
|
||||
|
||||
impl RootCertStore {
|
||||
/// Creates an empty root certificate store.
|
||||
pub fn empty() -> Self {
|
||||
Self { roots: Vec::new() }
|
||||
}
|
||||
}
|
||||
|
||||
/// Server certificate verifier.
|
||||
#[derive(Debug)]
|
||||
pub struct ServerCertVerifier {
|
||||
roots: Vec<webpki_types::TrustAnchor<'static>>,
|
||||
}
|
||||
|
||||
impl ServerCertVerifier {
|
||||
/// Creates a new server certificate verifier.
|
||||
pub fn new(roots: &RootCertStore) -> Result<Self, ServerCertVerifierError> {
|
||||
let roots = roots
|
||||
.roots
|
||||
.iter()
|
||||
.map(|cert| {
|
||||
webpki::anchor_from_trusted_cert(&webpki_types::CertificateDer::from(
|
||||
cert.0.as_slice(),
|
||||
))
|
||||
.map(|anchor| anchor.to_owned())
|
||||
.map_err(|err| ServerCertVerifierError::InvalidRootCertificate {
|
||||
cert: cert.clone(),
|
||||
reason: err.to_string(),
|
||||
})
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
Ok(Self { roots })
|
||||
}
|
||||
|
||||
/// Creates a new server certificate verifier with Mozilla root
|
||||
/// certificates.
|
||||
pub fn mozilla() -> Self {
|
||||
Self {
|
||||
roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Verifies the server certificate was valid at the given time of
|
||||
/// presentation.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `end_entity` - End-entity certificate to verify.
|
||||
/// * `intermediates` - Intermediate certificates to a trust anchor.
|
||||
/// * `server_name` - Server DNS name.
|
||||
/// * `time` - Unix time the certificate was presented.
|
||||
pub fn verify_server_cert(
|
||||
&self,
|
||||
end_entity: &CertificateDer,
|
||||
intermediates: &[CertificateDer],
|
||||
server_name: &ServerName,
|
||||
time: u64,
|
||||
) -> Result<(), ServerCertVerifierError> {
|
||||
let cert = webpki_types::CertificateDer::from(end_entity.0.as_slice());
|
||||
let cert = webpki::EndEntityCert::try_from(&cert).map_err(|e| {
|
||||
ServerCertVerifierError::InvalidEndEntityCertificate {
|
||||
cert: end_entity.clone(),
|
||||
reason: e.to_string(),
|
||||
}
|
||||
})?;
|
||||
let intermediates = intermediates
|
||||
.iter()
|
||||
.map(|c| webpki_types::CertificateDer::from(c.0.as_slice()))
|
||||
.collect::<Vec<_>>();
|
||||
let server_name = server_name.to_webpki();
|
||||
let time = webpki_types::UnixTime::since_unix_epoch(Duration::from_secs(time));
|
||||
|
||||
cert.verify_for_usage(
|
||||
webpki::ALL_VERIFICATION_ALGS,
|
||||
&self.roots,
|
||||
&intermediates,
|
||||
time,
|
||||
webpki::KeyUsage::server_auth(),
|
||||
None,
|
||||
None,
|
||||
)
|
||||
.map(|_| ())
|
||||
.map_err(|_| ServerCertVerifierError::InvalidPath)?;
|
||||
|
||||
cert.verify_is_valid_for_subject_name(&server_name)
|
||||
.map_err(|_| ServerCertVerifierError::InvalidServerName)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
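A hedged sketch of using the verifier with the bundled Mozilla roots; the caller is assumed to already hold the presented chain, the expected `ServerName`, and the connection time:

```rust
use tlsn_core::{
    connection::ServerName,
    webpki::{CertificateDer, ServerCertVerifier},
};

fn check_server_identity(
    end_entity: &CertificateDer,
    intermediates: &[CertificateDer],
    server_name: &ServerName,
    time: u64,
) {
    // Use the bundled Mozilla trust anchors; `ServerCertVerifier::new` accepts
    // a custom `RootCertStore` instead.
    let verifier = ServerCertVerifier::mozilla();

    verifier
        .verify_server_cert(end_entity, intermediates, server_name, time)
        .expect("certificate should verify for this server name at this time");
}
```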
|
||||
|
||||
/// Error for [`ServerCertVerifier`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("server certificate verification failed: {0}")]
|
||||
pub enum ServerCertVerifierError {
|
||||
/// Root certificate store contains invalid certificate.
|
||||
#[error("root certificate store contains invalid certificate: {reason}")]
|
||||
InvalidRootCertificate {
|
||||
/// Invalid certificate.
|
||||
cert: CertificateDer,
|
||||
/// Reason for invalidity.
|
||||
reason: String,
|
||||
},
|
||||
/// End-entity certificate is invalid.
|
||||
#[error("end-entity certificate is invalid: {reason}")]
|
||||
InvalidEndEntityCertificate {
|
||||
/// Invalid certificate.
|
||||
cert: CertificateDer,
|
||||
/// Reason for invalidity.
|
||||
reason: String,
|
||||
},
|
||||
/// Failed to verify certificate path to provided trust anchors.
|
||||
#[error("failed to verify certificate path to provided trust anchors")]
|
||||
InvalidPath,
|
||||
/// Failed to verify certificate is valid for provided server name.
|
||||
#[error("failed to verify certificate is valid for provided server name")]
|
||||
InvalidServerName,
|
||||
}
|
||||
@@ -8,13 +8,8 @@ version = "0.0.0"
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
notary-client = { workspace = true }
|
||||
tlsn-common = { workspace = true }
|
||||
tlsn-core = { workspace = true }
|
||||
tlsn-prover = { workspace = true }
|
||||
tlsn-verifier = { workspace = true }
|
||||
tlsn = { workspace = true }
|
||||
tlsn-formats = { workspace = true }
|
||||
tlsn-tls-core = { workspace = true }
|
||||
tls-server-fixture = { workspace = true }
|
||||
tlsn-server-fixture = { workspace = true }
|
||||
tlsn-server-fixture-certs = { workspace = true }
|
||||
@@ -23,13 +18,13 @@ spansy = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
clap = { version = "4.5", features = ["derive"] }
|
||||
dotenv = { version = "0.15.0" }
|
||||
futures = { workspace = true }
|
||||
http-body-util = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
hyper = { workspace = true, features = ["client", "http1"] }
|
||||
hyper-util = { workspace = true, features = ["full"] }
|
||||
k256 = { workspace = true, features = ["ecdsa"] }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
tokio = { workspace = true, features = [
|
||||
"rt",
|
||||
@@ -42,6 +37,15 @@ tokio = { workspace = true, features = [
|
||||
tokio-util = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
noir = { git = "https://github.com/zkmopro/noir-rs", tag = "v1.0.0-beta.8", features = ["barretenberg"] }
|
||||
|
||||
[[example]]
|
||||
name = "interactive"
|
||||
path = "interactive/interactive.rs"
|
||||
|
||||
[[example]]
|
||||
name = "interactive_zk"
|
||||
path = "interactive_zk/interactive_zk.rs"
|
||||
|
||||
[[example]]
|
||||
name = "attestation_prove"
|
||||
@@ -54,7 +58,3 @@ path = "attestation/present.rs"
|
||||
[[example]]
|
||||
name = "attestation_verify"
|
||||
path = "attestation/verify.rs"
|
||||
|
||||
[[example]]
|
||||
name = "interactive"
|
||||
path = "interactive/interactive.rs"
|
||||
|
||||
@@ -5,4 +5,4 @@ This folder contains examples demonstrating how to use the TLSNotary protocol.
|
||||
* [Interactive](./interactive/README.md): Interactive Prover and Verifier session without a trusted notary.
|
||||
* [Attestation](./attestation/README.md): Performing a simple notarization with a trusted notary.
|
||||
|
||||
Refer to <https://docs.tlsnotary.org/quick_start/index.html> for a quick start guide to using TLSNotary with these examples.
|
||||
Refer to <https://tlsnotary.org/docs/quick_start> for a quick start guide to using TLSNotary with these examples.
|
||||
@@ -1,74 +1,81 @@
## Simple Attestation Example: Notarize Public Data from example.com (Rust) <a name="rust-simple"></a>
# Attestation Example

This example demonstrates the simplest possible use case for TLSNotary. A Prover notarizes data from a local test server with a local Notary.

**Overview**:
1. Notarize a request and response from the test server and acquire an attestation of its content.
2. Create a redacted, verifiable presentation using the attestation.
3. Verify the presentation.
This example demonstrates a **TLSNotary attestation workflow**: notarizing data from a server with a trusted third party (Notary), then creating verifiable presentations with selective disclosure of sensitive information to a Verifier.

### 1. Notarize
## 🔍 How It Works

Before starting the notarization, set up the local test server and local notary.
```mermaid
sequenceDiagram
    participant P as Prover
    participant N as MPC-TLS<br/>Verifier
    participant S as Server<br/>Fixture
    participant V as Attestation<br/>Verifier

1. Run the test server:
```shell
PORT=4000 cargo run --bin tlsn-server-fixture
```
2. Run the notary server:
```shell
cargo run --release --bin notary-server
```
3. Run the prove example:
```shell
SERVER_PORT=4000 cargo run --release --example attestation_prove
```
    Note over P,S: 1. Notarization Phase
    P->>N: Establish MPC-TLS connection
    P->>S: Request (MPC-TLS)
    S->>P: Response (MPC-TLS)
    N->>P: Issue signed attestation

To see more details, run with additional debug information:
```shell
RUST_LOG=debug,yamux=info,uid_mux=info SERVER_PORT=4000 cargo run --release --example attestation_prove
    Note over P: 2. Presentation Phase
    P->>P: Create redacted presentation

    Note over P,V: 3. Verification Phase
    P->>V: Share presentation
    V->>V: Verify attestation signature
```

If notarization is successful, you should see the following output in the console:
```log
Starting an MPC TLS connection with the server
Got a response from the server: 200 OK
Notarization complete!
### The Three-Step Process

1. **🔐 Notarize**: Prover collaborates with Notary to create an authenticated TLS session and obtain a signed attestation
2. **✂️ Present**: Prover creates a selective presentation, choosing which data to reveal or redact
3. **✅ Verify**: Anyone can verify the presentation's authenticity using the Notary's public key

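At the type level, the three steps hand a small set of artifacts to one another. The sketch below is illustrative only: the types match the `tlsn::attestation` imports used by these examples, but the three free functions are placeholders standing in for the example binaries, not tlsn APIs.

```rust
use tlsn::attestation::{
    presentation::{Presentation, PresentationOutput},
    Attestation, CryptoProvider, Secrets,
};

// Placeholder signatures mirroring the three example binaries (not tlsn APIs).

// Step 1 (attestation_prove): run MPC-TLS with the Notary and obtain a signed
// attestation plus the secrets needed to open commitments later.
fn notarize() -> (Attestation, Secrets) {
    unimplemented!("see attestation/prove.rs")
}

// Step 2 (attestation_present): choose which transcript bytes to reveal and
// build a redacted, verifiable presentation.
fn present(_attestation: &Attestation, _secrets: &Secrets) -> Presentation {
    unimplemented!("see attestation/present.rs")
}

// Step 3 (attestation_verify): anyone holding the Notary's verifying key can
// check the presentation and read only the disclosed data.
fn verify(_presentation: Presentation, _provider: &CryptoProvider) -> PresentationOutput {
    unimplemented!("see attestation/verify.rs")
}
```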
## 🚀 Quick Start

### Step 1: Notarize Data

**Start the test server** (from repository root):
```bash
RUST_LOG=info PORT=4000 cargo run --bin tlsn-server-fixture
```

**Run the notarization** (in a new terminal):
```bash
RUST_LOG=info SERVER_PORT=4000 cargo run --release --example attestation_prove
```

**Expected output:**
```
Notarization completed successfully!
The attestation has been written to `example-json.attestation.tlsn` and the corresponding secrets to `example-json.secrets.tlsn`.
```

⚠️ Note: In this example, we run a local Notary server for demonstration purposes. In real-world applications, the Notary should be operated by a trusted third party. Refer to the [Notary Server Documentation](https://docs.tlsnotary.org/developers/notary_server.html) for more details on running a Notary server.
### Step 2: Create Verifiable Presentation

### 2. Build a Verifiable Presentation

This step creates a verifiable presentation with optional redactions, which can be shared with any verifier.

Run the present example:
```shell
**Generate a redacted presentation:**
```bash
cargo run --release --example attestation_present
```

If successful, you’ll see this output in the console:

```log
**Expected output:**
```
Presentation built successfully!
The presentation has been written to `example-json.presentation.tlsn`.
```

You can create multiple presentations from the attestation and secrets in the notarization step, each with customized data redactions. You are invited to experiment!
> 💡 **Tip**: You can create multiple presentations from the same attestation, each with different redactions!

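Under the hood, `attestation_present` loads the attestation and secrets and selects which transcript ranges to reveal. The sketch below is a rough approximation only: the builder names (`transcript_proof_builder`, `reveal_sent`, `reveal_recv`, `presentation_builder`, `identity_proof`) are assumptions carried over from earlier tlsn releases and may not match this version exactly.

```rust
use tlsn::attestation::{presentation::Presentation, Attestation, CryptoProvider, Secrets};

// Sketch only: method names are assumptions based on earlier tlsn releases.
fn build_presentation(
    attestation: &Attestation,
    secrets: &Secrets,
    provider: &CryptoProvider,
) -> Result<Presentation, Box<dyn std::error::Error>> {
    // Reveal only the chosen byte ranges; everything else is redacted and
    // shows up as 'X' on the verifier's side.
    let mut proof_builder = secrets.transcript_proof_builder();
    proof_builder.reveal_sent(&(0..10))?;
    proof_builder.reveal_recv(&(0..50))?;
    let transcript_proof = proof_builder.build()?;

    let mut builder = attestation.presentation_builder(provider);
    builder
        .identity_proof(secrets.identity_proof())
        .transcript_proof(transcript_proof);

    Ok(builder.build()?)
}
```

Each call with different reveal ranges produces a different presentation from the same attestation, which is what makes the "multiple presentations" tip above work.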
### 3. Verify the Presentation
### Step 3: Verify the Presentation

This step reads the presentation created above, verifies it, and prints the disclosed data to the console.

Run the verify binary:
```shell
**Verify the presentation:**
```bash
cargo run --release --example attestation_verify
```

Upon success, you should see output similar to:
```log
**Expected output:**
```
Verifying presentation with {key algorithm} key: { hex encoded key }

**Ask yourself, do you trust this key?**
@@ -78,33 +85,80 @@ Successfully verified that the data below came from a session with test-server.i
Note that the data which the Prover chose not to disclose are shown as X.

Data sent:
...

GET /formats/json HTTP/1.1
host: test-server.io
accept: */*
accept-encoding: identity
connection: close
user-agent: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

Data received:

HTTP/1.1 200 OK
content-type: application/json
content-length: 722
connection: close
date: Mon, 08 Sep 2025 09:18:29 GMT

XXXXXX1234567890XXXXXXXXXXXXXXXXXXXXXXXXJohn DoeXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX1.2XX
```

⚠️ The presentation includes a “verifying key,” which the Notary used when issuing the attestation. If you trust this key, you can trust the authenticity of the presented data.
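For reference, the core of `attestation_verify` fits in a few lines. The sketch below follows the types imported in this diff (`Presentation`, `PresentationOutput`, `CryptoProvider`); the exact field and accessor names are assumptions and may differ from the shipped example.

```rust
use tlsn::attestation::{
    presentation::{Presentation, PresentationOutput},
    CryptoProvider,
};

// Sketch only: field and method names are assumptions based on the imports above.
fn check(presentation: Presentation) -> Result<(), Box<dyn std::error::Error>> {
    let provider = CryptoProvider::default();

    // Verification checks the Notary signature and the consistency of the
    // disclosed transcript ranges.
    let PresentationOutput {
        server_name,
        transcript,
        ..
    } = presentation.verify(&provider)?;

    println!("data came from: {server_name:?}");
    if let Some(transcript) = transcript {
        // Bytes the Prover chose not to disclose are not authenticated; the
        // example replaces them with 'X' before printing.
        println!(
            "sent {} bytes, received {} bytes",
            transcript.sent_unsafe().len(),
            transcript.received_unsafe().len()
        );
    }
    Ok(())
}
```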
## 🎯 Use Cases & Examples

### HTML
### JSON Data (Default)
Perfect for API responses, configuration data, or structured information:
```bash
# All three steps use JSON by default
SERVER_PORT=4000 cargo run --release --example attestation_prove
cargo run --release --example attestation_present
cargo run --release --example attestation_verify
```

In the example above, we notarized a JSON response. TLSNotary also supports notarizing HTML content. To run an HTML example, use:

```shell
# notarize
### HTML Content
Ideal for web pages, forms, or any HTML-based data:
```bash
# Notarize HTML content
SERVER_PORT=4000 cargo run --release --example attestation_prove -- html
# present
cargo run --release --example attestation_present -- html
# verify
cargo run --release --example attestation_verify -- html
```

### Private Data

The examples above demonstrate how to use TLSNotary with publicly accessible data. TLSNotary can also be used for private data that requires authentication. To access this data, you can add the necessary headers (such as an authentication token) or cookies to your request. To run an example that uses an authentication token, execute the following command:

```shell
# notarize
### Authenticated/Private Data
For APIs requiring authentication tokens, cookies, or private access:
```bash
# Notarize private data with authentication
SERVER_PORT=4000 cargo run --release --example attestation_prove -- authenticated
# present
cargo run --release --example attestation_present -- authenticated
# verify
cargo run --release --example attestation_verify -- authenticated
```

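Inside the prover, authenticated access is nothing more than an extra header on the request that is sent over the MPC-TLS connection. A minimal sketch with hyper (the token value matches the dummy token expected by the server fixture; in a real application it would be your own credential):

```rust
use http_body_util::Empty;
use hyper::{body::Bytes, Request};

// Build the request the prover sends through the MPC-TLS connection.
// "random_auth_token" is the dummy value accepted by the server fixture's
// /protected endpoint; substitute your own token or cookie in practice.
fn build_authenticated_request() -> Result<Request<Empty<Bytes>>, hyper::http::Error> {
    Request::builder()
        .uri("https://test-server.io/protected")
        .header("Host", "test-server.io")
        .header("Authorization", "random_auth_token")
        .body(Empty::<Bytes>::new())
}
```

Because the header is part of the sent transcript, you will usually redact the token when building the presentation, just like the user-agent in the output above.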
### Debug Mode

For detailed logging and troubleshooting:
```bash
RUST_LOG=debug,yamux=info,uid_mux=info SERVER_PORT=4000 cargo run --release --example attestation_prove
```

### Generated Files

After running the examples, you'll find:
- **`*.attestation.tlsn`**: The cryptographically signed attestation from the Notary
- **`*.secrets.tlsn`**: Cryptographic secrets needed to create presentations
- **`*.presentation.tlsn`**: The verifiable presentation with your chosen redactions

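If you want to work with these files outside the examples, they can be read back programmatically. The sketch below assumes bincode serialization, as used by earlier releases of these examples; check `tlsn_examples` for the exact on-disk format.

```rust
use tlsn::attestation::{presentation::Presentation, Attestation, Secrets};

// Sketch only: the bincode format is an assumption carried over from earlier
// releases of these examples.
fn load_artifacts() -> Result<(Attestation, Secrets, Presentation), Box<dyn std::error::Error>> {
    let attestation: Attestation =
        bincode::deserialize(&std::fs::read("example-json.attestation.tlsn")?)?;
    let secrets: Secrets =
        bincode::deserialize(&std::fs::read("example-json.secrets.tlsn")?)?;
    let presentation: Presentation =
        bincode::deserialize(&std::fs::read("example-json.presentation.tlsn")?)?;
    Ok((attestation, secrets, presentation))
}
```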
## 🔐 Security Considerations

### Trust Model
- ✅ **Notary Key**: The presentation includes the Notary's verifying key; the verifier must trust this key
- ✅ **Data Authenticity**: Cryptographically guaranteed that data came from the specified server
- ✅ **Tamper Evidence**: Any modification to the presentation will fail verification
- ⚠️ **Notary Trust**: The verifier must trust the Notary not to collude with the Prover

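In practice, "trust this key" means comparing the verifying key embedded in the presentation against a key you obtained out of band (for example, published by the Notary operator). A sketch, assuming a `verifying_key()` accessor with a raw `data` field as in earlier tlsn releases:

```rust
use tlsn::attestation::presentation::Presentation;

// Sketch only: `verifying_key()` and its `data` field are assumptions based on
// earlier tlsn releases. Reject unknown Notary keys before trusting anything else.
fn is_trusted_notary(presentation: &Presentation, trusted_key: &[u8]) -> bool {
    presentation.verifying_key().data == trusted_key
}
```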
### Production Deployment
- 🏭 **Independent Notary**: Use a trusted third-party Notary service (not a local one)
- 🔒 **Key Management**: Implement proper Notary key distribution and verification
- 📋 **Audit Trail**: Maintain logs of notarization and verification events
- 🔄 **Key Rotation**: Plan for Notary key updates and migration

> ⚠️ **Demo Notice**: This example uses a local test server and local Notary for demonstration. In production, use trusted third-party Notary services and real server endpoints.
@@ -5,7 +5,7 @@
use clap::Parser;
use hyper::header;

use tlsn_core::{attestation::Attestation, presentation::Presentation, CryptoProvider, Secrets};
use tlsn::attestation::{presentation::Presentation, Attestation, CryptoProvider, Secrets};
use tlsn_examples::ExampleType;
use tlsn_formats::http::HttpTranscript;

@@ -1,6 +1,6 @@
// This example demonstrates how to use the Prover to acquire an attestation for
// an HTTP request sent to example.com. The attestation and secrets are saved to
// disk.
// an HTTP request sent to a server fixture. The attestation and secrets are
// saved to disk.

use std::env;

@@ -9,18 +9,31 @@ use http_body_util::Empty;
use hyper::{body::Bytes, Request, StatusCode};
use hyper_util::rt::TokioIo;
use spansy::Spanned;
use tokio::{
    io::{AsyncRead, AsyncWrite},
    sync::oneshot::{self, Receiver, Sender},
};
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::debug;
use tracing::info;

use notary_client::{Accepted, NotarizationRequest, NotaryClient};
use tls_core::verify::WebPkiVerifier;
use tls_server_fixture::{CA_CERT_DER, SERVER_DOMAIN};
use tlsn_common::config::ProtocolConfig;
use tlsn_core::{request::RequestConfig, transcript::TranscriptCommitConfig, CryptoProvider};
use tlsn::{
    attestation::{
        request::{Request as AttestationRequest, RequestConfig},
        signing::Secp256k1Signer,
        Attestation, AttestationConfig, CryptoProvider, Secrets,
    },
    config::{
        CertificateDer, PrivateKeyDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore,
    },
    connection::{ConnectionInfo, HandshakeData, ServerName, TranscriptLength},
    prover::{state::Committed, ProveConfig, Prover, ProverConfig, ProverOutput, TlsConfig},
    transcript::{ContentType, TranscriptCommitConfig},
    verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
};
use tlsn_examples::ExampleType;
use tlsn_formats::http::{DefaultHttpCommitter, HttpCommit, HttpTranscript};
use tlsn_prover::{Prover, ProverConfig};
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
use tlsn_server_fixture_certs::{CA_CERT_DER, CLIENT_CERT_DER, CLIENT_KEY_DER, SERVER_DOMAIN};

// Setting of the application server.
const USER_AGENT: &str = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36";
@@ -35,79 +48,72 @@ struct Args {

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let args = Args::parse();
    tracing_subscriber::fmt::init();

    let args = Args::parse();
    let (uri, extra_headers) = match args.example_type {
        ExampleType::Json => ("/formats/json", vec![]),
        ExampleType::Html => ("/formats/html", vec![]),
        ExampleType::Authenticated => ("/protected", vec![("Authorization", "random_auth_token")]),
    };

    notarize(uri, extra_headers, &args.example_type).await
    let (notary_socket, prover_socket) = tokio::io::duplex(1 << 23);
    let (request_tx, request_rx) = oneshot::channel();
    let (attestation_tx, attestation_rx) = oneshot::channel();

    tokio::spawn(async move {
        notary(notary_socket, request_rx, attestation_tx)
            .await
            .unwrap()
    });

    prover(
        prover_socket,
        request_tx,
        attestation_rx,
        uri,
        extra_headers,
        &args.example_type,
    )
    .await?;

    Ok(())
}

async fn notarize(
async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
    socket: S,
    req_tx: Sender<AttestationRequest>,
    resp_rx: Receiver<Attestation>,
    uri: &str,
    extra_headers: Vec<(&str, &str)>,
    example_type: &ExampleType,
) -> Result<(), Box<dyn std::error::Error>> {
    tracing_subscriber::fmt::init();

    let notary_host: String = env::var("NOTARY_HOST").unwrap_or("127.0.0.1".into());
    let notary_port: u16 = env::var("NOTARY_PORT")
        .map(|port| port.parse().expect("port should be valid integer"))
        .unwrap_or(7047);
    let server_host: String = env::var("SERVER_HOST").unwrap_or("127.0.0.1".into());
    let server_port: u16 = env::var("SERVER_PORT")
        .map(|port| port.parse().expect("port should be valid integer"))
        .unwrap_or(DEFAULT_FIXTURE_PORT);

    // Build a client to connect to the notary server.
    let notary_client = NotaryClient::builder()
        .host(notary_host)
        .port(notary_port)
        // WARNING: Always use TLS to connect to notary server, except if notary is running locally
        // e.g. this example, hence `enable_tls` is set to False (else it always defaults to True).
        .enable_tls(false)
        .build()
        .unwrap();
    // Create a root certificate store with the server-fixture's self-signed
    // certificate. This is only required for offline testing with the
    // server-fixture.
    let mut tls_config_builder = TlsConfig::builder();
    tls_config_builder
        .root_store(RootCertStore {
            roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
        })
        // (Optional) Set up TLS client authentication if required by the server.
        .client_auth((
            vec![CertificateDer(CLIENT_CERT_DER.to_vec())],
            PrivateKeyDer(CLIENT_KEY_DER.to_vec()),
        ));

    // Send requests for configuration and notarization to the notary server.
    let notarization_request = NotarizationRequest::builder()
        // We must configure the amount of data we expect to exchange beforehand, which will
        // be preprocessed prior to the connection. Reducing these limits will improve
        // performance.
        .max_sent_data(tlsn_examples::MAX_SENT_DATA)
        .max_recv_data(tlsn_examples::MAX_RECV_DATA)
        .build()?;

    let Accepted {
        io: notary_connection,
        id: _session_id,
        ..
    } = notary_client
        .request_notarization(notarization_request)
        .await
        .expect("Could not connect to notary. Make sure it is running.");

    // Create a crypto provider accepting the server-fixture's self-signed
    // root certificate.
    //
    // This is only required for offline testing with the server-fixture. In
    // production, use `CryptoProvider::default()` instead.
    let mut root_store = tls_core::anchors::RootCertStore::empty();
    root_store
        .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
        .unwrap();
    let crypto_provider = CryptoProvider {
        cert: WebPkiVerifier::new(root_store, None),
        ..Default::default()
    };
    let tls_config = tls_config_builder.build().unwrap();

    // Set up protocol configuration for prover.
    // Prover configuration.
    let prover_config = ProverConfig::builder()
        .server_name(SERVER_DOMAIN)
    let mut prover_config_builder = ProverConfig::builder();
    prover_config_builder
        .server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
        .tls_config(tls_config)
        .protocol_config(
            ProtocolConfig::builder()
                // We must configure the amount of data we expect to exchange beforehand, which will
@@ -116,14 +122,12 @@ async fn notarize(
                .max_sent_data(tlsn_examples::MAX_SENT_DATA)
                .max_recv_data(tlsn_examples::MAX_RECV_DATA)
                .build()?,
        )
        .crypto_provider(crypto_provider)
        .build()?;
        );

    let prover_config = prover_config_builder.build()?;

    // Create a new prover and perform necessary setup.
    let prover = Prover::new(prover_config)
        .setup(notary_connection.compat())
        .await?;
    let prover = Prover::new(prover_config).setup(socket.compat()).await?;

    // Open a TCP connection to the server.
    let client_socket = tokio::net::TcpStream::connect((server_host, server_port)).await?;
@@ -161,17 +165,17 @@ async fn notarize(
    }
    let request = request_builder.body(Empty::<Bytes>::new())?;

    println!("Starting an MPC TLS connection with the server");
    info!("Starting an MPC TLS connection with the server");

    // Send the request to the server and wait for the response.
    let response = request_sender.send_request(request).await?;

    println!("Got a response from the server: {}", response.status());
    info!("Got a response from the server: {}", response.status());

    assert!(response.status() == StatusCode::OK);

    // The prover task should be done now, so we can await it.
    let mut prover = prover_task.await??;
    let prover = prover_task.await??;

    // Parse the HTTP transcript.
    let transcript = HttpTranscript::parse(prover.transcript())?;
@@ -182,10 +186,10 @@ async fn notarize(
        match body_content {
            tlsn_formats::http::BodyContent::Json(_json) => {
                let parsed = serde_json::from_str::<serde_json::Value>(&body)?;
                debug!("{}", serde_json::to_string_pretty(&parsed)?);
                info!("{}", serde_json::to_string_pretty(&parsed)?);
            }
            tlsn_formats::http::BodyContent::Unknown(_span) => {
                debug!("{}", &body);
                info!("{}", &body);
            }
            _ => {}
        }
@@ -213,10 +217,7 @@ async fn notarize(

    let request_config = builder.build()?;

    #[allow(deprecated)]
    let (attestation, secrets) = prover.notarize(&request_config).await?;

    println!("Notarization complete!");
    let (attestation, secrets) = notarize(prover, &request_config, req_tx, resp_rx).await?;

    // Write the attestation to disk.
    let attestation_path = tlsn_examples::get_file_path(example_type, "attestation");
@@ -235,3 +236,168 @@ async fn notarize(

    Ok(())
}

async fn notarize(
    mut prover: Prover<Committed>,
    config: &RequestConfig,
    request_tx: Sender<AttestationRequest>,
    attestation_rx: Receiver<Attestation>,
) -> Result<(Attestation, Secrets), Box<dyn std::error::Error>> {
    let mut builder = ProveConfig::builder(prover.transcript());

    if let Some(config) = config.transcript_commit() {
        builder.transcript_commit(config.clone());
    }

    let disclosure_config = builder.build()?;

    let ProverOutput {
        transcript_commitments,
        transcript_secrets,
        ..
    } = prover.prove(&disclosure_config).await?;

    let transcript = prover.transcript().clone();
    let tls_transcript = prover.tls_transcript().clone();
    prover.close().await?;

    // Build an attestation request.
    let mut builder = AttestationRequest::builder(config);

    builder
        .server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
        .handshake_data(HandshakeData {
            certs: tls_transcript
                .server_cert_chain()
                .expect("server cert chain is present")
                .to_vec(),
            sig: tls_transcript
                .server_signature()
                .expect("server signature is present")
                .clone(),
            binding: tls_transcript.certificate_binding().clone(),
        })
        .transcript(transcript)
        .transcript_commitments(transcript_secrets, transcript_commitments);

    let (request, secrets) = builder.build(&CryptoProvider::default())?;

    // Send attestation request to notary.
    request_tx
        .send(request.clone())
        .map_err(|_| "notary is not receiving attestation request".to_string())?;

    // Receive attestation from notary.
    let attestation = attestation_rx
        .await
        .map_err(|err| format!("notary did not respond with attestation: {err}"))?;

    // Check the attestation is consistent with the Prover's view.
    request.validate(&attestation)?;

    Ok((attestation, secrets))
}

async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
    socket: S,
    request_rx: Receiver<AttestationRequest>,
    attestation_tx: Sender<Attestation>,
) -> Result<(), Box<dyn std::error::Error>> {
    // Set up Verifier.
    let config_validator = ProtocolConfigValidator::builder()
        .max_sent_data(tlsn_examples::MAX_SENT_DATA)
        .max_recv_data(tlsn_examples::MAX_RECV_DATA)
        .build()
        .unwrap();

    // Create a root certificate store with the server-fixture's self-signed
    // certificate. This is only required for offline testing with the
    // server-fixture.
    let verifier_config = VerifierConfig::builder()
        .root_store(RootCertStore {
            roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
        })
        .protocol_config_validator(config_validator)
        .build()
        .unwrap();

    let mut verifier = Verifier::new(verifier_config)
        .setup(socket.compat())
        .await?
        .run()
        .await?;

    let VerifierOutput {
        transcript_commitments,
        encoder_secret,
        ..
    } = verifier.verify(&VerifyConfig::default()).await?;

    let tls_transcript = verifier.tls_transcript().clone();

    verifier.close().await?;

    let sent_len = tls_transcript
        .sent()
        .iter()
        .filter_map(|record| {
            if let ContentType::ApplicationData = record.typ {
                Some(record.ciphertext.len())
            } else {
                None
            }
        })
        .sum::<usize>();

    let recv_len = tls_transcript
        .recv()
        .iter()
        .filter_map(|record| {
            if let ContentType::ApplicationData = record.typ {
                Some(record.ciphertext.len())
            } else {
                None
            }
        })
        .sum::<usize>();

    // Receive attestation request from prover.
    let request = request_rx.await?;

    // Load a dummy signing key.
    let signing_key = k256::ecdsa::SigningKey::from_bytes(&[1u8; 32].into())?;
    let signer = Box::new(Secp256k1Signer::new(&signing_key.to_bytes())?);
    let mut provider = CryptoProvider::default();
    provider.signer.set_signer(signer);

    // Build an attestation.
    let mut att_config_builder = AttestationConfig::builder();
    att_config_builder.supported_signature_algs(Vec::from_iter(provider.signer.supported_algs()));
    let att_config = att_config_builder.build()?;

    let mut builder = Attestation::builder(&att_config).accept_request(request)?;
    builder
        .connection_info(ConnectionInfo {
            time: tls_transcript.time(),
            version: (*tls_transcript.version()),
            transcript_length: TranscriptLength {
                sent: sent_len as u32,
                received: recv_len as u32,
            },
        })
        .server_ephemeral_key(tls_transcript.server_ephemeral_key().clone())
        .transcript_commitments(transcript_commitments);

    if let Some(encoder_secret) = encoder_secret {
        builder.encoder_secret(encoder_secret);
    }

    let attestation = builder.build(&provider)?;

    // Send attestation to prover.
    attestation_tx
        .send(attestation)
        .map_err(|_| "prover is not receiving attestation".to_string())?;

    Ok(())
}
@@ -6,14 +6,17 @@ use std::time::Duration;

use clap::Parser;

use tls_core::verify::WebPkiVerifier;
use tls_server_fixture::CA_CERT_DER;
use tlsn_core::{
    presentation::{Presentation, PresentationOutput},
    signing::VerifyingKey,
    CryptoProvider,
use tlsn::{
    attestation::{
        presentation::{Presentation, PresentationOutput},
        signing::VerifyingKey,
        CryptoProvider,
    },
    config::{CertificateDer, RootCertStore},
    verifier::ServerCertVerifier,
};
use tlsn_examples::ExampleType;
use tlsn_server_fixture_certs::CA_CERT_DER;

#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
@@ -41,12 +44,11 @@ async fn verify_presentation(example_type: &ExampleType) -> Result<(), Box<dyn s
    //
    // This is only required for offline testing with the server-fixture. In
    // production, use `CryptoProvider::default()` instead.
    let mut root_store = tls_core::anchors::RootCertStore::empty();
    root_store
        .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
        .unwrap();
    let root_cert_store = RootCertStore {
        roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
    };
    let crypto_provider = CryptoProvider {
        cert: WebPkiVerifier::new(root_store, None),
        cert: ServerCertVerifier::new(&root_cert_store)?,
        ..Default::default()
    };

@@ -85,9 +87,9 @@ async fn verify_presentation(example_type: &ExampleType) -> Result<(), Box<dyn s
    );
    println!("Note that the data which the Prover chose not to disclose are shown as X.\n");
    println!("Data sent:\n");
    println!("{}\n", sent);
    println!("{sent}\n");
    println!("Data received:\n");
    println!("{}\n", recv);
    println!("{recv}\n");
    println!("-------------------------------------------------------------------");
    Ok(())