mirror of
https://github.com/zkonduit/ezkl.git
synced 2026-01-07 21:43:54 -05:00
feat: vka hashing squash (#982)
62 .github/workflows/engine.yml vendored
@@ -87,7 +87,7 @@ jobs:
      - name: Replace memory definition in nodejs
        run: |
-          sed -i "3s|.*|imports['env'] = {memory: new WebAssembly.Memory({initial:20,maximum:65536,shared:true})}|" pkg/nodejs/ezkl.js
+          sed -i "3s|.*|imports['env'] = {memory: new WebAssembly.Memory({initial:21,maximum:65536,shared:true})}|" pkg/nodejs/ezkl.js

      - name: Replace `import.meta.url` with `import.meta.resolve` definition in workerHelpers.js
        run: |
@@ -188,63 +188,3 @@ jobs:
          npm publish
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

-  in-browser-evm-ver-publish:
-    permissions:
-      contents: read
-      packages: write
-    name: publish-in-browser-evm-verifier-package
-    needs: [publish-wasm-bindings]
-    runs-on: ubuntu-latest
-    env:
-      RELEASE_TAG: ${{ github.ref_name }}
-    if: startsWith(github.ref, 'refs/tags/')
-    steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
-        with:
-          persist-credentials: false
-      - name: Update version in package.json
-        shell: bash
-        run: |
-          sed -i "s|\"version\": \".*\"|\"version\": \"$RELEASE_TAG\"|" in-browser-evm-verifier/package.json
-      - name: Prepare tag and fetch package integrity
-        run: |
-          CLEANED_TAG=${RELEASE_TAG} # Get the tag from ref_name
-          CLEANED_TAG="${CLEANED_TAG#v}" # Remove leading 'v'
-          echo "CLEANED_TAG=${CLEANED_TAG}" >> $GITHUB_ENV # Set it as an environment variable for later steps
-          ENGINE_INTEGRITY=$(npm view @ezkljs/engine@$CLEANED_TAG dist.integrity)
-          echo "ENGINE_INTEGRITY=$ENGINE_INTEGRITY" >> $GITHUB_ENV
-      - name: Update @ezkljs/engine version in package.json
-        shell: bash
-        env:
-          RELEASE_TAG: ${{ github.ref_name }}
-        run: |
-          sed -i "s|\"@ezkljs/engine\": \".*\"|\"@ezkljs/engine\": \"$CLEANED_TAG\"|" in-browser-evm-verifier/package.json
-      - name: Update the engine import in in-browser-evm-verifier to use @ezkljs/engine package instead of the local one;
-        run: |
-          sed -i "s|import { encodeVerifierCalldata } from '../nodejs/ezkl';|import { encodeVerifierCalldata } from '@ezkljs/engine';|" in-browser-evm-verifier/src/index.ts
-      - name: Update pnpm-lock.yaml versions and integrity
-        run: |
-          awk -v integrity="$ENGINE_INTEGRITY" -v tag="$CLEANED_TAG" '
-          NR==30{$0=" specifier: \"" tag "\""}
-          NR==31{$0=" version: \"" tag "\""}
-          NR==400{$0=" /@ezkljs/engine@" tag ":"}
-          NR==401{$0=" resolution: {integrity: \"" integrity "\"}"} 1' in-browser-evm-verifier/pnpm-lock.yaml > temp.yaml && mv temp.yaml in-browser-evm-verifier/pnpm-lock.yaml
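Note that the awk rewrite above patches pnpm-lock.yaml by absolute line number (NR==30, NR==31, NR==400, NR==401), so it silently misfires if the lockfile ever shifts by a line. A minimal follow-up check — a hypothetical sketch, not part of this workflow — that fails the job when the pinned tag or integrity hash did not actually land in the lockfile:

    # Hypothetical sanity check after the awk edit (bash sketch).
    # CLEANED_TAG and ENGINE_INTEGRITY come from the earlier step via $GITHUB_ENV.
    LOCKFILE="in-browser-evm-verifier/pnpm-lock.yaml"
    grep -q "/@ezkljs/engine@${CLEANED_TAG}:" "$LOCKFILE" ||
      { echo "engine tag ${CLEANED_TAG} missing from ${LOCKFILE}" >&2; exit 1; }
    grep -qF "${ENGINE_INTEGRITY}" "$LOCKFILE" ||
      { echo "integrity hash missing from ${LOCKFILE}" >&2; exit 1; }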
-      - name: Use pnpm 8
-        uses: pnpm/action-setup@eae0cfeb286e66ffb5155f1a79b90583a127a68b #v2.4.1
-        with:
-          version: 8
-      - name: Set up Node.js
-        uses: actions/setup-node@1a4442cacd436585916779262731d5b162bc6ec7 #v3.8.2
-        with:
-          node-version: "18.12.1"
-          registry-url: "https://registry.npmjs.org"
-      - name: Publish to npm
-        run: |
-          cd in-browser-evm-verifier
-          pnpm install --frozen-lockfile
-          pnpm run build
-          pnpm publish --no-git-checks
-        env:
-          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
493 .github/workflows/rust.yml vendored
@@ -24,10 +24,31 @@ jobs:
    permissions:
      contents: read
    runs-on: large-self-hosted
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
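The step above wires the same token into GitHub HTTPS auth twice: the credential store covers libgit2 (which Cargo uses internally when fetching git dependencies), while the insteadOf rewrite covers shelled-out git. A local sketch of the effect, assuming a token in $TOKEN and that the git dependency moved to zkonduit/ezkl-verifier (see the Cargo.toml hunk below) requires authentication:

    # Rewrite every https://github.com/ URL to an oauth2-authenticated form.
    TOKEN="ghp_xxx"   # hypothetical placeholder; never hard-code a real token
    git config --global \
      url."https://oauth2:${TOKEN}@github.com/".insteadOf "https://github.com/"

    # Both a plain git fetch and a cargo fetch of a git dependency now
    # authenticate transparently through the rewritten URL:
    git ls-remote https://github.com/zkonduit/ezkl-verifier main
    cargo fetch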
@@ -44,10 +65,31 @@ jobs:
    permissions:
      contents: read
    runs-on: ubuntu-latest
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -60,10 +102,31 @@ jobs:
    permissions:
      contents: read
    runs-on: ubuntu-latest
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -76,10 +139,31 @@ jobs:
    permissions:
      contents: read
    runs-on: ubuntu-latest-32-cores
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -95,7 +179,7 @@ jobs:
      - name: Library tests
        run: cargo nextest run --lib --verbose
      - name: Library tests (original lookup)
-        run: cargo nextest run --lib --verbose --no-default-features --features ezkl
+        run: cargo nextest run --lib --verbose --no-default-features --features ezkl,eth-original-lookup

  # ultra-overflow-tests-gpu:
  #   runs-on: GPU
@@ -134,10 +218,31 @@ jobs:
    permissions:
      contents: read
    runs-on: non-gpu
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -155,22 +260,43 @@ jobs:
      # - name: Conv overflow (wasi)
      #   run: cargo wasi test conv_col_ultra_overflow -- --include-ignored --nocapture
      - name: lookup overflow
-        run: cargo nextest run --release lookup_ultra_overflow --no-capture --no-default-features --features ezkl -- --include-ignored
+        run: cargo nextest run --release lookup_ultra_overflow --no-capture --no-default-features --features ezkl,eth-original-lookup -- --include-ignored
      - name: Matmul overflow
-        run: RUST_LOG=debug cargo nextest run --release matmul_col_ultra_overflow --no-capture --no-default-features --features ezkl -- --include-ignored
+        run: RUST_LOG=debug cargo nextest run --release matmul_col_ultra_overflow --no-capture --no-default-features --features ezkl,eth-original-lookup -- --include-ignored
      - name: Conv overflow
-        run: RUST_LOG=debug cargo nextest run --release conv_col_ultra_overflow --no-capture --no-default-features --features ezkl -- --include-ignored
+        run: RUST_LOG=debug cargo nextest run --release conv_col_ultra_overflow --no-capture --no-default-features --features ezkl,eth-original-lookup -- --include-ignored
      - name: Conv + relu overflow
-        run: cargo nextest run --release conv_relu_col_ultra_overflow --no-capture --no-default-features --features ezkl -- --include-ignored
+        run: cargo nextest run --release conv_relu_col_ultra_overflow --no-capture --no-default-features --features ezkl,eth-original-lookup -- --include-ignored

  ultra-overflow-tests:
    permissions:
      contents: read
    runs-on: non-gpu
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -200,10 +326,31 @@ jobs:
    permissions:
      contents: read
    runs-on: ubuntu-latest-16-cores
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -219,11 +366,32 @@ jobs:
  wasm32-tests:
    permissions:
      contents: read
-    runs-on: non-gpu
+    runs-on: ubuntu-latest-64-cores
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -240,19 +408,46 @@ jobs:
        run: rustup target add wasm32-unknown-unknown
      - name: Add rust-src
        run: rustup component add rust-src --toolchain nightly-2025-02-17-x86_64-unknown-linux-gnu
      - name: Create webdriver.json to disable timeouts
        run: |
          echo '{"args": ["--headless", "--disable-gpu", "--disable-dev-shm-usage", "--no-sandbox"]}' > webdriver.json
      - name: Run wasm verifier tests
        # on mac:
        # AR=/opt/homebrew/opt/llvm/bin/llvm-ar CC=/opt/homebrew/opt/llvm/bin/clang wasm-pack test --firefox --headless -- -Z build-std="panic_abort,std" --features web
-        run: wasm-pack test --chrome --headless -- -Z build-std="panic_abort,std" --features web
+        run: |
+          ulimit -n 65536
+          WASM_BINDGEN_TEST_THREADS=1 \
+          WASM_BINDGEN_TEST_TIMEOUT=1800 \
+          CHROMEDRIVER_ARGS="--log-level=INFO" \
+          wasm-pack test --chrome --headless -- -Z build-std="panic_abort,std" --features web -- --nocapture
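The rewritten test step raises the open-file limit and serializes the in-browser tests; to reproduce it locally — assuming wasm-pack, Chrome, and a matching chromedriver are installed — a sketch would be:

    # Local sketch of the CI wasm test invocation (toolchain pin from the workflow).
    rustup target add wasm32-unknown-unknown
    rustup component add rust-src --toolchain nightly-2025-02-17-x86_64-unknown-linux-gnu
    ulimit -n 65536    # headless Chrome opens many sockets and files
    WASM_BINDGEN_TEST_THREADS=1 \
    WASM_BINDGEN_TEST_TIMEOUT=1800 \
    wasm-pack test --chrome --headless -- -Z build-std="panic_abort,std" --features web -- --nocapture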
  mock-proving-tests:
    permissions:
      contents: read
    runs-on: non-gpu
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -315,11 +510,32 @@ jobs:
    permissions:
      contents: read
    runs-on: non-gpu
-    needs: [build, library-tests, docs, python-tests, python-integration-tests]
+    # needs: [build, library-tests, docs, python-tests, python-integration-tests]
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -343,49 +559,26 @@ jobs:
          cache: "pnpm"
      - name: "Add rust-src"
        run: rustup component add rust-src --toolchain nightly-2025-02-17-x86_64-unknown-linux-gnu
-      - name: Install dependencies for js tests and in-browser-evm-verifier package
+      - name: Install dependencies for js tests and package
        run: |
          pnpm install --frozen-lockfile
-          pnpm install --dir ./in-browser-evm-verifier --frozen-lockfile
        env:
          CI: false
          NODE_ENV: development
-      - name: Build wasm package for nodejs target.
-        run: |
-          wasm-pack build --target nodejs --out-dir ./in-browser-evm-verifier/nodejs . -- -Z build-std="panic_abort,std"
-      - name: Build @ezkljs/verify package
-        run: |
-          cd in-browser-evm-verifier
-          pnpm build:commonjs
-          cd ..
      # - name: Install solc
      #   run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
      - name: Install Anvil
        run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
      - name: Build wasm package for nodejs target.
        run: |
          wasm-pack build --target nodejs --out-dir ./tests/wasm/nodejs . -- -Z build-std="panic_abort,std"
      - name: KZG prove and verify tests (EVM)
        run: cargo nextest run --verbose "tests_evm::kzg_evm_prove_and_verify_::" --test-threads 1
      - name: KZG prove and verify tests (EVM + reusable verifier + col-overflow)
-        run: cargo nextest run --verbose tests_evm::kzg_evm_prove_and_verify_reusable_verifier --test-threads 1
+        run: cargo nextest run --verbose tests_evm::kzg_evm_prove_and_verify_reusable_verifier --features reusable-verifier --test-threads 1
      - name: KZG prove and verify tests (EVM + kzg all)
        run: cargo nextest run --verbose tests_evm::kzg_evm_kzg_all_prove_and_verify --test-threads 1
      - name: KZG prove and verify tests (EVM + kzg inputs)
        run: cargo nextest run --verbose tests_evm::kzg_evm_kzg_input_prove_and_verify --test-threads 1
      - name: KZG prove and verify tests (EVM + kzg params)
        run: cargo nextest run --verbose tests_evm::kzg_evm_kzg_params_prove_and_verify --test-threads 1
      - name: KZG prove and verify tests (EVM + on chain inputs)
        run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_input_prove_and_verify --test-threads 1
      - name: KZG prove and verify tests (EVM + on chain outputs)
        run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_output_prove_and_verify --test-threads 1
      - name: KZG prove and verify tests (EVM + on chain inputs & outputs)
        run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_input_output_prove_and_verify --test-threads 1
      - name: KZG prove and verify tests (EVM + on chain inputs & kzg outputs + params)
        run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_input_kzg_output_kzg_params_prove_and_verify --test-threads 1
      - name: KZG prove and verify tests (EVM + on chain outputs & kzg inputs + params)
        run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_output_kzg_input_kzg_params_prove_and_verify --test-threads 1
      - name: KZG prove and verify tests (EVM + on chain all kzg)
        run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_all_kzg_params_prove_and_verify --test-threads 1
      - name: KZG prove and verify tests (EVM + on chain inputs & outputs hashes)
        run: cargo nextest run --verbose tests_evm::kzg_evm_on_chain_input_output_hashed_prove_and_verify --test-threads 1
      - name: KZG prove and verify tests (EVM)
        run: cargo nextest run --verbose tests_evm::kzg_evm_prove_and_verify --test-threads 1
      - name: KZG prove and verify tests (EVM + hashed inputs)
        run: cargo nextest run --verbose tests_evm::kzg_evm_hashed_input_prove_and_verify --test-threads 1
      - name: KZG prove and verify tests (EVM + hashed params)
@@ -432,10 +625,31 @@ jobs:
      contents: read
    runs-on: non-gpu
    needs: [build, library-tests, docs]
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -548,10 +762,31 @@ jobs:
      contents: read
    runs-on: self-hosted
    needs: [build, library-tests, docs, python-tests, python-integration-tests]
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: dtolnay/rust-toolchain@4f94fbe7e03939b0e674bcc9ca609a16088f63ff #nightly branch, TODO: update when required
        with:
          toolchain: nightly-2025-02-17
@@ -589,10 +824,31 @@ jobs:
      contents: read
    runs-on: large-self-hosted
    needs: [build, library-tests, docs, python-tests, python-integration-tests]
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -610,10 +866,31 @@ jobs:
      contents: read
    runs-on: large-self-hosted
    needs: [build, library-tests, docs, python-tests, python-integration-tests]
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -635,10 +912,31 @@ jobs:
      contents: read
    runs-on: ubuntu-latest-32-cores
    needs: [build, library-tests, docs]
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -656,10 +954,31 @@ jobs:
      contents: read
    runs-on: non-gpu
    needs: [build, library-tests, docs]
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
        with:
          python-version: "3.12"
@@ -677,7 +996,7 @@ jobs:
      - name: Install Anvil
        run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
      - name: Build python ezkl
-        run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --profile=test-runs
+        run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings,reusable-verifier --profile=test-runs
      - name: Run pytest
        run: source .env/bin/activate; pip install pytest-asyncio; pytest -vv

@@ -686,10 +1005,31 @@ jobs:
      contents: read
    runs-on: non-gpu
    needs: [build, library-tests, docs, python-tests, python-integration-tests]
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
        with:
          python-version: "3.12"
@@ -705,7 +1045,7 @@ jobs:
      - name: Setup Virtual Env and Install python dependencies
        run: python -m venv .env --clear; source .env/bin/activate; pip install -r requirements.txt;
      - name: Build python ezkl
-        run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --profile=test-runs
+        run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings,reusable-verifier --profile=test-runs
      - name: Public inputs
        run: source .env/bin/activate; cargo nextest run --verbose tests::accuracy_measurement_public_inputs_
      - name: fixed params
@@ -719,10 +1059,31 @@ jobs:
    permissions:
      contents: read
    runs-on: large-self-hosted
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
        with:
          python-version: "3.11"
@@ -744,7 +1105,7 @@ jobs:
      - name: Setup Virtual Env and Install python dependencies
        run: python -m venv .env --clear; source .env/bin/activate; pip install -r requirements.txt; python -m ensurepip --upgrade
      - name: Build python ezkl
-        run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --profile=test-runs
+        run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings,reusable-verifier --profile=test-runs
      - name: Cat and Dog notebook
        run: source .env/bin/activate; cargo nextest run py_tests::tests::cat_and_dog_notebook_
      - name: All notebooks
@@ -769,16 +1130,39 @@ jobs:
      - name: NBEATS tutorial
        run: source .env/bin/activate; cargo nextest run py_tests::tests::nbeats_
-      # - name: Reusable verifier tutorial
-      #   run: source .env/bin/activate; cargo nextest run py_tests::tests::reusable_
-      #   run: source .env/bin/activate; cargo nextest run py_tests::tests::reusable_verifier_ --no-capture
+      - name: Reusable verifier tutorial
+        run: source .env/bin/activate; cargo nextest run py_tests::tests::reusable_verifier_ --no-capture --test-threads 1

  ios-integration-tests:
    permissions:
      contents: read
    runs-on: macos-latest
+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
@@ -797,10 +1181,31 @@ jobs:
    runs-on: macos-latest
    needs: [ios-integration-tests]

+    env:
+      EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
+      - name: Configure Git credentials
+        run: |
+          if [ -z "$EVM_VERIFIER_EZKL_TOKEN" ]; then
+            echo "❌ EVM_VERIFIER_EZKL_TOKEN is empty – check repo/org secrets" >&2
+            exit 1
+          fi
+
+          # For libgit2 (what Cargo uses internally)
+          git config --global credential.helper store
+          echo "https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com" > ~/.git-credentials
+          chmod 600 ~/.git-credentials
+
+          # Also set URL replacement with oauth2 format
+          git config --global \
+            url."https://oauth2:${EVM_VERIFIER_EZKL_TOKEN}@github.com/".insteadOf \
+            "https://github.com/"
+        env:
+          EVM_VERIFIER_EZKL_TOKEN: ${{ secrets.EVM_VERIFIER_EZKL_TOKEN }}
+
      - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly-2025-02-17
2 .gitignore vendored
@@ -52,3 +52,5 @@ docs/python/build
!tests/assets/vk_aggr.key
cache
out
+!tests/assets/wasm.code
+!tests/assets/wasm.sol
145 Cargo.lock generated
@@ -126,6 +126,27 @@ dependencies = [
 "winnow 0.6.26",
]

+[[package]]
+name = "alloy-eip2930"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41"
+dependencies = [
+ "alloy-primitives 0.8.25",
+ "alloy-rlp",
+]
+
+[[package]]
+name = "alloy-eip7702"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04"
+dependencies = [
+ "alloy-primitives 0.8.25",
+ "alloy-rlp",
+ "k256",
+]
+
[[package]]
name = "alloy-eips"
version = "0.1.0"
@@ -217,7 +238,7 @@ dependencies = [
 "bytes",
 "cfg-if",
 "const-hex",
- "derive_more",
+ "derive_more 0.99.20",
 "hex-literal",
 "itoa",
 "ruint",
@@ -234,7 +255,7 @@ dependencies = [
 "bytes",
 "cfg-if",
 "const-hex",
- "derive_more",
+ "derive_more 0.99.20",
 "getrandom 0.2.16",
 "hex-literal",
 "itoa",
@@ -247,6 +268,25 @@ dependencies = [
 "tiny-keccak",
]

+[[package]]
+name = "alloy-primitives"
+version = "0.8.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c77490fe91a0ce933a1f219029521f20fc28c2c0ca95d53fa4da9c00b8d9d4e"
+dependencies = [
+ "alloy-rlp",
+ "bytes",
+ "cfg-if",
+ "const-hex",
+ "derive_more 2.0.1",
+ "hashbrown 0.15.2",
+ "itoa",
+ "k256",
+ "paste",
+ "ruint",
+ "tiny-keccak",
+]
+
[[package]]
name = "alloy-provider"
version = "0.1.0"
@@ -852,6 +892,16 @@ dependencies = [
 "winapi",
]

+[[package]]
+name = "aurora-engine-modexp"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "518bc5745a6264b5fd7b09dffb9667e400ee9e2bbe18555fac75e1fe9afa0df9"
+dependencies = [
+ "hex",
+ "num",
+]
+
[[package]]
name = "auto_impl"
version = "1.3.0"
@@ -1693,6 +1743,27 @@ dependencies = [
 "syn 2.0.101",
]

+[[package]]
+name = "derive_more"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678"
+dependencies = [
+ "derive_more-impl",
+]
+
+[[package]]
+name = "derive_more-impl"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.101",
+ "unicode-xid",
+]
+
[[package]]
name = "digest"
version = "0.9.0"
@@ -2514,13 +2585,15 @@ dependencies = [
[[package]]
name = "halo2_solidity_verifier"
version = "0.1.0"
-source = "git+https://github.com/alexander-camuto/halo2-solidity-verifier#3d237d566ca6714c9ee6fcf3f2dcefffa79f914c"
+source = "git+https://github.com/zkonduit/ezkl-verifier?branch=main#956b77a89057b184d17c37e9ceaa75091e5caba2"
dependencies = [
 "askama",
 "blake2b_simd",
 "halo2_proofs",
 "hex",
 "itertools 0.11.0",
+ "regex",
+ "revm 14.0.3",
 "ruint",
 "sha3 0.10.8",
]
@@ -4918,8 +4991,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68f4ca8ae0345104523b4af1a8a7ea97cfa1865cdb7a7c25d23c1a18d9b48598"
dependencies = [
 "auto_impl",
- "revm-interpreter",
- "revm-precompile",
+ "revm-interpreter 1.3.0",
+ "revm-precompile 2.2.0",
]

+[[package]]
+name = "revm"
+version = "14.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "641702b12847f9ed418d552f4fcabe536d867a2c980e96b6e7e25d7b992f929f"
+dependencies = [
+ "auto_impl",
+ "cfg-if",
+ "dyn-clone",
+ "revm-interpreter 10.0.3",
+ "revm-precompile 11.0.3",
+]
+
[[package]]
@@ -4928,7 +5014,16 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f959cafdf64a7f89b014fa73dc2325001cf654b3d9400260b212d19a2ebe3da0"
dependencies = [
- "revm-primitives",
+ "revm-primitives 1.3.0",
]

+[[package]]
+name = "revm-interpreter"
+version = "10.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e5e14002afae20b5bf1566f22316122f42f57517000e559c55b25bf7a49cba2"
+dependencies = [
+ "revm-primitives 10.0.0",
+]
+
[[package]]
@@ -4940,7 +5035,23 @@ dependencies = [
 "k256",
 "num",
 "once_cell",
- "revm-primitives",
+ "revm-primitives 1.3.0",
 "ripemd",
 "sha2",
 "substrate-bn",
]

+[[package]]
+name = "revm-precompile"
+version = "11.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3198c06247e8d4ad0d1312591edf049b0de4ddffa9fecb625c318fd67db8639b"
+dependencies = [
+ "aurora-engine-modexp",
+ "cfg-if",
+ "k256",
+ "once_cell",
+ "revm-primitives 10.0.0",
+ "ripemd",
+ "sha2",
+ "substrate-bn",
@@ -4962,6 +5073,24 @@ dependencies = [
 "hex",
]

+[[package]]
+name = "revm-primitives"
+version = "10.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6f1525851a03aff9a9d6a1d018b414d76252d6802ab54695b27093ecd7e7a101"
+dependencies = [
+ "alloy-eip2930",
+ "alloy-eip7702",
+ "alloy-primitives 0.8.25",
+ "auto_impl",
+ "bitflags 2.9.0",
+ "bitvec",
+ "cfg-if",
+ "dyn-clone",
+ "enumn",
+ "hex",
+]
+
[[package]]
name = "rfc6979"
version = "0.4.0"
@@ -5555,7 +5684,7 @@ dependencies = [
 "num-traits",
 "poseidon",
 "rand 0.8.5",
- "revm",
+ "revm 3.5.0",
 "serde",
 "sha3 0.10.8",
]
39 Cargo.toml
@@ -35,7 +35,9 @@ halo2_wrong_ecc = { git = "https://github.com/zkonduit/halo2wrong", branch = "ac
snark-verifier = { git = "https://github.com/zkonduit/snark-verifier", branch = "ac/chunked-mv-lookup", features = [
    "derive_serde",
] }
-halo2_solidity_verifier = { git = "https://github.com/alexander-camuto/halo2-solidity-verifier", optional = true }
+halo2_solidity_verifier = { git = "https://github.com/zkonduit/ezkl-verifier", branch = "main", optional = true, features = [
+    "evm",
+] }
maybe-rayon = { version = "0.1.1", default-features = false }
bincode = { version = "1.3.3", default-features = false }
unzip-n = "0.1.2"
@@ -43,10 +45,12 @@ num = "0.4.1"
tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package = "tosubcommand", optional = true }
semver = { version = "1.0.22", optional = true }

[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
-serde_json = { version = "1.0.97", features = ["float_roundtrip", "raw_value"] }

+# evm related deps
+serde_json = { version = "1.0.97", features = ["float_roundtrip", "raw_value"] }

alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev = "5fbf57bac99edef9d8475190109a7ea9fb7e5e83", features = [
    "provider-http",
    "signers",
@@ -56,6 +60,7 @@ alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev = "5
    "node-bindings",

], optional = true }

foundry-compilers = { version = "0.4.1", features = [
    "svm-solc",
], optional = true }
@@ -217,25 +222,29 @@ required-features = ["python-bindings"]
[features]
web = ["wasm-bindgen-rayon"]
default = [
+    "eth-mv-lookup",
    "ezkl",
-    "mv-lookup",
    "precompute-coset",
    "no-banner",
    "parallel-poly-read",
]
onnx = ["dep:tract-onnx"]
python-bindings = ["pyo3", "pyo3-log", "pyo3-async-runtimes", "pyo3-stub-gen"]
-ios-bindings = ["mv-lookup", "precompute-coset", "parallel-poly-read", "uniffi"]
+universal-bindings = [
+    "uniffi",
+    "mv-lookup",
+    "precompute-coset",
+    "parallel-poly-read",
+    "solidity-verifier-mv-lookup",
+]
logging = ["dep:colored", "dep:env_logger", "dep:chrono"]
+ios-bindings = ["universal-bindings"]
ios-bindings-test = ["ios-bindings", "uniffi/bindgen-tests"]
ezkl = [
    "onnx",
    "dep:colored",
    "dep:env_logger",
    "tabled/color",
    "serde_json/std",
    "colored_json",
    "dep:alloy",
    "dep:foundry-compilers",
    "dep:ethabi",
    "dep:indicatif",
    "dep:gag",
@@ -246,20 +255,21 @@ ezkl = [
    "dep:chrono",
    "dep:sha256",
    "dep:clap_complete",
    "dep:halo2_solidity_verifier",
    "dep:semver",
    "dep:clap",
    "dep:tosubcommand",
    "logging",
]
+eth = ["dep:alloy", "dep:foundry-compilers", "dep:ethabi"]
+solidity-verifier = ["dep:halo2_solidity_verifier"]
+solidity-verifier-mv-lookup = ["halo2_solidity_verifier/mv-lookup"]
+eth-mv-lookup = ["solidity-verifier-mv-lookup", "mv-lookup", "eth"]
+eth-original-lookup = ["eth", "solidity-verifier"]
parallel-poly-read = [
    "halo2_proofs/circuit-params",
    "halo2_proofs/parallel-poly-read",
]
-mv-lookup = [
-    "halo2_proofs/mv-lookup",
-    "snark-verifier/mv-lookup",
-    "halo2_solidity_verifier/mv-lookup",
-]
+mv-lookup = ["halo2_proofs/mv-lookup", "snark-verifier/mv-lookup"]
asm = ["halo2curves/asm", "halo2_proofs/asm"]
precompute-coset = ["halo2_proofs/precompute-coset"]
det-prove = []
@@ -271,6 +281,7 @@ macos-metal = ["halo2_proofs/macos"]
ios-metal = ["halo2_proofs/ios"]
jemalloc = ["dep:jemallocator"]
mimalloc = ["dep:mimalloc"]
+reusable-verifier = []

[patch.crates-io]
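The new feature graph splits the EVM verifier between the two lookup arguments: eth-mv-lookup (now in the default set) pulls in solidity-verifier-mv-lookup, mv-lookup, and eth, while eth-original-lookup pairs eth with the plain solidity-verifier. A sketch of exercising both stacks from a checkout of this commit:

    # Default stack (eth-mv-lookup comes in via default features).
    cargo build

    # Original-lookup stack: drop defaults, re-enable ezkl plus eth-original-lookup.
    cargo build --no-default-features --features ezkl,eth-original-lookup

    # reusable-verifier is an empty marker feature; per the workflow above it
    # gates the reusable-verifier test and binding paths:
    cargo nextest run --verbose tests_evm::kzg_evm_prove_and_verify_reusable_verifier \
      --features reusable-verifier --test-threads 1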
@@ -76,11 +76,6 @@ For more details visit the [docs](https://docs.ezkl.xyz). The CLI is faster than

Build the auto-generated rust documentation and open the docs in your browser locally. `cargo doc --open`

-#### In-browser EVM Verifier
-
-As an alternative to running the native Halo2 verifier as a WASM binding in the browser, you can use the in-browser EVM verifier. The source code of which you can find in the `in-browser-evm-verifier` directory and a README with instructions on how to use it.
-
-
### Building the Project 🔨

#### Rust CLI
@@ -1,312 +0,0 @@
[
  {
    "inputs": [
      {
        "internalType": "address",
        "name": "_contractAddresses",
        "type": "address"
      },
      {
        "internalType": "bytes",
        "name": "_callData",
        "type": "bytes"
      },
      {
        "internalType": "uint256[]",
        "name": "_decimals",
        "type": "uint256[]"
      },
      {
        "internalType": "uint256[]",
        "name": "_bits",
        "type": "uint256[]"
      },
      {
        "internalType": "uint8",
        "name": "_instanceOffset",
        "type": "uint8"
      }
    ],
    "stateMutability": "nonpayable",
    "type": "constructor"
  },
  {
    "inputs": [],
    "name": "HALF_ORDER",
    "outputs": [
      {
        "internalType": "uint256",
        "name": "",
        "type": "uint256"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "ORDER",
    "outputs": [
      {
        "internalType": "uint256",
        "name": "",
        "type": "uint256"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "uint256[]",
        "name": "instances",
        "type": "uint256[]"
      }
    ],
    "name": "attestData",
    "outputs": [],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "callData",
    "outputs": [
      {
        "internalType": "bytes",
        "name": "",
        "type": "bytes"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "contractAddress",
    "outputs": [
      {
        "internalType": "address",
        "name": "",
        "type": "address"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "bytes",
        "name": "encoded",
        "type": "bytes"
      }
    ],
    "name": "getInstancesCalldata",
    "outputs": [
      {
        "internalType": "uint256[]",
        "name": "instances",
        "type": "uint256[]"
      }
    ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "bytes",
        "name": "encoded",
        "type": "bytes"
      }
    ],
    "name": "getInstancesMemory",
    "outputs": [
      {
        "internalType": "uint256[]",
        "name": "instances",
        "type": "uint256[]"
      }
    ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "uint256",
        "name": "index",
        "type": "uint256"
      }
    ],
    "name": "getScalars",
    "outputs": [
      {
        "components": [
          {
            "internalType": "uint256",
            "name": "decimals",
            "type": "uint256"
          },
          {
            "internalType": "uint256",
            "name": "bits",
            "type": "uint256"
          }
        ],
        "internalType": "struct DataAttestation.Scalars",
        "name": "",
        "type": "tuple"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "instanceOffset",
    "outputs": [
      {
        "internalType": "uint8",
        "name": "",
        "type": "uint8"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "uint256",
        "name": "x",
        "type": "uint256"
      },
      {
        "internalType": "uint256",
        "name": "y",
        "type": "uint256"
      },
      {
        "internalType": "uint256",
        "name": "denominator",
        "type": "uint256"
      }
    ],
    "name": "mulDiv",
    "outputs": [
      {
        "internalType": "uint256",
        "name": "result",
        "type": "uint256"
      }
    ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "int256",
        "name": "x",
        "type": "int256"
      },
      {
        "components": [
          {
            "internalType": "uint256",
            "name": "decimals",
            "type": "uint256"
          },
          {
            "internalType": "uint256",
            "name": "bits",
            "type": "uint256"
          }
        ],
        "internalType": "struct DataAttestation.Scalars",
        "name": "_scalars",
        "type": "tuple"
      }
    ],
    "name": "quantizeData",
    "outputs": [
      {
        "internalType": "int256",
        "name": "quantized_data",
        "type": "int256"
      }
    ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "address",
        "name": "target",
        "type": "address"
      },
      {
        "internalType": "bytes",
        "name": "data",
        "type": "bytes"
      }
    ],
    "name": "staticCall",
    "outputs": [
      {
        "internalType": "bytes",
        "name": "",
        "type": "bytes"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "int256",
        "name": "x",
        "type": "int256"
      }
    ],
    "name": "toFieldElement",
    "outputs": [
      {
        "internalType": "uint256",
        "name": "field_element",
        "type": "uint256"
      }
    ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "address",
        "name": "verifier",
        "type": "address"
      },
      {
        "internalType": "bytes",
        "name": "encoded",
        "type": "bytes"
      }
    ],
    "name": "verifyWithDataAttestation",
    "outputs": [
      {
        "internalType": "bool",
        "name": "",
        "type": "bool"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  }
]
@@ -1,98 +0,0 @@
[
  {
    "inputs": [
      {
        "internalType": "int256[]",
        "name": "quantized_data",
        "type": "int256[]"
      }
    ],
    "name": "check_is_valid_field_element",
    "outputs": [
      {
        "internalType": "uint256[]",
        "name": "output",
        "type": "uint256[]"
      }
    ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "bytes[]",
        "name": "data",
        "type": "bytes[]"
      },
      {
        "internalType": "uint256[]",
        "name": "decimals",
        "type": "uint256[]"
      },
      {
        "internalType": "uint256[]",
        "name": "scales",
        "type": "uint256[]"
      }
    ],
    "name": "quantize_data_multi",
    "outputs": [
      {
        "internalType": "int256[]",
        "name": "quantized_data",
        "type": "int256[]"
      }
    ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "bytes",
        "name": "data",
        "type": "bytes"
      },
      {
        "internalType": "uint256",
        "name": "decimals",
        "type": "uint256"
      },
      {
        "internalType": "uint256[]",
        "name": "scales",
        "type": "uint256[]"
      }
    ],
    "name": "quantize_data_single",
    "outputs": [
      {
        "internalType": "int256[]",
        "name": "quantized_data",
        "type": "int256[]"
      }
    ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "int64[]",
        "name": "quantized_data",
        "type": "int64[]"
      }
    ],
    "name": "to_field_element",
    "outputs": [
      {
        "internalType": "uint256[]",
        "name": "output",
        "type": "uint256[]"
      }
    ],
    "stateMutability": "pure",
    "type": "function"
  }
]
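
Editor's note: the ABI above only names the pure quantization helpers; a quick way to cross-check hand-built calldata against it is to recompute the 4-byte selectors. The sketch below is editorial (not part of this commit) and assumes the array above has been saved to a hypothetical `quantize_data.abi` file; it uses only `Web3.keccak`, which the notebooks further down already depend on via web3.py.

```python
# Hedged sketch: derive the function selectors from the ABI shown above.
import json
from web3 import Web3

abi = json.load(open("quantize_data.abi"))  # hypothetical filename
for entry in abi:
    if entry["type"] != "function":
        continue
    # canonical signature: name(type1,type2,...)
    sig = entry["name"] + "(" + ",".join(i["type"] for i in entry["inputs"]) + ")"
    print(sig, "->", Web3.keccak(text=sig)[:4].hex())
```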
@@ -1,32 +0,0 @@
[
  {
    "inputs": [
      {
        "internalType": "int256[]",
        "name": "_numbers",
        "type": "int256[]"
      }
    ],
    "stateMutability": "nonpayable",
    "type": "constructor"
  },
  {
    "inputs": [
      {
        "internalType": "uint256",
        "name": "",
        "type": "uint256"
      }
    ],
    "name": "arr",
    "outputs": [
      {
        "internalType": "int256",
        "name": "",
        "type": "int256"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  }
]
@@ -1,397 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.20;
contract LoadInstances {
    /**
     * @dev Parse the instances array from the Halo2Verifier encoded calldata.
     * @notice must pass encoded bytes from memory
     * @param encoded - verifier calldata
     */
    function getInstancesMemory(
        bytes memory encoded
    ) public pure returns (uint256[] memory instances) {
        bytes4 funcSig;
        uint256 instances_offset;
        uint256 instances_length;
        assembly {
            // fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
            funcSig := mload(add(encoded, 0x20))
        }
        if (funcSig == 0xaf83a18d) {
            instances_offset = 0x64;
        } else if (funcSig == 0x1e8e1e13) {
            instances_offset = 0x44;
        } else {
            revert("Invalid function signature");
        }
        assembly {
            // Fetch the instances offset, which is 4 + 32 + 32 bytes away from
            // the start of encoded for `verifyProof(bytes,uint256[])`,
            // and 4 + 32 + 32 + 32 away for `verifyProof(address,bytes,uint256[])`

            instances_offset := mload(add(encoded, instances_offset))

            instances_length := mload(add(add(encoded, 0x24), instances_offset))
        }
        instances = new uint256[](instances_length); // Allocate memory for the instances array.
        assembly {
            // Now instances points to the start of the array data
            // (right after the length field).
            for {
                let i := 0x20
            } lt(i, add(mul(instances_length, 0x20), 0x20)) {
                i := add(i, 0x20)
            } {
                mstore(
                    add(instances, i),
                    mload(add(add(encoded, add(i, 0x24)), instances_offset))
                )
            }
        }
        require(
            funcSig == 0xaf83a18d || funcSig == 0x1e8e1e13,
            "Invalid function signature"
        );
    }
    /**
     * @dev Parse the instances array from the Halo2Verifier encoded calldata.
     * @notice must pass encoded bytes from calldata
     * @param encoded - verifier calldata
     */
    function getInstancesCalldata(
        bytes calldata encoded
    ) public pure returns (uint256[] memory instances) {
        bytes4 funcSig;
        uint256 instances_offset;
        uint256 instances_length;
        assembly {
            // fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
            funcSig := calldataload(encoded.offset)
        }
        if (funcSig == 0xaf83a18d) {
            instances_offset = 0x44;
        } else if (funcSig == 0x1e8e1e13) {
            instances_offset = 0x24;
        } else {
            revert("Invalid function signature");
        }
        // We need to create a new assembly block in order for solidity
        // to cast funcSig to a bytes4 type. Otherwise it will load the entire first 32 bytes of the calldata
        // within the block
        assembly {
            // Fetch the instances offset, which is 4 + 32 + 32 bytes away from
            // the start of encoded for `verifyProof(bytes,uint256[])`,
            // and 4 + 32 + 32 + 32 away for `verifyProof(address,bytes,uint256[])`

            instances_offset := calldataload(
                add(encoded.offset, instances_offset)
            )

            instances_length := calldataload(
                add(add(encoded.offset, 0x04), instances_offset)
            )
        }
        instances = new uint256[](instances_length); // Allocate memory for the instances array.
        assembly {
            // Now instances points to the start of the array data
            // (right after the length field).

            for {
                let i := 0x20
            } lt(i, add(mul(instances_length, 0x20), 0x20)) {
                i := add(i, 0x20)
            } {
                mstore(
                    add(instances, i),
                    calldataload(
                        add(add(encoded.offset, add(i, 0x04)), instances_offset)
                    )
                )
            }
        }
    }
}

// The KZG commitments of a given model, all aggregated into a single bytes array.
// At Solidity generation time, the commitments are hardcoded into the contract via the COMMITMENT_KZG constant.
// It will be used to check that the proof commitments match the expected commitments.
bytes constant COMMITMENT_KZG = hex"1234";

contract SwapProofCommitments {
    /**
     * @dev Check the proof commitments against the expected KZG commitments
     * @notice must pass encoded bytes from calldata
     * @param encoded - verifier calldata
     */
    function checkKzgCommits(
        bytes calldata encoded
    ) internal pure returns (bool equal) {
        bytes4 funcSig;
        uint256 proof_offset;
        uint256 proof_length;
        assembly {
            // fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
            funcSig := calldataload(encoded.offset)
        }
        if (funcSig == 0xaf83a18d) {
            proof_offset = 0x24;
        } else if (funcSig == 0x1e8e1e13) {
            proof_offset = 0x04;
        } else {
            revert("Invalid function signature");
        }
        assembly {
            // Fetch the proof offset, which is 4 + 32 bytes away from
            // the start of encoded for `verifyProof(bytes,uint256[])`,
            // and 4 + 32 + 32 away for `verifyProof(address,bytes,uint256[])`

            proof_offset := calldataload(add(encoded.offset, proof_offset))

            proof_length := calldataload(
                add(add(encoded.offset, 0x04), proof_offset)
            )
        }
        // Check the length of the commitment against the proof bytes
        if (proof_length < COMMITMENT_KZG.length) {
            return false;
        }

        // Load COMMITMENT_KZG into memory
        bytes memory commitment = COMMITMENT_KZG;

        // Compare the first N bytes of the proof with COMMITMENT_KZG
        uint words = (commitment.length + 31) / 32; // Calculate the number of 32-byte words

        assembly {
            // Now we compare the commitment with the proof,
            // ensuring that the commitments divided up into 32-byte words are all equal.
            for {
                let i := 0x20
            } lt(i, add(mul(words, 0x20), 0x20)) {
                i := add(i, 0x20)
            } {
                let wordProof := calldataload(
                    add(add(encoded.offset, add(i, 0x04)), proof_offset)
                )
                let wordCommitment := mload(add(commitment, i))
                equal := eq(wordProof, wordCommitment)
                if eq(equal, 0) {
                    break
                }
            }
        }

        return equal; // Return true if the commitment comparison passed
    } /// end checkKzgCommits
}

contract DataAttestation is LoadInstances, SwapProofCommitments {
    // the address of the account to make calls to
    address public immutable contractAddress;

    // the abi encoded function calls to make to the `contractAddress` that return the attested-to data
    bytes public callData;

    struct Scalars {
        // The number of base-10 decimals to scale the data by.
        // For most ERC20 tokens this is 1e18
        uint256 decimals;
        // The number of fractional bits of the fixed-point EZKL data points.
        uint256 bits;
    }

    Scalars[] private scalars;

    function getScalars(uint256 index) public view returns (Scalars memory) {
        return scalars[index];
    }

    /**
     * @notice EZKL P value
     * @dev In order to prevent the verifier from accepting two versions of the same public input, n and the quantity (n + P), where n + P <= 2^256, we require that all instances are strictly less than P.
     * @dev The reason for this is that the assembly code of the verifier performs all arithmetic operations modulo P and as a consequence cannot distinguish between n and n + P.
     */
    uint256 public constant ORDER =
        uint256(
            0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
        );

    uint256 public constant HALF_ORDER = ORDER >> 1;

    uint8 public instanceOffset;

    /**
     * @dev Initialize the contract with the account calls the EZKL model will read from.
     * @param _contractAddresses - The address of the contract EZKL reads storage from.
     * @param _callData - The abi encoded function calls to make to the `contractAddress` that EZKL reads storage from.
     */
    constructor(
        address _contractAddresses,
        bytes memory _callData,
        uint256[] memory _decimals,
        uint[] memory _bits,
        uint8 _instanceOffset
    ) {
        require(
            _bits.length == _decimals.length,
            "Invalid scalar array lengths"
        );
        for (uint i; i < _bits.length; i++) {
            scalars.push(Scalars(10 ** _decimals[i], 1 << _bits[i]));
        }
        contractAddress = _contractAddresses;
        callData = _callData;
        instanceOffset = _instanceOffset;
    }

    function mulDiv(
        uint256 x,
        uint256 y,
        uint256 denominator
    ) public pure returns (uint256 result) {
        unchecked {
            uint256 prod0;
            uint256 prod1;
            assembly {
                let mm := mulmod(x, y, not(0))
                prod0 := mul(x, y)
                prod1 := sub(sub(mm, prod0), lt(mm, prod0))
            }

            if (prod1 == 0) {
                return prod0 / denominator;
            }

            require(denominator > prod1, "Math: mulDiv overflow");

            uint256 remainder;
            assembly {
                remainder := mulmod(x, y, denominator)
                prod1 := sub(prod1, gt(remainder, prod0))
                prod0 := sub(prod0, remainder)
            }

            uint256 twos = denominator & (~denominator + 1);
            assembly {
                denominator := div(denominator, twos)
                prod0 := div(prod0, twos)
                twos := add(div(sub(0, twos), twos), 1)
            }

            prod0 |= prod1 * twos;

            uint256 inverse = (3 * denominator) ^ 2;

            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;

            result = prod0 * inverse;
            return result;
        }
    }
    /**
     * @dev Quantize the data returned from the account calls to the scale used by the EZKL model.
     * @param x - One of the elements of the data returned from the account calls
     * @param _scalars - The scaling factors for the data returned from the account calls.
     */
    function quantizeData(
        int x,
        Scalars memory _scalars
    ) public pure returns (int256 quantized_data) {
        if (_scalars.bits == 1 && _scalars.decimals == 1) {
            return x;
        }
        bool neg = x < 0;
        if (neg) x = -x;
        uint output = mulDiv(uint256(x), _scalars.bits, _scalars.decimals);
        if (
            mulmod(uint256(x), _scalars.bits, _scalars.decimals) * 2 >=
            _scalars.decimals
        ) {
            output += 1;
        }
        if (output > HALF_ORDER) {
            revert("Overflow field modulus");
        }
        quantized_data = neg ? -int256(output) : int256(output);
    }
    /**
     * @dev Make a static call to the account to fetch the data that EZKL reads from.
     * @param target - The address of the account to make calls to.
     * @param data - The abi encoded function calls to make to the `contractAddress` that EZKL reads storage from.
     * @return The data returned from the account calls. (Must come from either a view or pure function. Will throw an error otherwise)
     */
    function staticCall(
        address target,
        bytes memory data
    ) public view returns (bytes memory) {
        (bool success, bytes memory returndata) = target.staticcall(data);
        if (success) {
            if (returndata.length == 0) {
                require(
                    target.code.length > 0,
                    "Address: call to non-contract"
                );
            }
            return returndata;
        } else {
            revert("Address: low-level call failed");
        }
    }
    /**
     * @dev Convert the fixed-point quantized data into a field element.
     * @param x - The quantized data.
     * @return field_element - The field element.
     */
    function toFieldElement(
        int256 x
    ) public pure returns (uint256 field_element) {
        // The casting down to uint256 is safe because the order is about 2^254, and the value
        // of x ranges from -2^127 to 2^127, so x + int(ORDER) is always positive.
        return uint256(x + int(ORDER)) % ORDER;
    }

    /**
     * @dev Make the account calls to fetch the data that EZKL reads from and attest to the data.
     * @param instances - The public instances of the proof (the data in the proof that is publicly accessible to the verifier).
     */
    function attestData(uint256[] memory instances) public view {
        bytes memory returnData = staticCall(contractAddress, callData);
        int256[] memory x = abi.decode(returnData, (int256[]));
        int output;
        uint fieldElement;
        for (uint i = 0; i < x.length; i++) {
            output = quantizeData(x[i], scalars[i]);
            fieldElement = toFieldElement(output);
            if (fieldElement != instances[i]) {
                revert("Public input does not match");
            }
        }
    }

    /**
     * @dev Verify the proof with the data attestation.
     * @param verifier - The address of the verifier contract.
     * @param encoded - The verifier calldata.
     */
    function verifyWithDataAttestation(
        address verifier,
        bytes calldata encoded
    ) public view returns (bool) {
        require(verifier.code.length > 0, "Address: call to non-contract");
        attestData(getInstancesCalldata(encoded));
        require(checkKzgCommits(encoded), "Invalid KZG commitments");
        // static call the verifier contract to verify the proof
        (bool success, bytes memory returndata) = verifier.staticcall(encoded);

        if (success) {
            return abi.decode(returndata, (bool));
        } else {
            revert("low-level call to verifier failed");
        }
    }
}
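
Editor's note: the `LoadInstances` assembly above is easier to follow with a plain-Python model of the calldata it walks. The sketch below is editorial (not part of this commit); the selector bytes come from the contract's dispatch above, and the offset arithmetic mirrors its 4-byte-selector-plus-32-byte-head-word layout. The encoder is a hand-rolled assumption of standard ABI encoding for `verifyProof(bytes,uint256[])`, included only so the example is self-contained.

```python
# Hedged sketch: model the calldata layout that getInstancesCalldata parses.

def encode_verify_proof(selector: bytes, proof: bytes, instances: list[int]) -> bytes:
    """Hand-encode `verifyProof(bytes,uint256[])` calldata (assumed ABI layout)."""
    assert len(selector) == 4
    word = lambda n: n.to_bytes(32, "big")
    # head: offsets of the two dynamic args, relative to the end of the selector
    proof_tail = word(len(proof)) + proof + b"\x00" * (-len(proof) % 32)
    head = word(0x40) + word(0x40 + len(proof_tail))
    inst_tail = word(len(instances)) + b"".join(word(i) for i in instances)
    return selector + head + proof_tail + inst_tail

def get_instances(encoded: bytes) -> list[int]:
    # same dispatch as the contract: the instances head word sits at 0x44 for
    # verifyProof(address,bytes,uint256[]) and 0x24 for verifyProof(bytes,uint256[])
    offset_slot = {b"\xaf\x83\xa1\x8d": 0x44, b"\x1e\x8e\x1e\x13": 0x24}[encoded[:4]]
    instances_offset = int.from_bytes(encoded[offset_slot:offset_slot + 32], "big")
    base = 4 + instances_offset  # skip the selector, land on the length word
    length = int.from_bytes(encoded[base:base + 32], "big")
    return [int.from_bytes(encoded[base + 32 + 32 * i:base + 64 + 32 * i], "big")
            for i in range(length)]

calldata = encode_verify_proof(b"\x1e\x8e\x1e\x13", b"\x01" * 100, [7, 11])
assert get_instances(calldata) == [7, 11]
```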
@@ -904,7 +904,7 @@
    "outputs": [],
    "source": [
     "\n",
     "res = await ezkl.calibrate_settings(\"input.json\", target=\"resources\", scales = [4])\n",
     "res = ezkl.calibrate_settings(\"input.json\", target=\"resources\", scales = [4])\n",
     "assert res == True\n",
     "print(\"verified\")\n"
    ]
@@ -954,7 +954,7 @@
    "source": [
     "\n",
     "\n",
     "res = await ezkl.gen_witness()\n"
     "res = ezkl.gen_witness()\n"
    ]
   },
   {
@@ -1142,4 +1142,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
}
@@ -1,589 +0,0 @@
{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# data-attest-ezkl\n",
    "\n",
    "Here's an example leveraging EZKL whereby the inputs to the model are read and attested to from an on-chain source.\n",
    "\n",
    "In this setup:\n",
    "- the inputs and outputs are publicly known to the prover and verifier\n",
    "- the on-chain inputs will be fetched and then fed directly into the circuit\n",
    "- the quantization of the on-chain inputs happens within the EVM and is replicated at proving time\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "First we import the necessary dependencies and set up logging to be as informative as possible. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# check if notebook is in colab\n",
    "try:\n",
    "    # install ezkl\n",
    "    import google.colab\n",
    "    import subprocess\n",
    "    import sys\n",
    "    subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
    "    subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
    "\n",
    "# rely on local installation of ezkl if the notebook is not in colab\n",
    "except:\n",
    "    pass\n",
    "\n",
    "\n",
    "from torch import nn\n",
    "import ezkl\n",
    "import os\n",
    "import json\n",
    "import logging\n",
    "\n",
    "# more descriptive logging\n",
    "FORMAT = '%(levelname)s %(name)s %(asctime)-15s %(filename)s:%(lineno)d %(message)s'\n",
    "logging.basicConfig(format=FORMAT)\n",
    "logging.getLogger().setLevel(logging.DEBUG)\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we define our model. It is a very simple PyTorch model that has just one layer, an average pooling 2D layer. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "# Defines the model\n",
    "\n",
    "class MyModel(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(MyModel, self).__init__()\n",
    "        self.layer = nn.AvgPool2d(2, 1, (1, 1))\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.layer(x)[0]\n",
    "\n",
    "\n",
    "circuit = MyModel()\n",
    "\n",
    "# this is where you'd train your model"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We omit training for purposes of this demonstration. We've marked where training would happen in the cell above.\n",
    "Now we export the model to onnx and create a corresponding (randomly generated) input. This input data will eventually be stored on chain and read from according to the call_data field in the graph input.\n",
    "\n",
    "You can replace the random `x` with real data if you so wish. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "x = 0.1*torch.rand(1,*[3, 2, 2], requires_grad=True)\n",
    "\n",
    "# Flips the neural net into inference mode\n",
    "circuit.eval()\n",
    "\n",
    "# Export the model\n",
    "torch.onnx.export(circuit,  # model being run\n",
    "                  x,  # model input (or a tuple for multiple inputs)\n",
    "                  \"network.onnx\",  # where to save the model (can be a file or file-like object)\n",
    "                  export_params=True,  # store the trained parameter weights inside the model file\n",
    "                  opset_version=10,  # the ONNX version to export the model to\n",
    "                  do_constant_folding=True,  # whether to execute constant folding for optimization\n",
    "                  input_names = ['input'],  # the model's input names\n",
    "                  output_names = ['output'],  # the model's output names\n",
    "                  dynamic_axes={'input' : {0 : 'batch_size'},  # variable length axes\n",
    "                                'output' : {0 : 'batch_size'}})\n",
    "\n",
    "data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
    "\n",
    "data = dict(input_data = [data_array])\n",
    "\n",
    "# Serialize data into file:\n",
    "json.dump(data, open(\"input.json\", 'w' ))\n",
    "\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We now define a function that will create a new anvil instance to which we will deploy our test contract. This contract will contain in its storage the data that we will read from and attest to. In production you would not need to set up a local anvil instance. Instead you would replace RPC_URL with the actual RPC endpoint of the chain you are deploying your verifiers to, reading the data from said chain."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import subprocess\n",
    "import time\n",
    "import threading\n",
    "\n",
    "# make sure anvil is running locally\n",
    "# $ anvil -p 3030\n",
    "\n",
    "RPC_URL = \"http://localhost:3030\"\n",
    "\n",
    "# Save process globally\n",
    "anvil_process = None\n",
    "\n",
    "def start_anvil():\n",
    "    global anvil_process\n",
    "    if anvil_process is None:\n",
    "        anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
    "        if anvil_process.returncode is not None:\n",
    "            raise Exception(\"failed to start anvil process\")\n",
    "        time.sleep(3)\n",
    "\n",
    "def stop_anvil():\n",
    "    global anvil_process\n",
    "    if anvil_process is not None:\n",
    "        anvil_process.terminate()\n",
    "        anvil_process = None\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We define our `PyRunArgs` object, which contains the visibility parameters for our model.\n",
    "- `input_visibility` defines the visibility of the model inputs\n",
    "- `param_visibility` defines the visibility of the model weights, constants, and parameters\n",
    "- `output_visibility` defines the visibility of the model outputs\n",
    "\n",
    "Here we create the following setup:\n",
    "- `input_visibility`: \"public\"\n",
    "- `param_visibility`: \"private\"\n",
    "- `output_visibility`: \"public\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import ezkl\n",
    "\n",
    "model_path = os.path.join('network.onnx')\n",
    "compiled_model_path = os.path.join('network.compiled')\n",
    "pk_path = os.path.join('test.pk')\n",
    "vk_path = os.path.join('test.vk')\n",
    "settings_path = os.path.join('settings.json')\n",
    "srs_path = os.path.join('kzg.srs')\n",
    "data_path = os.path.join('input.json')\n",
    "\n",
    "run_args = ezkl.PyRunArgs()\n",
    "run_args.input_visibility = \"public\"\n",
    "run_args.param_visibility = \"private\"\n",
    "run_args.output_visibility = \"public\"\n",
    "run_args.num_inner_cols = 1\n",
    "run_args.variables = [(\"batch_size\", 1)]\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we generate a settings file. This file instantiates a bunch of parameters that determine the circuit's shape, size, etc. Because of the way we represent nonlinearities in the circuit (using Halo2's [lookup tables](https://zcash.github.io/halo2/design/proving-system/lookup.html)), it is often best to _calibrate_ this settings file, as some data can fall out of range of these lookups.\n",
    "\n",
    "You can pass a dataset for calibration that will be representative of real inputs you might find if and when you deploy the prover. Here we create a dummy calibration dataset for demonstration purposes. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!RUST_LOG=trace\n",
    "# TODO: Dictionary outputs\n",
    "res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
    "assert res == True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# generate a bunch of dummy calibration data\n",
    "cal_data = {\n",
    "    \"input_data\": [(0.1*torch.rand(2, *[3, 2, 2])).flatten().tolist()],\n",
    "}\n",
    "\n",
    "cal_path = os.path.join('val_data.json')\n",
    "# save as json file\n",
    "with open(cal_path, \"w\") as f:\n",
    "    json.dump(cal_data, f)\n",
    "\n",
    "res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
    "assert res == True"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The graph input for on-chain data sources is formatted completely differently compared to file-based data sources.\n",
    "\n",
    "- For file data sources, the raw floating point values that eventually get quantized, converted into field elements and stored in `witness.json` to be consumed by the circuit are stored. The output data contains the expected floating point values returned as outputs from running your vanilla pytorch model on the given inputs.\n",
    "- For on-chain data sources, the input_data field contains all the data necessary to read and format the on-chain data into something digestible by EZKL (aka field elements :-D).\n",
    "\n",
    "Here is what the schema for an on-chain data source graph input file should look like for a single call data source:\n",
    "\n",
    "```json\n",
    "{\n",
    "  \"input_data\": {\n",
    "    \"rpc\": \"http://localhost:3030\", // The rpc endpoint of the chain you are deploying your verifier to\n",
    "    \"calls\": {\n",
    "      \"call_data\": \"1f3be514000000000000000000000000c6962004f452be9203591991d15f6b388e09e8d00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000\", // The abi encoded call data to a view function that returns an array of on-chain data points we are attesting to.\n",
    "      \"decimals\": 0, // The number of decimal places of the large uint256 value. This is our way of representing large wei values as floating points on-chain, since the EVM only natively supports integer values.\n",
    "      \"address\": \"9A213F53334279C128C37DA962E5472eCD90554f\", // The address of the contract that we are calling to get the data.\n",
    "      \"len\": 12 // The number of data points returned by the view function (the length of the array)\n",
    "    }\n",
    "  }\n",
    "}\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "await ezkl.setup_test_evm_data(\n",
    "    data_path,\n",
    "    compiled_model_path,\n",
    "    # we write the call data to the same file as the input data\n",
    "    data_path,\n",
    "    input_source=ezkl.PyTestDataSource.OnChain,\n",
    "    output_source=ezkl.PyTestDataSource.File,\n",
    "    rpc_url=RPC_URL)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As we use Halo2 with KZG commitments, we need an SRS (structured reference string) from (preferably) a multi-party trusted setup ceremony. For an overview of the procedures for such a ceremony check out [this page](https://blog.ethereum.org/2023/01/16/announcing-kzg-ceremony). The `get_srs` command retrieves a correctly sized SRS given the calibrated settings file from [here](https://github.com/han0110/halo2-kzg-srs).\n",
    "\n",
    "These SRS were generated with [this](https://github.com/privacy-scaling-explorations/perpetualpowersoftau) ceremony. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "res = await ezkl.get_srs(settings_path)\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We now need to generate the circuit witness. These are the model outputs (and any hashes) that are generated when feeding the previously generated `input.json` through the circuit / model. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!export RUST_BACKTRACE=1\n",
    "\n",
    "witness_path = \"witness.json\"\n",
    "\n",
    "res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Here we setup verifying and proving keys for the circuit. As the name suggests the proving key is needed for ... proving and the verifying key is needed for ... verifying. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# HERE WE SETUP THE CIRCUIT PARAMS\n",
    "# WE GOT KEYS\n",
    "# WE GOT CIRCUIT PARAMETERS\n",
    "# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
    "res = ezkl.setup(\n",
    "        compiled_model_path,\n",
    "        vk_path,\n",
    "        pk_path,\n",
    "        \n",
    "    )\n",
    "\n",
    "assert res == True\n",
    "assert os.path.isfile(vk_path)\n",
    "assert os.path.isfile(pk_path)\n",
    "assert os.path.isfile(settings_path)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we generate a full proof. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# GENERATE A PROOF\n",
    "\n",
    "proof_path = os.path.join('test.pf')\n",
    "\n",
    "res = ezkl.prove(\n",
    "        witness_path,\n",
    "        compiled_model_path,\n",
    "        pk_path,\n",
    "        proof_path,\n",
    "        \n",
    "        \"single\",\n",
    "    )\n",
    "\n",
    "print(res)\n",
    "assert os.path.isfile(proof_path)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "And verify it as a sanity check. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# VERIFY IT\n",
    "\n",
    "res = ezkl.verify(\n",
    "        proof_path,\n",
    "        settings_path,\n",
    "        vk_path,\n",
    "        \n",
    "    )\n",
    "\n",
    "assert res == True\n",
    "print(\"verified\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We can now create and then deploy a vanilla EVM verifier."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "abi_path = 'test.abi'\n",
    "sol_code_path = 'test.sol'\n",
    "\n",
    "res = await ezkl.create_evm_verifier(\n",
    "        vk_path,\n",
    "        \n",
    "        settings_path,\n",
    "        sol_code_path,\n",
    "        abi_path,\n",
    "    )\n",
    "assert res == True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "addr_path_verifier = \"addr_verifier.txt\"\n",
    "\n",
    "res = await ezkl.deploy_evm(\n",
    "    addr_path_verifier,\n",
    "    'http://127.0.0.1:3030',\n",
    "    sol_code_path,\n",
    ")\n",
    "\n",
    "assert res == True"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "With the vanilla verifier deployed, we can now create the data attestation contract, which will read in the instances from the calldata to the verifier, attest to them, call the verifier and then return the result. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "abi_path = 'test.abi'\n",
    "sol_code_path = 'test.sol'\n",
    "input_path = 'input.json'\n",
    "\n",
    "res = await ezkl.create_evm_data_attestation(\n",
    "        input_path,\n",
    "        settings_path,\n",
    "        sol_code_path,\n",
    "        abi_path,\n",
    "    )"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we can deploy the data attestation verifier contract. For security reasons, this binding will only deploy to a local anvil instance, using accounts generated by anvil, so it should only be used for testing purposes."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "addr_path_da = \"addr_da.txt\"\n",
    "\n",
    "res = await ezkl.deploy_da_evm(\n",
    "        addr_path_da,\n",
    "        input_path,\n",
    "        RPC_URL,\n",
    "        settings_path,\n",
    "        sol_code_path,\n",
    "    )\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Call the view-only verify method on the contract to verify the proof. Since it is a view function this is safe to use in production, as you don't have to pass your private key."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# read the verifier address\n",
    "addr = None\n",
    "with open(addr_path_verifier, 'r') as f:\n",
    "    addr = f.read()\n",
    "# read the data attestation address\n",
    "addr_da = None\n",
    "with open(addr_path_da, 'r') as f:\n",
    "    addr_da = f.read()\n",
    "\n",
    "res = await ezkl.verify_evm(\n",
    "    addr,\n",
    "    RPC_URL,\n",
    "    proof_path,\n",
    "    addr_da,\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.9"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
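
Editor's note: the notebook above relies on the quantization of on-chain inputs being "replicated at proving time". A hedged, off-chain Python model of the contract arithmetic (the `quantizeData` and `toFieldElement` functions from the DataAttestation contract earlier in this diff) is sketched below; it is editorial, not part of this commit, and takes the decimal and bit exponents directly rather than the pre-scaled `Scalars` the constructor stores.

```python
# Hedged sketch: predict the attested instance values off-chain.

ORDER = 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
HALF_ORDER = ORDER >> 1

def quantize_data(x: int, decimals: int, bits: int) -> int:
    # mirrors quantizeData: scale by 2**bits / 10**decimals, rounding half
    # away from zero, exactly like the mulmod-based check in the contract
    scale, denom = 1 << bits, 10 ** decimals
    neg = x < 0
    if neg:
        x = -x
    out = (x * scale) // denom
    if (x * scale) % denom * 2 >= denom:
        out += 1
    assert out <= HALF_ORDER, "Overflow field modulus"
    return -out if neg else out

def to_field_element(x: int) -> int:
    # mirrors toFieldElement: map a signed quantized value into [0, ORDER)
    return (x + ORDER) % ORDER

# e.g. 1.5 tokens stored as 1_500_000_000_000_000_000 wei (18 decimals),
# quantized at 7 fractional bits, is round(1.5 * 128) = 192
assert quantize_data(1_500_000_000_000_000_000, 18, 7) == 192
assert to_field_element(-1) == ORDER - 1
```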
@@ -1,660 +0,0 @@
{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# data-attest-ezkl hashed\n",
    "\n",
    "Here's an example leveraging EZKL whereby the hashes of the outputs of the model are read and attested to from an on-chain source.\n",
    "\n",
    "In this setup:\n",
    "- the hashes of outputs are publicly known to the prover and verifier\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "First we import the necessary dependencies and set up logging to be as informative as possible. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# check if notebook is in colab\n",
    "try:\n",
    "    # install ezkl\n",
    "    import google.colab\n",
    "    import subprocess\n",
    "    import sys\n",
    "    subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
    "    subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
    "\n",
    "# rely on local installation of ezkl if the notebook is not in colab\n",
    "except:\n",
    "    pass\n",
    "\n",
    "\n",
    "from torch import nn\n",
    "import ezkl\n",
    "import os\n",
    "import json\n",
    "import logging\n",
    "\n",
    "# uncomment for more descriptive logging\n",
    "# FORMAT = '%(levelname)s %(name)s %(asctime)-15s %(filename)s:%(lineno)d %(message)s'\n",
    "# logging.basicConfig(format=FORMAT)\n",
    "# logging.getLogger().setLevel(logging.DEBUG)\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we define our model. It is a very simple PyTorch model that has just one layer, an average pooling 2D layer. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "# Defines the model\n",
    "\n",
    "class MyModel(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(MyModel, self).__init__()\n",
    "        self.layer = nn.AvgPool2d(2, 1, (1, 1))\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.layer(x)[0]\n",
    "\n",
    "\n",
    "circuit = MyModel()\n",
    "\n",
    "# this is where you'd train your model\n",
    "\n",
    "\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We omit training for purposes of this demonstration. We've marked where training would happen in the cell above.\n",
    "Now we export the model to onnx and create a corresponding (randomly generated) input. This input data will eventually be stored on chain and read from according to the call_data field in the graph input.\n",
    "\n",
    "You can replace the random `x` with real data if you so wish. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "x = 0.1*torch.rand(1,*[3, 2, 2], requires_grad=True)\n",
    "\n",
    "# Flips the neural net into inference mode\n",
    "circuit.eval()\n",
    "\n",
    "# Export the model\n",
    "torch.onnx.export(circuit,  # model being run\n",
    "                  x,  # model input (or a tuple for multiple inputs)\n",
    "                  \"network.onnx\",  # where to save the model (can be a file or file-like object)\n",
    "                  export_params=True,  # store the trained parameter weights inside the model file\n",
    "                  opset_version=10,  # the ONNX version to export the model to\n",
    "                  do_constant_folding=True,  # whether to execute constant folding for optimization\n",
    "                  input_names = ['input'],  # the model's input names\n",
    "                  output_names = ['output'],  # the model's output names\n",
    "                  dynamic_axes={'input' : {0 : 'batch_size'},  # variable length axes\n",
    "                                'output' : {0 : 'batch_size'}})\n",
    "\n",
    "data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
    "\n",
    "data = dict(input_data = [data_array])\n",
    "\n",
    "# Serialize data into file:\n",
    "json.dump(data, open(\"input.json\", 'w' ))\n",
    "\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We now define a function that will create a new anvil instance to which we will deploy our test contract. This contract will contain in its storage the data that we will read from and attest to. In production you would not need to set up a local anvil instance. Instead you would replace RPC_URL with the actual RPC endpoint of the chain you are deploying your verifiers to, reading the data from said chain."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import subprocess\n",
    "import time\n",
    "import threading\n",
    "\n",
    "# make sure anvil is running locally\n",
    "# $ anvil -p 3030\n",
    "\n",
    "RPC_URL = \"http://localhost:3030\"\n",
    "\n",
    "# Save process globally\n",
    "anvil_process = None\n",
    "\n",
    "def start_anvil():\n",
    "    global anvil_process\n",
    "    if anvil_process is None:\n",
    "        anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
    "        if anvil_process.returncode is not None:\n",
    "            raise Exception(\"failed to start anvil process\")\n",
    "        time.sleep(3)\n",
    "\n",
    "def stop_anvil():\n",
    "    global anvil_process\n",
    "    if anvil_process is not None:\n",
    "        anvil_process.terminate()\n",
    "        anvil_process = None\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We define our `PyRunArgs` object, which contains the visibility parameters for our model.\n",
    "- `input_visibility` defines the visibility of the model inputs\n",
    "- `param_visibility` defines the visibility of the model weights, constants, and parameters\n",
    "- `output_visibility` defines the visibility of the model outputs\n",
    "\n",
    "Here we create the following setup:\n",
    "- `input_visibility`: \"private\"\n",
    "- `param_visibility`: \"private\"\n",
    "- `output_visibility`: \"hashed\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import ezkl\n",
    "\n",
    "model_path = os.path.join('network.onnx')\n",
    "compiled_model_path = os.path.join('network.compiled')\n",
    "pk_path = os.path.join('test.pk')\n",
    "vk_path = os.path.join('test.vk')\n",
    "settings_path = os.path.join('settings.json')\n",
    "srs_path = os.path.join('kzg.srs')\n",
    "data_path = os.path.join('input.json')\n",
    "\n",
    "run_args = ezkl.PyRunArgs()\n",
    "run_args.input_visibility = \"private\"\n",
    "run_args.param_visibility = \"private\"\n",
    "run_args.output_visibility = \"hashed\"\n",
    "run_args.variables = [(\"batch_size\", 1)]\n",
    "\n",
    "\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we generate a settings file. This file instantiates a bunch of parameters that determine the circuit's shape, size, etc. Because of the way we represent nonlinearities in the circuit (using Halo2's [lookup tables](https://zcash.github.io/halo2/design/proving-system/lookup.html)), it is often best to _calibrate_ this settings file, as some data can fall out of range of these lookups.\n",
    "\n",
    "You can pass a dataset for calibration that will be representative of real inputs you might find if and when you deploy the prover. Here we create a dummy calibration dataset for demonstration purposes. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!RUST_LOG=trace\n",
    "# TODO: Dictionary outputs\n",
    "res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
    "assert res == True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# generate a bunch of dummy calibration data\n",
    "cal_data = {\n",
    "    \"input_data\": [(0.1*torch.rand(2, *[3, 2, 2])).flatten().tolist()],\n",
    "}\n",
    "\n",
    "cal_path = os.path.join('val_data.json')\n",
    "# save as json file\n",
    "with open(cal_path, \"w\") as f:\n",
    "    json.dump(cal_data, f)\n",
    "\n",
    "res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
    "assert res == True"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As we use Halo2 with KZG commitments, we need an SRS (structured reference string) from (preferably) a multi-party trusted setup ceremony. For an overview of the procedures for such a ceremony check out [this page](https://blog.ethereum.org/2023/01/16/announcing-kzg-ceremony). The `get_srs` command retrieves a correctly sized SRS given the calibrated settings file from [here](https://github.com/han0110/halo2-kzg-srs).\n",
    "\n",
    "These SRS were generated with [this](https://github.com/privacy-scaling-explorations/perpetualpowersoftau) ceremony. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "res = await ezkl.get_srs(settings_path)\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We now need to generate the circuit witness. These are the model outputs (and any hashes) that are generated when feeding the previously generated `input.json` through the circuit / model. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!export RUST_BACKTRACE=1\n",
    "\n",
    "witness_path = \"witness.json\"\n",
    "\n",
    "res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(ezkl.felt_to_big_endian(res['processed_outputs']['poseidon_hash'][0]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We now post the hashes of the outputs to the chain. This is the data that will be read from and attested to."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from web3 import Web3, HTTPProvider\n",
    "from solcx import compile_standard\n",
    "from decimal import Decimal\n",
    "import json\n",
    "import os\n",
    "import torch\n",
    "\n",
    "\n",
    "# setup web3 instance\n",
    "w3 = Web3(HTTPProvider(RPC_URL))\n",
    "\n",
    "def test_on_chain_data(res):\n",
    "    print(f'poseidon_hash: {res[\"processed_outputs\"][\"poseidon_hash\"]}')\n",
    "    # Step 0: Convert the tensor to a flat list\n",
    "    data = [int(ezkl.felt_to_big_endian(res['processed_outputs']['poseidon_hash'][0]), 0)]\n",
    "\n",
    "    # Step 1: Prepare the data\n",
    "    # Step 2: Prepare and compile the contract.\n",
    "    # We are using a test contract here but in production you would\n",
    "    # use whatever contract you are fetching data from.\n",
    "    contract_source_code = '''\n",
    "    // SPDX-License-Identifier: UNLICENSED\n",
    "    pragma solidity ^0.8.17;\n",
    "\n",
    "    contract TestReads {\n",
    "\n",
    "        uint[] public arr;\n",
    "        constructor(uint256[] memory _numbers) {\n",
    "            for(uint256 i = 0; i < _numbers.length; i++) {\n",
    "                arr.push(_numbers[i]);\n",
    "            }\n",
    "        }\n",
    "        function getArr() public view returns (uint[] memory) {\n",
    "            return arr;\n",
    "        }\n",
    "    }\n",
    "    '''\n",
    "\n",
    "    compiled_sol = compile_standard({\n",
    "        \"language\": \"Solidity\",\n",
    "        \"sources\": {\"testreads.sol\": {\"content\": contract_source_code}},\n",
    "        \"settings\": {\"outputSelection\": {\"*\": {\"*\": [\"metadata\", \"evm.bytecode\", \"abi\"]}}}\n",
    "    })\n",
    "\n",
    "    # Get bytecode\n",
    "    bytecode = compiled_sol['contracts']['testreads.sol']['TestReads']['evm']['bytecode']['object']\n",
    "\n",
    "    # Get ABI\n",
    "    # In production, if you are reading from really large contracts, you can use\n",
    "    # a stripped-down version of the ABI of the contract you are calling, containing only the view functions you will fetch data from.\n",
    "    abi = json.loads(compiled_sol['contracts']['testreads.sol']['TestReads']['metadata'])['output']['abi']\n",
    "\n",
    "    # Step 3: Deploy the contract\n",
    "    TestReads = w3.eth.contract(abi=abi, bytecode=bytecode)\n",
    "    tx_hash = TestReads.constructor(data).transact()\n",
    "    tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)\n",
    "    # If you are deploying to production you can skip the 3 lines of code above and just instantiate the contract like this,\n",
    "    # passing the address and abi of the contract you are fetching data from.\n",
    "    contract = w3.eth.contract(address=tx_receipt['contractAddress'], abi=abi)\n",
    "\n",
    "    # Step 4: Interact with the contract\n",
    "    calldata = contract.functions.getArr().build_transaction()['data'][2:]\n",
    "\n",
    "    # Prepare the calls_to_account object\n",
    "    # If you were calling view functions across multiple contracts,\n",
    "    # you would have multiple entries in the calls_to_account array,\n",
    "    # one for each contract.\n",
    "    decimals = [0] * len(data)\n",
    "    call_to_account = {\n",
    "        'call_data': calldata,\n",
    "        'decimals': decimals,\n",
    "        'address': contract.address[2:], # remove the '0x' prefix\n",
    "    }\n",
    "\n",
    "    print(f'call_to_account: {call_to_account}')\n",
    "\n",
    "    return call_to_account\n",
    "\n",
    "# Now let's start the Anvil process. You don't need to do this if you are deploying to a non-local chain.\n",
    "start_anvil()\n",
    "\n",
    "# Now let's call our function, passing in the same input tensor we used to export the model 2 cells above.\n",
    "call_to_account = test_on_chain_data(res)\n",
    "\n",
    "data = dict(input_data = [data_array], output_data = {'rpc': RPC_URL, 'call': call_to_account })\n",
    "\n",
    "# Serialize on-chain data into file:\n",
    "json.dump(data, open(\"input.json\", 'w'))\n",
    "\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Here we setup verifying and proving keys for the circuit. As the name suggests the proving key is needed for ... proving and the verifying key is needed for ... verifying. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# HERE WE SETUP THE CIRCUIT PARAMS\n",
    "# WE GOT KEYS\n",
    "# WE GOT CIRCUIT PARAMETERS\n",
    "# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
    "res = ezkl.setup(\n",
    "        compiled_model_path,\n",
    "        vk_path,\n",
    "        pk_path,\n",
    "        \n",
    "    )\n",
    "\n",
    "assert res == True\n",
    "assert os.path.isfile(vk_path)\n",
    "assert os.path.isfile(pk_path)\n",
    "assert os.path.isfile(settings_path)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we generate a full proof. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# GENERATE A PROOF\n",
    "\n",
    "proof_path = os.path.join('test.pf')\n",
    "\n",
    "res = ezkl.prove(\n",
    "        witness_path,\n",
    "        compiled_model_path,\n",
    "        pk_path,\n",
    "        proof_path,\n",
    "        \n",
    "        \"single\",\n",
    "    )\n",
    "\n",
    "print(res)\n",
    "assert os.path.isfile(proof_path)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "And verify it as a sanity check. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# VERIFY IT\n",
    "\n",
    "res = ezkl.verify(\n",
    "        proof_path,\n",
    "        settings_path,\n",
    "        vk_path,\n",
    "        \n",
    "    )\n",
    "\n",
    "assert res == True\n",
    "print(\"verified\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We can now create and then deploy a vanilla EVM verifier."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "abi_path = 'test.abi'\n",
    "sol_code_path = 'test.sol'\n",
    "\n",
    "res = await ezkl.create_evm_verifier(\n",
    "        vk_path,\n",
    "        \n",
    "        settings_path,\n",
    "        sol_code_path,\n",
    "        abi_path,\n",
    "    )\n",
    "assert res == True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "addr_path_verifier = \"addr_verifier.txt\"\n",
    "\n",
    "res = await ezkl.deploy_evm(\n",
    "    addr_path_verifier,\n",
    "    'http://127.0.0.1:3030',\n",
    "    sol_code_path,\n",
    ")\n",
    "\n",
    "assert res == True"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "With the vanilla verifier deployed, we can now create the data attestation contract, which will read in the instances from the calldata to the verifier, attest to them, call the verifier and then return the result. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "abi_path = 'test.abi'\n",
    "sol_code_path = 'test.sol'\n",
    "input_path = 'input.json'\n",
    "\n",
    "res = await ezkl.create_evm_data_attestation(\n",
    "        input_path,\n",
    "        settings_path,\n",
    "        sol_code_path,\n",
    "        abi_path,\n",
    "    )"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we can deploy the data attestation verifier contract. For security reasons, this binding will only deploy to a local anvil instance, using accounts generated by anvil, so it should only be used for testing purposes."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "addr_path_da = \"addr_da.txt\"\n",
    "\n",
    "res = await ezkl.deploy_da_evm(\n",
    "        addr_path_da,\n",
    "        input_path,\n",
    "        RPC_URL,\n",
    "        settings_path,\n",
    "        sol_code_path,\n",
    "    )\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Call the view-only verify method on the contract to verify the proof. Since it is a view function this is safe to use in production, as you don't have to pass your private key."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# read the verifier address\n",
    "addr = None\n",
    "with open(addr_path_verifier, 'r') as f:\n",
    "    addr = f.read()\n",
    "# read the data attestation address\n",
    "addr_da = None\n",
    "with open(addr_path_da, 'r') as f:\n",
    "    addr_da = f.read()\n",
    "\n",
    "res = await ezkl.verify_evm(\n",
    "    addr,\n",
    "    RPC_URL,\n",
    "    proof_path,\n",
    "    addr_da,\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.9"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
@@ -1,592 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# data-attest-kzg-vis\n",
|
||||
"\n",
|
||||
"Here's an example leveraging EZKL whereby the inputs to the model are read and attested to from an on-chain source and the params and outputs are committed to using kzg-commitments. \n",
|
||||
"\n",
|
||||
"In this setup:\n",
|
||||
"- the inputs and outputs are publicly known to the prover and verifier\n",
|
||||
"- the on chain inputs will be fetched and then fed directly into the circuit\n",
|
||||
"- the quantization of the on-chain inputs happens within the evm and is replicated at proving time \n",
|
||||
"- The kzg commitment to the params and inputs will be read from the proof and checked to make sure it matches the expected commitment stored on-chain.\n"
|
||||
]
|
||||
},
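As a side note on the quantization bullet above: it is plain fixed-point rescaling. A minimal sketch of the idea, assuming a power-of-two scale (the real scale and rounding behaviour come from the generated settings, so treat this as illustrative rather than ezkl's exact implementation):

```python
# illustrative fixed-point quantization with a power-of-two scale `s`
def quantize(x: float, s: int) -> int:
    return round(x * (1 << s))

def dequantize(q: int, s: int) -> float:
    return q / (1 << s)

# round-trip at scale 7: 0.1 -> 13 -> 0.1015625
assert dequantize(quantize(0.1, 7), 7) == 0.1015625
```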
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First we import the necessary dependencies and set up logging to be as informative as possible. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# check if notebook is in colab\n",
|
||||
"try:\n",
|
||||
" # install ezkl\n",
|
||||
" import google.colab\n",
|
||||
" import subprocess\n",
|
||||
" import sys\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
|
||||
"\n",
|
||||
"# rely on local installation of ezkl if the notebook is not in colab\n",
|
||||
"except:\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"from torch import nn\n",
|
||||
"import ezkl\n",
|
||||
"import os\n",
|
||||
"import json\n",
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"# uncomment for more descriptive logging \n",
|
||||
"FORMAT = '%(levelname)s %(name)s %(asctime)-15s %(filename)s:%(lineno)d %(message)s'\n",
|
||||
"logging.basicConfig(format=FORMAT)\n",
|
||||
"logging.getLogger().setLevel(logging.DEBUG)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we define our model. It is a very simple PyTorch model that has just one layer, an average pooling 2D layer. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"# Defines the model\n",
|
||||
"\n",
|
||||
"class MyModel(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(MyModel, self).__init__()\n",
|
||||
" self.layer = nn.AvgPool2d(2, 1, (1, 1))\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" return self.layer(x)[0]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"circuit = MyModel()\n",
|
||||
"\n",
|
||||
"# this is where you'd train your model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We omit training for purposes of this demonstration. We've marked where training would happen in the cell above. \n",
|
||||
"Now we export the model to onnx and create a corresponding (randomly generated) input. This input data will eventually be stored on chain and read from according to the call_data field in the graph input.\n",
|
||||
"\n",
|
||||
"You can replace the random `x` with real data if you so wish. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x = 0.1*torch.rand(1,*[3, 2, 2], requires_grad=True)\n",
|
||||
"\n",
|
||||
"# Flips the neural net into inference mode\n",
|
||||
"circuit.eval()\n",
|
||||
"\n",
|
||||
" # Export the model\n",
|
||||
"torch.onnx.export(circuit, # model being run\n",
|
||||
" x, # model input (or a tuple for multiple inputs)\n",
|
||||
" \"network.onnx\", # where to save the model (can be a file or file-like object)\n",
|
||||
" export_params=True, # store the trained parameter weights inside the model file\n",
|
||||
" opset_version=10, # the ONNX version to export the model to\n",
|
||||
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
|
||||
" input_names = ['input'], # the model's input names\n",
|
||||
" output_names = ['output'], # the model's output names\n",
|
||||
" dynamic_axes={'input' : {0 : 'batch_size'}, # variable length axes\n",
|
||||
" 'output' : {0 : 'batch_size'}})\n",
|
||||
"\n",
|
||||
"data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data = dict(input_data = [data_array])\n",
|
||||
"\n",
|
||||
" # Serialize data into file:\n",
|
||||
"json.dump(data, open(\"input.json\", 'w' ))\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We now define a function that will create a new anvil instance which we will deploy our test contract too. This contract will contain in its storage the data that we will read from and attest to. In production you would not need to set up a local anvil instance. Instead you would replace RPC_URL with the actual RPC endpoint of the chain you are deploying your verifiers too, reading from the data on said chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import subprocess\n",
|
||||
"import time\n",
|
||||
"import threading\n",
|
||||
"\n",
|
||||
"# make sure anvil is running locally\n",
|
||||
"# $ anvil -p 3030\n",
|
||||
"\n",
|
||||
"RPC_URL = \"http://localhost:3030\"\n",
|
||||
"\n",
|
||||
"# Save process globally\n",
|
||||
"anvil_process = None\n",
|
||||
"\n",
|
||||
"def start_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is None:\n",
|
||||
" anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
|
||||
" if anvil_process.returncode is not None:\n",
|
||||
" raise Exception(\"failed to start anvil process\")\n",
|
||||
" time.sleep(3)\n",
|
||||
"\n",
|
||||
"def stop_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is not None:\n",
|
||||
" anvil_process.terminate()\n",
|
||||
" anvil_process = None\n"
|
||||
]
|
||||
},
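If anvil is launched from the notebook rather than a separate terminal, a guarded pattern keeps the child process from leaking; a sketch using the helpers defined above:

```python
# run the demo against a managed anvil instance, always cleaning up
start_anvil()
try:
    print(f"anvil listening at {RPC_URL}")
    # ... deploy / prove / verify steps go here ...
finally:
    stop_anvil()
```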
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We define our `PyRunArgs` objects which contains the visibility parameters for out model. \n",
|
||||
"- `input_visibility` defines the visibility of the model inputs\n",
|
||||
"- `param_visibility` defines the visibility of the model weights and constants and parameters \n",
|
||||
"- `output_visibility` defines the visibility of the model outputs\n",
|
||||
"\n",
|
||||
"Here we create the following setup:\n",
|
||||
"- `input_visibility`: \"public\"\n",
|
||||
"- `param_visibility`: \"polycommitment\" \n",
|
||||
"- `output_visibility`: \"polycommitment\"\n",
|
||||
"\n",
|
||||
"**Note**:\n",
|
||||
"When we set this to polycommitment, we are saying that the model parameters are committed to using a polynomial commitment scheme. This commitment will be stored on chain as a constant stored in the DA contract, and the proof will contain the commitment to the parameters. The DA verification will then check that the commitment in the proof matches the commitment stored on chain. \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import ezkl\n",
|
||||
"\n",
|
||||
"model_path = os.path.join('network.onnx')\n",
|
||||
"compiled_model_path = os.path.join('network.compiled')\n",
|
||||
"pk_path = os.path.join('test.pk')\n",
|
||||
"vk_path = os.path.join('test.vk')\n",
|
||||
"settings_path = os.path.join('settings.json')\n",
|
||||
"srs_path = os.path.join('kzg.srs')\n",
|
||||
"data_path = os.path.join('input.json')\n",
|
||||
"\n",
|
||||
"run_args = ezkl.PyRunArgs()\n",
|
||||
"run_args.input_visibility = \"public\"\n",
|
||||
"run_args.param_visibility = \"polycommit\"\n",
|
||||
"run_args.output_visibility = \"polycommit\"\n",
|
||||
"run_args.num_inner_cols = 1\n",
|
||||
"run_args.variables = [(\"batch_size\", 1)]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we generate a settings file. This file basically instantiates a bunch of parameters that determine their circuit shape, size etc... Because of the way we represent nonlinearities in the circuit (using Halo2's [lookup tables](https://zcash.github.io/halo2/design/proving-system/lookup.html)), it is often best to _calibrate_ this settings file as some data can fall out of range of these lookups.\n",
|
||||
"\n",
|
||||
"You can pass a dataset for calibration that will be representative of real inputs you might find if and when you deploy the prover. Here we create a dummy calibration dataset for demonstration purposes. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!RUST_LOG=trace\n",
|
||||
"# TODO: Dictionary outputs\n",
|
||||
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# generate a bunch of dummy calibration data\n",
|
||||
"cal_data = {\n",
|
||||
" \"input_data\": [(0.1*torch.rand(2, *[3, 2, 2])).flatten().tolist()],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"cal_path = os.path.join('val_data.json')\n",
|
||||
"# save as json file\n",
|
||||
"with open(cal_path, \"w\") as f:\n",
|
||||
" json.dump(cal_data, f)\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
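The "resources" target optimizes for a smaller, cheaper circuit. If output fidelity matters more than proving cost, the same call can target "accuracy" instead (same signature, different trade-off):

```python
# trade circuit size for numerical fidelity by switching the calibration target
res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, "accuracy")
```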
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The graph input for on chain data sources is formatted completely differently compared to file based data sources.\n",
|
||||
"\n",
|
||||
"- For file data sources, the raw floating point values that eventually get quantized, converted into field elements and stored in `witness.json` to be consumed by the circuit are stored. The output data contains the expected floating point values returned as outputs from running your vanilla pytorch model on the given inputs.\n",
|
||||
"- For on chain data sources, the input_data field contains all the data necessary to read and format the on chain data into something digestable by EZKL (aka field elements :-D). \n",
|
||||
"Here is what the schema for an on-chain data source graph input file should look like for a single call data source:\n",
|
||||
" \n",
|
||||
"```json\n",
|
||||
"{\n",
|
||||
" \"input_data\": {\n",
|
||||
" \"rpc\": \"http://localhost:3030\", // The rpc endpoint of the chain you are deploying your verifier to\n",
|
||||
" \"calls\": {\n",
|
||||
" \"call_data\": \"1f3be514000000000000000000000000c6962004f452be9203591991d15f6b388e09e8d00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000\", // The abi encoded call data to a view function that returns an array of on-chain data points we are attesting to. \n",
|
||||
" \"decimals\": 0, // The number of decimal places of the large uint256 value. This is our way of representing large wei values as floating points on chain, since the evm only natively supports integer values.\n",
|
||||
" \"address\": \"9A213F53334279C128C37DA962E5472eCD90554f\", // The address of the contract that we are calling to get the data. \n",
|
||||
" \"len\": 3 // The number of data points returned by the view function (the length of the array)\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"```"
|
||||
]
|
||||
},
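Such a file is ordinarily produced for you (see `setup_test_evm_data` below), but it is just JSON; a hand-authoring sketch where the call_data and address are hypothetical placeholders, not real deployments:

```python
import json

# hypothetical on-chain graph input; fill in real values for your chain
onchain_input = {
    "input_data": {
        "rpc": "http://localhost:3030",
        "calls": {
            "call_data": "<abi-encoded call to your view function>",
            "decimals": 0,
            "address": "<address of the contract holding the data>",
            "len": 3,
        },
    }
}
with open("input.json", "w") as f:
    json.dump(onchain_input, f)
```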
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"await ezkl.setup_test_evm_data(\n",
|
||||
" data_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" # we write the call data to the same file as the input data\n",
|
||||
" data_path,\n",
|
||||
" input_source=ezkl.PyTestDataSource.OnChain,\n",
|
||||
" output_source=ezkl.PyTestDataSource.File,\n",
|
||||
" rpc_url=RPC_URL)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we use Halo2 with KZG-commitments we need an SRS string from (preferably) a multi-party trusted setup ceremony. For an overview of the procedures for such a ceremony check out [this page](https://blog.ethereum.org/2023/01/16/announcing-kzg-ceremony). The `get_srs` command retrieves a correctly sized SRS given the calibrated settings file from [here](https://github.com/han0110/halo2-kzg-srs). \n",
|
||||
"\n",
|
||||
"These SRS were generated with [this](https://github.com/privacy-scaling-explorations/perpetualpowersoftau) ceremony. "
|
||||
]
|
||||
},
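The SRS that gets fetched is sized by the `logrows` value in the calibrated settings; you can peek at it first with a plain JSON read (the field layout is an assumption here and may vary across ezkl versions):

```python
# inspect the circuit size that determines which SRS will be downloaded
with open(settings_path) as f:
    settings = json.load(f)
print("logrows:", settings["run_args"]["logrows"])
```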
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = await ezkl.get_srs( settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We now need to generate the circuit witness. These are the model outputs (and any hashes) that are generated when feeding the previously generated `input.json` through the circuit / model. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# HERE WE SETUP THE CIRCUIT PARAMS\n",
|
||||
"# WE GOT KEYS\n",
|
||||
"# WE GOT CIRCUIT PARAMETERS\n",
|
||||
"# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
|
||||
"res = ezkl.setup(\n",
|
||||
" compiled_model_path,\n",
|
||||
" vk_path,\n",
|
||||
" pk_path,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"assert os.path.isfile(vk_path)\n",
|
||||
"assert os.path.isfile(pk_path)\n",
|
||||
"assert os.path.isfile(settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!export RUST_BACKTRACE=1\n",
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we setup verifying and proving keys for the circuit. As the name suggests the proving key is needed for ... proving and the verifying key is needed for ... verifying. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we generate a full proof. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# GENERATE A PROOF\n",
|
||||
"\n",
|
||||
"proof_path = os.path.join('test.pf')\n",
|
||||
"\n",
|
||||
"res = ezkl.prove(\n",
|
||||
" witness_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
"assert os.path.isfile(proof_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And verify it as a sanity check. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# VERIFY IT\n",
|
||||
"\n",
|
||||
"res = ezkl.verify(\n",
|
||||
" proof_path,\n",
|
||||
" settings_path,\n",
|
||||
" vk_path,\n",
|
||||
" \n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"print(\"verified\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now create and then deploy a vanilla evm verifier."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"abi_path = 'test.abi'\n",
|
||||
"sol_code_path = 'test.sol'\n",
|
||||
"\n",
|
||||
"res = await ezkl.create_evm_verifier(\n",
|
||||
" vk_path,\n",
|
||||
" \n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" abi_path,\n",
|
||||
" )\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"addr_path_verifier = \"addr_verifier.txt\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"When deploying a DA with kzg commitments, we need to make sure to also pass a witness file that contains the commitments to the parameters and inputs. This is because the verifier will need to check that the commitments in the proof match the commitments stored on chain."
|
||||
]
|
||||
},
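It can help to eyeball the witness before wiring it into the DA contract; a small sketch (top-level key names are indicative and may differ across ezkl versions):

```python
# inspect the witness to see where the processed commitments live
with open(witness_path) as f:
    witness = json.load(f)
print(list(witness.keys()))
```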
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"abi_path = 'test.abi'\n",
|
||||
"sol_code_path = 'test.sol'\n",
|
||||
"input_path = 'input.json'\n",
|
||||
"\n",
|
||||
"res = await ezkl.create_evm_data_attestation(\n",
|
||||
" input_path,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" abi_path,\n",
|
||||
" witness_path = witness_path,\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can deploy the data attest verifier contract. For security reasons, this binding will only deploy to a local anvil instance, using accounts generated by anvil. \n",
|
||||
"So should only be used for testing purposes."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"addr_path_da = \"addr_da.txt\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.deploy_da_evm(\n",
|
||||
" addr_path_da,\n",
|
||||
" input_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" )\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Call the view only verify method on the contract to verify the proof. Since it is a view function this is safe to use in production since you don't have to pass your private key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# read the verifier address\n",
|
||||
"addr_verifier = None\n",
|
||||
"with open(addr_path_verifier, 'r') as f:\n",
|
||||
" addr = f.read()\n",
|
||||
"#read the data attestation address\n",
|
||||
"addr_da = None\n",
|
||||
"with open(addr_path_da, 'r') as f:\n",
|
||||
" addr_da = f.read()\n",
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" RPC_URL,\n",
|
||||
" proof_path,\n",
|
||||
" addr_da,\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -150,7 +150,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
@@ -170,7 +170,7 @@
|
||||
"with open(cal_path, \"w\") as f:\n",
|
||||
" json.dump(cal_data, f)\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -204,7 +204,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -437,7 +437,7 @@
|
||||
"\n",
|
||||
"# Optimize for resources, we cap logrows at 12 to reduce setup and proving time, at the expense of accuracy\n",
|
||||
"# You may want to increase the max logrows if accuracy is a concern\n",
|
||||
"res = await ezkl.calibrate_settings(target = \"resources\", max_logrows = 12, scales = [2])"
|
||||
"res = ezkl.calibrate_settings(target = \"resources\", max_logrows = 12, scales = [2])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -526,7 +526,7 @@
|
||||
"# now generate the witness file\n",
|
||||
"witness_path = os.path.join('witness.json')\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness()\n",
|
||||
"res = ezkl.gen_witness()\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -736,4 +736,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -467,7 +467,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
@@ -508,7 +508,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -196,7 +196,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -237,7 +237,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -341,4 +341,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
@@ -179,7 +179,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -214,7 +214,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -241,7 +241,7 @@
|
||||
"with open(cal_path, \"w\") as f:\n",
|
||||
" json.dump(cal_data, f)\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -291,7 +291,7 @@
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -510,4 +510,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
@@ -152,7 +152,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -188,7 +188,7 @@
|
||||
"# now generate the witness file \n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -155,7 +155,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -190,7 +190,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -233,7 +233,7 @@
|
||||
"with open(cal_path, \"w\") as f:\n",
|
||||
" json.dump(cal_data, f)\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -315,7 +315,7 @@
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n"
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -512,4 +512,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
@@ -193,7 +193,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -228,7 +228,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -1,284 +1,284 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cf69bb3f-94e6-4dba-92cd-ce08df117d67",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Linear Regression\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Sklearn based models are slightly finicky to get into a suitable onnx format. \n",
|
||||
"This notebook showcases how to do so using the `hummingbird-ml` python package ! "
|
||||
]
|
||||
},
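The same conversion pattern generalizes beyond linear regression; a quick sketch with a different sklearn estimator (illustrative only, not part of this notebook's flow):

```python
import numpy as np
from hummingbird.ml import convert
from sklearn.tree import DecisionTreeRegressor

# fit any sklearn estimator, then convert it to a torch.nn.Module
X = np.random.rand(16, 2).astype(np.float32)
y = X[:, 0] + 2 * X[:, 1]
tree = DecisionTreeRegressor(max_depth=3).fit(X, y)
torch_model = convert(tree, "torch", X[:1]).model  # exportable via torch.onnx
```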
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "95613ee9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# check if notebook is in colab\n",
|
||||
"try:\n",
|
||||
" # install ezkl\n",
|
||||
" import google.colab\n",
|
||||
" import subprocess\n",
|
||||
" import sys\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"hummingbird-ml\"])\n",
|
||||
"\n",
|
||||
"# rely on local installation of ezkl if the notebook is not in colab\n",
|
||||
"except:\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"import torch\n",
|
||||
"import ezkl\n",
|
||||
"import json\n",
|
||||
"from hummingbird.ml import convert\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# here we create and (potentially train a model)\n",
|
||||
"\n",
|
||||
"# make sure you have the dependencies required here already installed\n",
|
||||
"import numpy as np\n",
|
||||
"from sklearn.linear_model import LinearRegression\n",
|
||||
"X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])\n",
|
||||
"# y = 1 * x_0 + 2 * x_1 + 3\n",
|
||||
"y = np.dot(X, np.array([1, 2])) + 3\n",
|
||||
"reg = LinearRegression().fit(X, y)\n",
|
||||
"reg.score(X, y)\n",
|
||||
"\n",
|
||||
"circuit = convert(reg, \"torch\", X[:1]).model\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b37637c4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_path = os.path.join('network.onnx')\n",
|
||||
"compiled_model_path = os.path.join('network.compiled')\n",
|
||||
"pk_path = os.path.join('test.pk')\n",
|
||||
"vk_path = os.path.join('test.vk')\n",
|
||||
"settings_path = os.path.join('settings.json')\n",
|
||||
"\n",
|
||||
"witness_path = os.path.join('witness.json')\n",
|
||||
"data_path = os.path.join('input.json')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "82db373a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"# export to onnx format\n",
|
||||
"# !!!!!!!!!!!!!!!!! This will flash a warning but it is fine !!!!!!!!!!!!!!!!!!!!!\n",
|
||||
"\n",
|
||||
"# Input to the model\n",
|
||||
"shape = X.shape[1:]\n",
|
||||
"x = torch.rand(1, *shape, requires_grad=True)\n",
|
||||
"torch_out = circuit(x)\n",
|
||||
"# Export the model\n",
|
||||
"torch.onnx.export(circuit, # model being run\n",
|
||||
" # model input (or a tuple for multiple inputs)\n",
|
||||
" x,\n",
|
||||
" # where to save the model (can be a file or file-like object)\n",
|
||||
" \"network.onnx\",\n",
|
||||
" export_params=True, # store the trained parameter weights inside the model file\n",
|
||||
" opset_version=10, # the ONNX version to export the model to\n",
|
||||
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
|
||||
" input_names=['input'], # the model's input names\n",
|
||||
" output_names=['output'], # the model's output names\n",
|
||||
" dynamic_axes={'input': {0: 'batch_size'}, # variable length axes\n",
|
||||
" 'output': {0: 'batch_size'}})\n",
|
||||
"\n",
|
||||
"d = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data = dict(input_shapes=[shape],\n",
|
||||
" input_data=[d],\n",
|
||||
" output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])\n",
|
||||
"\n",
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(\"input.json\", 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# note that you can also call the following function to generate random data for the model\n",
|
||||
"# it is functionally equivalent to the code above\n",
|
||||
"ezkl.gen_random_data()\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d5e374a2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!RUST_LOG=trace\n",
|
||||
"# TODO: Dictionary outputs\n",
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"cal_path = os.path.join(\"calibration.json\")\n",
|
||||
"\n",
|
||||
"data_array = (torch.randn(20, *shape).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data = dict(input_data = [data_array])\n",
|
||||
"\n",
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3aa4f090",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8b74dcee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# srs path\n",
|
||||
"res = await ezkl.get_srs( settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "18c8b7c7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b1c561a8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"# HERE WE SETUP THE CIRCUIT PARAMS\n",
|
||||
"# WE GOT KEYS\n",
|
||||
"# WE GOT CIRCUIT PARAMETERS\n",
|
||||
"# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = ezkl.setup(\n",
|
||||
" compiled_model_path,\n",
|
||||
" vk_path,\n",
|
||||
" pk_path,\n",
|
||||
" \n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"assert os.path.isfile(vk_path)\n",
|
||||
"assert os.path.isfile(pk_path)\n",
|
||||
"assert os.path.isfile(settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c384cbc8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# GENERATE A PROOF\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"proof_path = os.path.join('test.pf')\n",
|
||||
"\n",
|
||||
"res = ezkl.prove(\n",
|
||||
" witness_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
"assert os.path.isfile(proof_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "76f00d41",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# VERIFY IT\n",
|
||||
"\n",
|
||||
"res = ezkl.verify(\n",
|
||||
" proof_path,\n",
|
||||
" settings_path,\n",
|
||||
" vk_path,\n",
|
||||
" \n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"print(\"verified\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.15"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cf69bb3f-94e6-4dba-92cd-ce08df117d67",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Linear Regression\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Sklearn based models are slightly finicky to get into a suitable onnx format. \n",
|
||||
"This notebook showcases how to do so using the `hummingbird-ml` python package ! "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "95613ee9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# check if notebook is in colab\n",
|
||||
"try:\n",
|
||||
" # install ezkl\n",
|
||||
" import google.colab\n",
|
||||
" import subprocess\n",
|
||||
" import sys\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"hummingbird-ml\"])\n",
|
||||
"\n",
|
||||
"# rely on local installation of ezkl if the notebook is not in colab\n",
|
||||
"except:\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"import torch\n",
|
||||
"import ezkl\n",
|
||||
"import json\n",
|
||||
"from hummingbird.ml import convert\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# here we create and (potentially train a model)\n",
|
||||
"\n",
|
||||
"# make sure you have the dependencies required here already installed\n",
|
||||
"import numpy as np\n",
|
||||
"from sklearn.linear_model import LinearRegression\n",
|
||||
"X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])\n",
|
||||
"# y = 1 * x_0 + 2 * x_1 + 3\n",
|
||||
"y = np.dot(X, np.array([1, 2])) + 3\n",
|
||||
"reg = LinearRegression().fit(X, y)\n",
|
||||
"reg.score(X, y)\n",
|
||||
"\n",
|
||||
"circuit = convert(reg, \"torch\", X[:1]).model\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b37637c4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_path = os.path.join('network.onnx')\n",
|
||||
"compiled_model_path = os.path.join('network.compiled')\n",
|
||||
"pk_path = os.path.join('test.pk')\n",
|
||||
"vk_path = os.path.join('test.vk')\n",
|
||||
"settings_path = os.path.join('settings.json')\n",
|
||||
"\n",
|
||||
"witness_path = os.path.join('witness.json')\n",
|
||||
"data_path = os.path.join('input.json')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "82db373a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"# export to onnx format\n",
|
||||
"# !!!!!!!!!!!!!!!!! This will flash a warning but it is fine !!!!!!!!!!!!!!!!!!!!!\n",
|
||||
"\n",
|
||||
"# Input to the model\n",
|
||||
"shape = X.shape[1:]\n",
|
||||
"x = torch.rand(1, *shape, requires_grad=True)\n",
|
||||
"torch_out = circuit(x)\n",
|
||||
"# Export the model\n",
|
||||
"torch.onnx.export(circuit, # model being run\n",
|
||||
" # model input (or a tuple for multiple inputs)\n",
|
||||
" x,\n",
|
||||
" # where to save the model (can be a file or file-like object)\n",
|
||||
" \"network.onnx\",\n",
|
||||
" export_params=True, # store the trained parameter weights inside the model file\n",
|
||||
" opset_version=10, # the ONNX version to export the model to\n",
|
||||
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
|
||||
" input_names=['input'], # the model's input names\n",
|
||||
" output_names=['output'], # the model's output names\n",
|
||||
" dynamic_axes={'input': {0: 'batch_size'}, # variable length axes\n",
|
||||
" 'output': {0: 'batch_size'}})\n",
|
||||
"\n",
|
||||
"d = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data = dict(input_shapes=[shape],\n",
|
||||
" input_data=[d],\n",
|
||||
" output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])\n",
|
||||
"\n",
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(\"input.json\", 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# note that you can also call the following function to generate random data for the model\n",
|
||||
"# it is functionally equivalent to the code above\n",
|
||||
"ezkl.gen_random_data()\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d5e374a2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!RUST_LOG=trace\n",
|
||||
"# TODO: Dictionary outputs\n",
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"cal_path = os.path.join(\"calibration.json\")\n",
|
||||
"\n",
|
||||
"data_array = (torch.randn(20, *shape).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data = dict(input_data = [data_array])\n",
|
||||
"\n",
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3aa4f090",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8b74dcee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# srs path\n",
|
||||
"res = await ezkl.get_srs( settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "18c8b7c7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b1c561a8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"# HERE WE SETUP THE CIRCUIT PARAMS\n",
|
||||
"# WE GOT KEYS\n",
|
||||
"# WE GOT CIRCUIT PARAMETERS\n",
|
||||
"# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = ezkl.setup(\n",
|
||||
" compiled_model_path,\n",
|
||||
" vk_path,\n",
|
||||
" pk_path,\n",
|
||||
" \n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"assert os.path.isfile(vk_path)\n",
|
||||
"assert os.path.isfile(pk_path)\n",
|
||||
"assert os.path.isfile(settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c384cbc8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# GENERATE A PROOF\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"proof_path = os.path.join('test.pf')\n",
|
||||
"\n",
|
||||
"res = ezkl.prove(\n",
|
||||
" witness_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
"assert os.path.isfile(proof_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "76f00d41",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# VERIFY IT\n",
|
||||
"\n",
|
||||
"res = ezkl.verify(\n",
|
||||
" proof_path,\n",
|
||||
" settings_path,\n",
|
||||
" vk_path,\n",
|
||||
" \n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"print(\"verified\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.15"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -347,7 +347,7 @@
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -383,7 +383,7 @@
|
||||
"# now generate the witness file \n",
|
||||
"witness_path = \"gan_witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -142,7 +142,7 @@
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -177,7 +177,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -276,4 +276,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
@@ -139,7 +139,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -193,7 +193,7 @@
|
||||
"# now generate the witness file \n",
|
||||
"witness_path = \"lstmwitness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -323,7 +323,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[2,7])\n",
|
||||
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[2,7])\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
@@ -362,7 +362,7 @@
|
||||
"# now generate the witness file\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -558,4 +558,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
}
|
||||
@@ -289,7 +289,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[0,6])"
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[0,6])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -321,7 +321,7 @@
|
||||
"# now generate the witness file \n",
|
||||
"witness_path = \"gan_witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -425,4 +425,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
@@ -341,7 +341,7 @@
|
||||
"\n",
|
||||
" # generate settings for the current model\n",
|
||||
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
|
||||
" res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", scales=[run_args.input_scale], max_logrows=run_args.logrows)\n",
|
||||
" res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", scales=[run_args.input_scale], max_logrows=run_args.logrows)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" # load settings and print them to the console\n",
|
||||
@@ -361,7 +361,7 @@
|
||||
" assert res == True\n",
|
||||
" assert os.path.isfile(vk_path)\n",
|
||||
" assert os.path.isfile(pk_path)\n",
|
||||
" res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
|
||||
" res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
|
||||
" run_args.input_scale = settings[\"model_output_scales\"][0]\n",
|
||||
"\n",
|
||||
"for i in range(3):\n",
|
||||
@@ -484,4 +484,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
@@ -215,7 +215,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -247,7 +247,7 @@
|
||||
"# now generate the witness file\n",
|
||||
"witness_path = \"ae_witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -451,7 +451,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n",
|
||||
"print(\"verified\")"
|
||||
]
|
||||
@@ -485,7 +485,7 @@
|
||||
"# now generate the witness file \n",
|
||||
"witness_path = \"vae_witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -590,4 +590,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
@@ -845,7 +845,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", max_logrows = 20, scales = [3])\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", max_logrows = 20, scales = [3])\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
@@ -881,7 +881,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
File diff suppressed because it is too large
@@ -282,7 +282,7 @@
|
||||
"\n",
|
||||
" # generate settings for the current model\n",
|
||||
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
|
||||
" res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", scales=[run_args.input_scale], max_logrows=run_args.logrows)\n",
|
||||
" res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", scales=[run_args.input_scale], max_logrows=run_args.logrows)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" # load settings and print them to the console\n",
|
||||
@@ -303,7 +303,7 @@
|
||||
" assert os.path.isfile(vk_path)\n",
|
||||
" assert os.path.isfile(pk_path)\n",
|
||||
"\n",
|
||||
" res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
|
||||
" res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
|
||||
" run_args.input_scale = settings[\"model_output_scales\"][0]\n",
|
||||
"\n",
|
||||
"for i in range(2):\n",
|
||||
@@ -472,4 +472,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
@@ -176,7 +176,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -210,7 +210,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -309,4 +309,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
@@ -1,330 +1,336 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Reusable Verifiers \n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to create and reuse the same set of separated verifiers for different models. Specifically, we will use the same verifier for the following four models:\n",
|
||||
"\n",
|
||||
"- `1l_mlp sigmoid`\n",
|
||||
"- `1l_mlp relu`\n",
|
||||
"- `1l_conv sigmoid`\n",
|
||||
"- `1l_conv relu`\n",
|
||||
"\n",
|
||||
"When deploying EZKL verifiers on the blockchain, each associated model typically requires its own unique verifier, leading to increased on-chain state usage. \n",
|
||||
"However, with the reusable verifier, we can deploy a single verifier that can be used to verify proofs for any valid H2 circuit. This notebook shows how to do so. \n",
|
||||
"\n",
|
||||
"By reusing the same verifier across multiple models, we significantly reduce the amount of state bloat on the blockchain. Instead of deploying a unique verifier for each model, we deploy a unique and much smaller verifying key artifact (VKA) contract for each model while sharing a common separated verifier. The VKA contains the VK for the model as well circuit specific metadata that was otherwise hardcoded into the stack of the original non-reusable verifier."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"import torch.nn as nn\n",
|
||||
"import torch.onnx\n",
|
||||
"\n",
|
||||
"# Define the models\n",
|
||||
"class MLP_Sigmoid(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(MLP_Sigmoid, self).__init__()\n",
|
||||
" self.fc = nn.Linear(3, 3)\n",
|
||||
" self.sigmoid = nn.Sigmoid()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.fc(x)\n",
|
||||
" x = self.sigmoid(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class MLP_Relu(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(MLP_Relu, self).__init__()\n",
|
||||
" self.fc = nn.Linear(3, 3)\n",
|
||||
" self.relu = nn.ReLU()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.fc(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class Conv_Sigmoid(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(Conv_Sigmoid, self).__init__()\n",
|
||||
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
|
||||
" self.sigmoid = nn.Sigmoid()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.conv(x)\n",
|
||||
" x = self.sigmoid(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class Conv_Relu(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(Conv_Relu, self).__init__()\n",
|
||||
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
|
||||
" self.relu = nn.ReLU()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.conv(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"# Instantiate the models\n",
|
||||
"mlp_sigmoid = MLP_Sigmoid()\n",
|
||||
"mlp_relu = MLP_Relu()\n",
|
||||
"conv_sigmoid = Conv_Sigmoid()\n",
|
||||
"conv_relu = Conv_Relu()\n",
|
||||
"\n",
|
||||
"# Dummy input tensor for mlp\n",
|
||||
"dummy_input_mlp = torch.tensor([[-1.5737053155899048, -1.708398461341858, 0.19544155895709991]])\n",
|
||||
"input_mlp_path = 'mlp_input.json'\n",
|
||||
"\n",
|
||||
"# Dummy input tensor for conv\n",
|
||||
"dummy_input_conv = torch.tensor([[[1.4124163389205933, 0.6938204169273376, 1.0664031505584717]]])\n",
|
||||
"input_conv_path = 'conv_input.json'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"names = ['mlp_sigmoid', 'mlp_relu', 'conv_sigmoid', 'conv_relu']\n",
|
||||
"models = [mlp_sigmoid, mlp_relu, conv_sigmoid, conv_relu]\n",
|
||||
"inputs = [dummy_input_mlp, dummy_input_mlp, dummy_input_conv, dummy_input_conv]\n",
|
||||
"input_paths = [input_mlp_path, input_mlp_path, input_conv_path, input_conv_path]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import json\n",
|
||||
"import torch\n",
|
||||
"import ezkl\n",
|
||||
"\n",
|
||||
"for name, model, x, input_path in zip(names, models, inputs, input_paths):\n",
|
||||
" # Create a new directory for the model if it doesn't exist\n",
|
||||
" if not os.path.exists(name):\n",
|
||||
" os.mkdir(name)\n",
|
||||
" # Store the paths in each of their respective directories\n",
|
||||
" model_path = os.path.join(name, \"network.onnx\")\n",
|
||||
" compiled_model_path = os.path.join(name, \"network.compiled\")\n",
|
||||
" pk_path = os.path.join(name, \"test.pk\")\n",
|
||||
" vk_path = os.path.join(name, \"test.vk\")\n",
|
||||
" settings_path = os.path.join(name, \"settings.json\")\n",
|
||||
"\n",
|
||||
" witness_path = os.path.join(name, \"witness.json\")\n",
|
||||
" sol_code_path = os.path.join(name, 'test.sol')\n",
|
||||
" sol_key_code_path = os.path.join(name, 'test_key.sol')\n",
|
||||
" abi_path = os.path.join(name, 'test.abi')\n",
|
||||
" proof_path = os.path.join(name, \"proof.json\")\n",
|
||||
"\n",
|
||||
" # Flips the neural net into inference mode\n",
|
||||
" model.eval()\n",
|
||||
"\n",
|
||||
" # Export the model\n",
|
||||
" torch.onnx.export(model, x, model_path, export_params=True, opset_version=10,\n",
|
||||
" do_constant_folding=True, input_names=['input'],\n",
|
||||
" output_names=['output'], dynamic_axes={'input': {0: 'batch_size'},\n",
|
||||
" 'output': {0: 'batch_size'}})\n",
|
||||
"\n",
|
||||
" data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
" data = dict(input_data=[data_array])\n",
|
||||
" json.dump(data, open(input_path, 'w'))\n",
|
||||
"\n",
|
||||
" py_run_args = ezkl.PyRunArgs()\n",
|
||||
" py_run_args.input_visibility = \"private\"\n",
|
||||
" py_run_args.output_visibility = \"public\"\n",
|
||||
" py_run_args.param_visibility = \"fixed\" # private by default\n",
|
||||
"\n",
|
||||
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=py_run_args)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" await ezkl.calibrate_settings(input_path, model_path, settings_path, \"resources\")\n",
|
||||
"\n",
|
||||
" res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" res = await ezkl.get_srs(settings_path)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" # now generate the witness file\n",
|
||||
" res = await ezkl.gen_witness(input_path, compiled_model_path, witness_path)\n",
|
||||
" assert os.path.isfile(witness_path) == True\n",
|
||||
"\n",
|
||||
" # SETUP \n",
|
||||
" # We recommend disabling selector compression for the setup as it decreases the size of the VK artifact\n",
|
||||
" res = ezkl.setup(compiled_model_path, vk_path, pk_path, disable_selector_compression=True)\n",
|
||||
" assert res == True\n",
|
||||
" assert os.path.isfile(vk_path)\n",
|
||||
" assert os.path.isfile(pk_path)\n",
|
||||
" assert os.path.isfile(settings_path)\n",
|
||||
"\n",
|
||||
" # GENERATE A PROOF\n",
|
||||
" res = ezkl.prove(witness_path, compiled_model_path, pk_path, proof_path, \"single\")\n",
|
||||
" assert os.path.isfile(proof_path)\n",
|
||||
"\n",
|
||||
" res = await ezkl.create_evm_verifier(vk_path, settings_path, sol_code_path, abi_path, reusable=True)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" res = await ezkl.create_evm_vka(vk_path, settings_path, sol_key_code_path, abi_path)\n",
|
||||
" assert res == True\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import subprocess\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"# make sure anvil is running locally\n",
|
||||
"# $ anvil -p 3030\n",
|
||||
"\n",
|
||||
"RPC_URL = \"http://localhost:3030\"\n",
|
||||
"\n",
|
||||
"# Save process globally\n",
|
||||
"anvil_process = None\n",
|
||||
"\n",
|
||||
"def start_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is None:\n",
|
||||
" anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
|
||||
" if anvil_process.returncode is not None:\n",
|
||||
" raise Exception(\"failed to start anvil process\")\n",
|
||||
" time.sleep(3)\n",
|
||||
"\n",
|
||||
"def stop_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is not None:\n",
|
||||
" anvil_process.terminate()\n",
|
||||
" anvil_process = None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Check that the generated verifiers are identical for all models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import filecmp\n",
|
||||
"\n",
|
||||
"def compare_files(file1, file2):\n",
|
||||
" return filecmp.cmp(file1, file2, shallow=False)\n",
|
||||
"\n",
|
||||
"sol_code_path_0 = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
|
||||
"sol_code_path_1 = os.path.join(\"mlp_relu\", 'test.sol')\n",
|
||||
"\n",
|
||||
"sol_code_path_2 = os.path.join(\"conv_sigmoid\", 'test.sol')\n",
|
||||
"sol_code_path_3 = os.path.join(\"conv_relu\", 'test.sol')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"assert compare_files(sol_code_path_0, sol_code_path_1) == True\n",
|
||||
"assert compare_files(sol_code_path_2, sol_code_path_3) == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we deploy separate verifier that will be shared by the four models. We picked the `1l_mlp sigmoid` model as an example but you could have used any of the generated verifiers since they are all identical. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os \n",
|
||||
"addr_path_verifier = \"addr_verifier.txt\"\n",
|
||||
"sol_code_path = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" \"verifier/reusable\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"with open(addr_path_verifier, 'r') as file:\n",
|
||||
" addr = file.read().rstrip()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally we deploy each of the unique VK-artifacts and verify them using the shared verifier deployed in the previous step."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for name in names:\n",
|
||||
" addr_path_vk = \"addr_vk.txt\"\n",
|
||||
" sol_key_code_path = os.path.join(name, 'test_key.sol')\n",
|
||||
" res = await ezkl.deploy_evm(addr_path_vk, 'http://127.0.0.1:3030', sol_key_code_path, \"vka\")\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" with open(addr_path_vk, 'r') as file:\n",
|
||||
" addr_vk = file.read().rstrip()\n",
|
||||
" \n",
|
||||
" proof_path = os.path.join(name, \"proof.json\")\n",
|
||||
" sol_code_path = os.path.join(name, 'vk.sol')\n",
|
||||
" res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path,\n",
|
||||
" addr_vk = addr_vk\n",
|
||||
" )\n",
|
||||
" assert res == True"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Reusable Verifiers \n",
|
||||
"\n",
|
||||
"TODO: Update the reusable verifier solidity contract name.. Make it less generic to H2 and more bespoke to us.\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to create and reuse the same set of separated verifiers for different models. Specifically, we will use the same verifier for the following four models:\n",
|
||||
"\n",
|
||||
"- `1l_mlp sigmoid`\n",
|
||||
"- `1l_mlp relu`\n",
|
||||
"- `1l_conv sigmoid`\n",
|
||||
"- `1l_conv relu`\n",
|
||||
"\n",
|
||||
"When deploying EZKL verifiers on the blockchain, each associated model typically requires its own unique verifier, leading to increased on-chain state usage. \n",
|
||||
"However, with the reusable verifier, we can deploy a single verifier that can be used to verify proofs for any valid H2 circuit. This notebook shows how to do so. \n",
|
||||
"\n",
|
||||
"By reusing the same verifier across multiple models, we significantly reduce the amount of state bloat on the blockchain. Instead of deploying a unique verifier for each model, we register a unique and much smaller verifying key artifact (VKA) on the reusable verifier contract for each model while sharing a common separated verifier. The VKA contains the VK for the model as well circuit specific metadata that was otherwise hardcoded into the stack of the original non-reusable verifier. The VKA is passed as a parameter to the verifyProof method. This VKA calldata needs to be d with the reusable verifier before it can start verifying proofs by calling the registerVKA method. "
|
||||
]
|
||||
},
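Condensing the cells below, the register-then-verify lifecycle looks roughly like this (a notebook-style sketch using top-level `await`, with paths mirroring the artifacts generated later in this notebook):

```python
import ezkl

rpc = "http://127.0.0.1:3030"
vka_path = "mlp_sigmoid/vka.bytes"
proof_path = "mlp_sigmoid/proof.json"

# Address of the already-deployed reusable verifier.
with open("addr_verifier.txt") as f:
    addr = f.read().rstrip()

# 1. Register the model-specific VKA with the shared reusable verifier (registerVKA).
res = await ezkl.register_vka(addr, rpc, vka_path=vka_path)
assert res == True

# 2. Verify a proof against the shared verifier, pointing it at the registered VKA.
res = await ezkl.verify_evm(addr, rpc, proof_path, vka_path=vka_path)
assert res == True
```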
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"import torch.nn as nn\n",
|
||||
"import torch.onnx\n",
|
||||
"\n",
|
||||
"# Define the models\n",
|
||||
"class MLP_Sigmoid(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(MLP_Sigmoid, self).__init__()\n",
|
||||
" self.fc = nn.Linear(3, 3)\n",
|
||||
" self.sigmoid = nn.Sigmoid()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.fc(x)\n",
|
||||
" x = self.sigmoid(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class MLP_Relu(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(MLP_Relu, self).__init__()\n",
|
||||
" self.fc = nn.Linear(3, 3)\n",
|
||||
" self.relu = nn.ReLU()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.fc(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class Conv_Sigmoid(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(Conv_Sigmoid, self).__init__()\n",
|
||||
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
|
||||
" self.sigmoid = nn.Sigmoid()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.conv(x)\n",
|
||||
" x = self.sigmoid(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class Conv_Relu(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(Conv_Relu, self).__init__()\n",
|
||||
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
|
||||
" self.relu = nn.ReLU()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.conv(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"# Instantiate the models\n",
|
||||
"mlp_sigmoid = MLP_Sigmoid()\n",
|
||||
"mlp_relu = MLP_Relu()\n",
|
||||
"conv_sigmoid = Conv_Sigmoid()\n",
|
||||
"conv_relu = Conv_Relu()\n",
|
||||
"\n",
|
||||
"# Dummy input tensor for mlp\n",
|
||||
"dummy_input_mlp = torch.tensor([[-1.5737053155899048, -1.708398461341858, 0.19544155895709991]])\n",
|
||||
"input_mlp_path = 'mlp_input.json'\n",
|
||||
"\n",
|
||||
"# Dummy input tensor for conv\n",
|
||||
"dummy_input_conv = torch.tensor([[[1.4124163389205933, 0.6938204169273376, 1.0664031505584717]]])\n",
|
||||
"input_conv_path = 'conv_input.json'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"names = ['mlp_sigmoid', 'mlp_relu', 'conv_sigmoid', 'conv_relu']\n",
|
||||
"models = [mlp_sigmoid, mlp_relu, conv_sigmoid, conv_relu]\n",
|
||||
"inputs = [dummy_input_mlp, dummy_input_mlp, dummy_input_conv, dummy_input_conv]\n",
|
||||
"input_paths = [input_mlp_path, input_mlp_path, input_conv_path, input_conv_path]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import json\n",
|
||||
"import torch\n",
|
||||
"import ezkl\n",
|
||||
"\n",
|
||||
"for name, model, x, input_path in zip(names, models, inputs, input_paths):\n",
|
||||
" # Create a new directory for the model if it doesn't exist\n",
|
||||
" if not os.path.exists(name):\n",
|
||||
" os.mkdir(name)\n",
|
||||
" # Store the paths in each of their respective directories\n",
|
||||
" model_path = os.path.join(name, \"network.onnx\")\n",
|
||||
" compiled_model_path = os.path.join(name, \"network.compiled\")\n",
|
||||
" pk_path = os.path.join(name, \"test.pk\")\n",
|
||||
" vk_path = os.path.join(name, \"test.vk\")\n",
|
||||
" settings_path = os.path.join(name, \"settings.json\")\n",
|
||||
"\n",
|
||||
" witness_path = os.path.join(name, \"witness.json\")\n",
|
||||
" sol_code_path = os.path.join(name, 'test.sol')\n",
|
||||
" vka_path = os.path.join(name, 'vka.bytes')\n",
|
||||
" abi_path = os.path.join(name, 'test.abi')\n",
|
||||
" proof_path = os.path.join(name, \"proof.json\")\n",
|
||||
"\n",
|
||||
" # Flips the neural net into inference mode\n",
|
||||
" model.eval()\n",
|
||||
"\n",
|
||||
" # Export the model\n",
|
||||
" torch.onnx.export(model, x, model_path, export_params=True, opset_version=10,\n",
|
||||
" do_constant_folding=True, input_names=['input'],\n",
|
||||
" output_names=['output'], dynamic_axes={'input': {0: 'batch_size'},\n",
|
||||
" 'output': {0: 'batch_size'}})\n",
|
||||
"\n",
|
||||
" data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
" data = dict(input_data=[data_array])\n",
|
||||
" json.dump(data, open(input_path, 'w'))\n",
|
||||
"\n",
|
||||
" py_run_args = ezkl.PyRunArgs()\n",
|
||||
" py_run_args.input_visibility = \"private\"\n",
|
||||
" py_run_args.output_visibility = \"public\"\n",
|
||||
" py_run_args.param_visibility = \"fixed\" # private by default\n",
|
||||
"\n",
|
||||
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=py_run_args)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" ezkl.calibrate_settings(input_path, model_path, settings_path, \"resources\")\n",
|
||||
"\n",
|
||||
" res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" res = await ezkl.get_srs(settings_path)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" # now generate the witness file\n",
|
||||
" res = ezkl.gen_witness(input_path, compiled_model_path, witness_path)\n",
|
||||
" assert os.path.isfile(witness_path) == True\n",
|
||||
"\n",
|
||||
" # SETUP \n",
|
||||
" # We recommend disabling selector compression for the setup as it decreases the size of the VK artifact\n",
|
||||
" res = ezkl.setup(compiled_model_path, vk_path, pk_path, disable_selector_compression=True)\n",
|
||||
" assert res == True\n",
|
||||
" assert os.path.isfile(vk_path)\n",
|
||||
" assert os.path.isfile(pk_path)\n",
|
||||
" assert os.path.isfile(settings_path)\n",
|
||||
"\n",
|
||||
" # GENERATE A PROOF\n",
|
||||
" res = ezkl.prove(witness_path, compiled_model_path, pk_path, proof_path, \"single\")\n",
|
||||
" assert os.path.isfile(proof_path)\n",
|
||||
"\n",
|
||||
" res = await ezkl.create_evm_verifier(vk_path, settings_path, sol_code_path, abi_path, reusable=True)\n",
|
||||
" # TODO: Add a flag force equals true to in the deprication process to preserve OG single purpose verifier?\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" # TODO: \n",
|
||||
" res = await ezkl.create_evm_vka(vk_path, settings_path, vka_path, decimals=18)\n",
|
||||
" assert res == True\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import subprocess\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"# make sure anvil is running locally\n",
|
||||
"# $ anvil -p 3030\n",
|
||||
"\n",
|
||||
"RPC_URL = \"http://localhost:3030\"\n",
|
||||
"\n",
|
||||
"# Save process globally\n",
|
||||
"anvil_process = None\n",
|
||||
"\n",
|
||||
"def start_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is None:\n",
|
||||
" anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
|
||||
" if anvil_process.returncode is not None:\n",
|
||||
" raise Exception(\"failed to start anvil process\")\n",
|
||||
" time.sleep(3)\n",
|
||||
"\n",
|
||||
"def stop_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is not None:\n",
|
||||
" anvil_process.terminate()\n",
|
||||
" anvil_process = None\n"
|
||||
]
|
||||
},
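A note on this helper: `Popen.returncode` stays `None` until the process is polled or waited on, so the check above will not actually catch a failed launch; calling `anvil_process.poll()` after the `time.sleep(3)` would. Typical usage around the deployment cells, as a sketch:

```python
start_anvil()
try:
    # ... deploy the reusable verifier, register VKAs, verify proofs ...
    pass
finally:
    stop_anvil()
```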
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Check that the generated verifiers are identical for all models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import filecmp\n",
|
||||
"\n",
|
||||
"def compare_files(file1, file2):\n",
|
||||
" return filecmp.cmp(file1, file2, shallow=False)\n",
|
||||
"\n",
|
||||
"sol_code_path_0 = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
|
||||
"sol_code_path_1 = os.path.join(\"mlp_relu\", 'test.sol')\n",
|
||||
"\n",
|
||||
"sol_code_path_2 = os.path.join(\"conv_sigmoid\", 'test.sol')\n",
|
||||
"sol_code_path_3 = os.path.join(\"conv_relu\", 'test.sol')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"assert compare_files(sol_code_path_0, sol_code_path_1) == True\n",
|
||||
"assert compare_files(sol_code_path_2, sol_code_path_3) == True"
|
||||
]
|
||||
},
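Byte-for-byte equality of the emitted Solidity is exactly the property that makes the verifier reusable. Since the next cell notes that all four generated verifiers are identical, you could check every pair rather than just two (a small sketch using the `compare_files` helper above):

```python
from itertools import combinations

paths = [os.path.join(name, 'test.sol') for name in names]
for a, b in combinations(paths, 2):
    assert compare_files(a, b), f"{a} and {b} differ"
```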
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we deploy reusable verifier that will be shared by the four models. We picked the `1l_mlp sigmoid` model as an example but you could have used any of the generated verifiers since they are all identical. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os \n",
|
||||
"addr_path_verifier = \"addr_verifier.txt\"\n",
|
||||
"sol_code_path = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" \"verifier/reusable\" # TODO deprecate this option for selecting the type of verifier you want to deploy. \n",
|
||||
" # verifier, verifier/reusable, vka\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"with open(addr_path_verifier, 'r') as file:\n",
|
||||
" addr = file.read().rstrip()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally we deploy each of the unique VK-artifacts and verify them using the shared verifier deployed in the previous step."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for name in names:\n",
|
||||
" addr_path_vk = \"addr_vk.txt\"\n",
|
||||
" vka_path = os.path.join(name, 'vka.bytes')\n",
|
||||
" res = await ezkl.register_vka(\n",
|
||||
" addr, # address of the reusable verifier. TODO: If we deploy the RV across all chains to a single canoncial address, we can hardcode that address and remove this param.\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" vka_path=vka_path, # TODO: Pass in private key and potentially create new command that both creates and registers the vka. Simplify testing pipeline for us and other folks. \n",
|
||||
" )\n",
|
||||
" assert res == True\n",
|
||||
" \n",
|
||||
" proof_path = os.path.join(name, \"proof.json\")\n",
|
||||
" res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path,\n",
|
||||
" vka_path = vka_path # TODO: Turn this from optional to required if we deprecate the orignal verifier. \n",
|
||||
" # TODO: Make it where the use only needs to deply a vka. \n",
|
||||
" )\n",
|
||||
" assert res == True"
|
||||
]
|
||||
}
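Note that `addr`, the shared reusable verifier deployed above, stays constant across this loop; only the VKA that is registered and passed to `verify_evm` changes per model. That per-model VKA versus per-model verifier distinction is exactly the on-chain state saving described in the introduction.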
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
@@ -231,7 +231,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -267,7 +267,7 @@
|
||||
" # Serialize data into file:\n",
|
||||
"json.dump( data, open(data_path_faulty, 'w' ))\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path_faulty)\n",
|
||||
"res = ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path_faulty)\n",
|
||||
"assert os.path.isfile(witness_path_faulty)"
|
||||
]
|
||||
},
|
||||
@@ -312,7 +312,7 @@
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump( data, open(data_path_truthy, 'w' ))\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path_truthy, compiled_model_path, witness_path_truthy)\n",
|
||||
"res = ezkl.gen_witness(data_path_truthy, compiled_model_path, witness_path_truthy)\n",
|
||||
"assert os.path.isfile(witness_path_truthy)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -171,7 +171,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -205,7 +205,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -404,4 +404,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
@@ -171,7 +171,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -205,7 +205,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -304,4 +304,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
@@ -169,7 +169,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -203,7 +203,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -302,4 +302,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
@@ -170,7 +170,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -204,7 +204,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -303,4 +303,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
@@ -149,7 +149,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -183,7 +183,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -298,7 +298,7 @@
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
|
||||
"assert os.path.isfile(witness_path)\n",
|
||||
"\n",
|
||||
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
|
||||
@@ -412,7 +412,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path, vk_path)\n",
|
||||
"res = ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path, vk_path)\n",
|
||||
"assert os.path.isfile(witness_path)\n",
|
||||
"\n",
|
||||
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
|
||||
|
||||
@@ -167,7 +167,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
@@ -187,7 +187,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -221,7 +221,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -152,7 +152,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -186,7 +186,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -392,7 +392,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
}
|
||||
@@ -418,4 +418,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
@@ -637,7 +637,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [11])"
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [11])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -683,7 +683,7 @@
|
||||
" data = json.load(f)\n",
|
||||
" print(len(data['input_data'][0]))\n",
|
||||
"\n",
|
||||
"await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
"ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -758,4 +758,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
}
|
||||
@@ -525,7 +525,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [4])"
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [4])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -572,7 +572,7 @@
|
||||
" data = json.load(f)\n",
|
||||
" print(len(data['input_data'][0]))\n",
|
||||
"\n",
|
||||
"await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
"ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -647,4 +647,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
}
|
||||
@@ -1,685 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# univ3-da-ezkl\n",
|
||||
"\n",
|
||||
"Here's an example leveraging EZKL whereby the inputs to the model are read and attested to from an on-chain source. For this setup we make a single call to a view function that returns an array of UniV3 historical TWAP price data that we will attest to on-chain. \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First we import the necessary dependencies and set up logging to be as informative as possible. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# check if notebook is in colab\n",
|
||||
"try:\n",
|
||||
" # install ezkl\n",
|
||||
" import google.colab\n",
|
||||
" import subprocess\n",
|
||||
" import sys\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
|
||||
"\n",
|
||||
"# rely on local installation of ezkl if the notebook is not in colab\n",
|
||||
"except:\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"from torch import nn\n",
|
||||
"import ezkl\n",
|
||||
"import os\n",
|
||||
"import json\n",
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"# uncomment for more descriptive logging \n",
|
||||
"FORMAT = '%(levelname)s %(name)s %(asctime)-15s %(filename)s:%(lineno)d %(message)s'\n",
|
||||
"logging.basicConfig(format=FORMAT)\n",
|
||||
"logging.getLogger().setLevel(logging.DEBUG)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we define our model. It is a very simple PyTorch model that has just one layer, an average pooling 2D layer. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"# Defines the model\n",
|
||||
"\n",
|
||||
"class MyModel(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(MyModel, self).__init__()\n",
|
||||
" self.layer = nn.AvgPool2d(2, 1, (1, 1))\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" return self.layer(x)[0]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"circuit = MyModel()\n",
|
||||
"\n",
|
||||
"# this is where you'd train your model"
|
||||
]
|
||||
},
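As a quick sanity check on this layer (illustrative only): `AvgPool2d(2, 1, (1, 1))` applied to a 2x2 spatial map with stride 1 and padding 1 yields a 3x3 map, and the `[0]` in `forward` drops the batch dimension:

```python
x = 0.1 * torch.rand(1, 3, 2, 2)
print(circuit(x).shape)  # torch.Size([3, 3, 3])
```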
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We omit training for purposes of this demonstration. We've marked where training would happen in the cell above. \n",
|
||||
"Now we export the model to onnx and create a corresponding (randomly generated) input. This input data will eventually be stored on chain and read from according to the call_data field in the graph input.\n",
|
||||
"\n",
|
||||
"You can replace the random `x` with real data if you so wish. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x = 0.1*torch.rand(1,*[3, 2, 2], requires_grad=True)\n",
|
||||
"\n",
|
||||
"# Flips the neural net into inference mode\n",
|
||||
"circuit.eval()\n",
|
||||
"\n",
|
||||
" # Export the model\n",
|
||||
"torch.onnx.export(circuit, # model being run\n",
|
||||
" x, # model input (or a tuple for multiple inputs)\n",
|
||||
" \"network.onnx\", # where to save the model (can be a file or file-like object)\n",
|
||||
" export_params=True, # store the trained parameter weights inside the model file\n",
|
||||
" opset_version=10, # the ONNX version to export the model to\n",
|
||||
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
|
||||
" input_names = ['input'], # the model's input names\n",
|
||||
" output_names = ['output'], # the model's output names\n",
|
||||
" dynamic_axes={'input' : {0 : 'batch_size'}, # variable length axes\n",
|
||||
" 'output' : {0 : 'batch_size'}})\n",
|
||||
"\n",
|
||||
"data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data = dict(input_data = [data_array])\n",
|
||||
"\n",
|
||||
" # Serialize data into file:\n",
|
||||
"json.dump(data, open(\"input.json\", 'w' ))\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We now define a function that will create a new anvil instance which we will deploy our test contract too. This contract will contain in its storage the data that we will read from and attest to. In production you would not need to set up a local anvil instance. Instead you would replace RPC_URL with the actual RPC endpoint of the chain you are deploying your verifiers too, reading from the data on said chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import subprocess\n",
|
||||
"import time\n",
|
||||
"import threading\n",
|
||||
"\n",
|
||||
"# make sure anvil is running locally\n",
|
||||
"# $ anvil -p 3030\n",
|
||||
"\n",
|
||||
"RPC_URL = \"http://localhost:3030\"\n",
|
||||
"\n",
|
||||
"# Save process globally\n",
|
||||
"anvil_process = None\n",
|
||||
"\n",
|
||||
"def start_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is None:\n",
|
||||
" anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--fork-url\", \"https://arb1.arbitrum.io/rpc\", \"--code-size-limit=41943040\"])\n",
|
||||
" if anvil_process.returncode is not None:\n",
|
||||
" raise Exception(\"failed to start anvil process\")\n",
|
||||
" time.sleep(3)\n",
|
||||
"\n",
|
||||
"def stop_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is not None:\n",
|
||||
" anvil_process.terminate()\n",
|
||||
" anvil_process = None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We define our `PyRunArgs` objects which contains the visibility parameters for out model. \n",
|
||||
"- `input_visibility` defines the visibility of the model inputs\n",
|
||||
"- `param_visibility` defines the visibility of the model weights and constants and parameters \n",
|
||||
"- `output_visibility` defines the visibility of the model outputs\n",
|
||||
"\n",
|
||||
"Here we create the following setup:\n",
|
||||
"- `input_visibility`: \"public\"\n",
|
||||
"- `param_visibility`: \"private\"\n",
|
||||
"- `output_visibility`: public\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import ezkl\n",
|
||||
"\n",
|
||||
"model_path = os.path.join('network.onnx')\n",
|
||||
"compiled_model_path = os.path.join('network.compiled')\n",
|
||||
"pk_path = os.path.join('test.pk')\n",
|
||||
"vk_path = os.path.join('test.vk')\n",
|
||||
"settings_path = os.path.join('settings.json')\n",
|
||||
"srs_path = os.path.join('kzg.srs')\n",
|
||||
"data_path = os.path.join('input.json')\n",
|
||||
"\n",
|
||||
"run_args = ezkl.PyRunArgs()\n",
|
||||
"run_args.input_visibility = \"public\"\n",
|
||||
"run_args.param_visibility = \"private\"\n",
|
||||
"run_args.output_visibility = \"public\"\n",
|
||||
"run_args.decomp_legs=5\n",
|
||||
"run_args.num_inner_cols = 1\n",
|
||||
"run_args.variables = [(\"batch_size\", 1)]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we generate a settings file. This file basically instantiates a bunch of parameters that determine their circuit shape, size etc... Because of the way we represent nonlinearities in the circuit (using Halo2's [lookup tables](https://zcash.github.io/halo2/design/proving-system/lookup.html)), it is often best to _calibrate_ this settings file as some data can fall out of range of these lookups.\n",
|
||||
"\n",
|
||||
"You can pass a dataset for calibration that will be representative of real inputs you might find if and when you deploy the prover. Here we create a dummy calibration dataset for demonstration purposes. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# TODO: Dictionary outputs\n",
|
||||
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# generate a bunch of dummy calibration data\n",
|
||||
"cal_data = {\n",
|
||||
" \"input_data\": [(0.1*torch.rand(2, *[3, 2, 2])).flatten().tolist()],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"cal_path = os.path.join('val_data.json')\n",
|
||||
"# save as json file\n",
|
||||
"with open(cal_path, \"w\") as f:\n",
|
||||
" json.dump(cal_data, f)\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The graph input for on chain data sources is formatted completely differently compared to file based data sources.\n",
|
||||
"\n",
|
||||
"- For file data sources, the raw floating point values that eventually get quantized, converted into field elements and stored in `witness.json` to be consumed by the circuit are stored. The output data contains the expected floating point values returned as outputs from running your vanilla pytorch model on the given inputs.\n",
|
||||
"- For on chain data sources, the input_data field contains all the data necessary to read and format the on chain data into something digestable by EZKL (aka field elements :-D). \n",
|
||||
"Here is what the schema for an on-chain data source graph input file should look like for a single call data source:\n",
|
||||
" \n",
|
||||
"```json\n",
|
||||
"{\n",
|
||||
" \"input_data\": {\n",
|
||||
" \"rpc\": \"http://localhost:3030\", // The rpc endpoint of the chain you are deploying your verifier to\n",
|
||||
" \"call\": {\n",
|
||||
" \"call_data\": \"1f3be514000000000000000000000000c6962004f452be9203591991d15f6b388e09e8d00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000\", // The abi encoded call data to a view function that returns an array of on-chain data points we are attesting to. \n",
|
||||
" \"decimals\": 0, // The number of decimal places of the large uint256 value. This is our way of representing large wei values as floating points on chain, since the evm only natively supports integer values.\n",
|
||||
" \"address\": \"9A213F53334279C128C37DA962E5472eCD90554f\", // The address of the contract that we are calling to get the data. \n",
|
||||
" \"len\": 12 // The number of data points returned by the view function (the length of the array)\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"```"
|
||||
]
|
||||
},
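For concreteness, here is a sketch of writing such a graph input from Python; the `rpc`, `call_data`, and `address` values below are placeholders, not real deployments:

```python
import json

input_data = {
    "rpc": "http://localhost:3030",
    "call": {
        # ABI-encoded calldata for the view function (placeholder).
        "call_data": "1f3be514...",
        "decimals": 0,
        # Contract address with the 0x prefix stripped (placeholder).
        "address": "9A213F53334279C128C37DA962E5472eCD90554f",
        "len": 12,
    },
}

json.dump({"input_data": input_data}, open("input.json", "w"))
```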
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from web3 import Web3, HTTPProvider\n",
|
||||
"from solcx import compile_standard\n",
|
||||
"from decimal import Decimal\n",
|
||||
"import json\n",
|
||||
"import os\n",
|
||||
"import torch\n",
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"def count_decimal_places(num):\n",
|
||||
" num_str = str(num)\n",
|
||||
" if '.' in num_str:\n",
|
||||
" return len(num_str) - 1 - num_str.index('.')\n",
|
||||
" else:\n",
|
||||
" return 0\n",
|
||||
"\n",
|
||||
"w3 = Web3(HTTPProvider(RPC_URL)) \n",
|
||||
"\n",
|
||||
"def on_chain_data(tensor):\n",
|
||||
" data = tensor.view(-1).tolist()\n",
|
||||
" secondsAgo = [len(data) - 1 - i for i in range(len(data))]\n",
|
||||
"\n",
|
||||
" contract_source_code = '''\n",
|
||||
" // SPDX-License-Identifier: MIT\n",
|
||||
" pragma solidity ^0.8.20;\n",
|
||||
"\n",
|
||||
" interface IUniswapV3PoolDerivedState {\n",
|
||||
" function observe(\n",
|
||||
" uint32[] calldata secondsAgos\n",
|
||||
" ) external view returns (\n",
|
||||
" int56[] memory tickCumulatives,\n",
|
||||
" uint160[] memory secondsPerLiquidityCumulativeX128s\n",
|
||||
" );\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" contract UniTickAttestor {\n",
|
||||
" int256[] private cachedTicks;\n",
|
||||
"\n",
|
||||
" function consult(\n",
|
||||
" IUniswapV3PoolDerivedState pool,\n",
|
||||
" uint32[] memory secondsAgo\n",
|
||||
" ) public view returns (int256[] memory tickCumulatives) {\n",
|
||||
" tickCumulatives = new int256[](secondsAgo.length);\n",
|
||||
" (int56[] memory _ticks,) = pool.observe(secondsAgo);\n",
|
||||
" for (uint256 i = 0; i < secondsAgo.length; i++) {\n",
|
||||
" tickCumulatives[i] = int256(_ticks[i]);\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" function cache_price(\n",
|
||||
" IUniswapV3PoolDerivedState pool,\n",
|
||||
" uint32[] memory secondsAgo\n",
|
||||
" ) public {\n",
|
||||
" (int56[] memory _ticks,) = pool.observe(secondsAgo);\n",
|
||||
" cachedTicks = new int256[](_ticks.length);\n",
|
||||
" for (uint256 i = 0; i < _ticks.length; i++) {\n",
|
||||
" cachedTicks[i] = int256(_ticks[i]);\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" function readPriceCache() public view returns (int256[] memory) {\n",
|
||||
" return cachedTicks;\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
" '''\n",
|
||||
"\n",
|
||||
" compiled_sol = compile_standard({\n",
|
||||
" \"language\": \"Solidity\",\n",
|
||||
" \"sources\": {\"UniTickAttestor.sol\": {\"content\": contract_source_code}},\n",
|
||||
" \"settings\": {\"outputSelection\": {\"*\": {\"*\": [\"metadata\", \"evm.bytecode\", \"abi\"]}}}\n",
|
||||
" })\n",
|
||||
"\n",
|
||||
" bytecode = compiled_sol['contracts']['UniTickAttestor.sol']['UniTickAttestor']['evm']['bytecode']['object']\n",
|
||||
" abi = json.loads(compiled_sol['contracts']['UniTickAttestor.sol']['UniTickAttestor']['metadata'])['output']['abi']\n",
|
||||
"\n",
|
||||
" # Deploy contract\n",
|
||||
" UniTickAttestor = w3.eth.contract(abi=abi, bytecode=bytecode)\n",
|
||||
" tx_hash = UniTickAttestor.constructor().transact()\n",
|
||||
" tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)\n",
|
||||
" contract = w3.eth.contract(address=tx_receipt['contractAddress'], abi=abi)\n",
|
||||
"\n",
|
||||
" # Step 4: Store data via cache_price transaction\n",
|
||||
" tx_hash = contract.functions.cache_price(\n",
|
||||
" \"0xC6962004f452bE9203591991D15f6b388e09E8D0\",\n",
|
||||
" secondsAgo\n",
|
||||
" ).transact()\n",
|
||||
" tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)\n",
|
||||
"\n",
|
||||
" # Step 5: Prepare calldata for readPriceCache\n",
|
||||
" call = contract.functions.readPriceCache().build_transaction()\n",
|
||||
" calldata = call['data'][2:]\n",
|
||||
"\n",
|
||||
" # Get stored data\n",
|
||||
" result = contract.functions.readPriceCache().call()\n",
|
||||
" print(f'Cached ticks: {result}')\n",
|
||||
"\n",
|
||||
" decimals = [0] * len(data)\n",
|
||||
"\n",
|
||||
" call_to_account = {\n",
|
||||
" 'call_data': calldata,\n",
|
||||
" 'decimals': decimals,\n",
|
||||
" 'address': contract.address[2:],\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" return call_to_account\n",
|
||||
"\n",
|
||||
"start_anvil()\n",
|
||||
"call_to_account = on_chain_data(x)\n",
|
||||
"\n",
|
||||
"data = dict(input_data = {'rpc': RPC_URL, 'call': call_to_account })\n",
|
||||
"json.dump(data, open(\"input.json\", 'w'))"
|
||||
]
|
||||
},
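The `call_to_account` dict returned here lines up with the `call` object in the schema above: `call_data` is the ABI-encoded `readPriceCache()` calldata with the `0x` prefix stripped, `address` is the deployed contract address without its `0x` prefix, and `decimals` is given per returned element here (a list) rather than the scalar shown in the schema.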
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we use Halo2 with KZG-commitments we need an SRS string from (preferably) a multi-party trusted setup ceremony. For an overview of the procedures for such a ceremony check out [this page](https://blog.ethereum.org/2023/01/16/announcing-kzg-ceremony). The `get_srs` command retrieves a correctly sized SRS given the calibrated settings file from [here](https://github.com/han0110/halo2-kzg-srs). \n",
|
||||
"\n",
|
||||
"These SRS were generated with [this](https://github.com/privacy-scaling-explorations/perpetualpowersoftau) ceremony. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = await ezkl.get_srs( settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We now need to generate the circuit witness. These are the model outputs (and any hashes) that are generated when feeding the previously generated `input.json` through the circuit / model. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# !export RUST_BACKTRACE=1\n",
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we setup verifying and proving keys for the circuit. As the name suggests the proving key is needed for ... proving and the verifying key is needed for ... verifying. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# HERE WE SETUP THE CIRCUIT PARAMS\n",
|
||||
"# WE GOT KEYS\n",
|
||||
"# WE GOT CIRCUIT PARAMETERS\n",
|
||||
"# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
|
||||
"res = ezkl.setup(\n",
|
||||
" compiled_model_path,\n",
|
||||
" vk_path,\n",
|
||||
" pk_path,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"assert os.path.isfile(vk_path)\n",
|
||||
"assert os.path.isfile(pk_path)\n",
|
||||
"assert os.path.isfile(settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we generate a full proof. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# GENERATE A PROOF\n",
|
||||
"\n",
|
||||
"proof_path = os.path.join('test.pf')\n",
|
||||
"\n",
|
||||
"res = ezkl.prove(\n",
|
||||
" witness_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
"assert os.path.isfile(proof_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And verify it as a sanity check. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# VERIFY IT\n",
|
||||
"\n",
|
||||
"res = ezkl.verify(\n",
|
||||
" proof_path,\n",
|
||||
" settings_path,\n",
|
||||
" vk_path,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"print(\"verified\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now create and then deploy a vanilla evm verifier."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"abi_path = 'test.abi'\n",
|
||||
"sol_code_path = 'test.sol'\n",
|
||||
"\n",
|
||||
"res = await ezkl.create_evm_verifier(\n",
|
||||
" vk_path,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" abi_path,\n",
|
||||
" )\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"addr_path_verifier = \"addr_verifier.txt\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"With the vanilla verifier deployed, we can now create the data attestation contract, which will read in the instances from the calldata to the verifier, attest to them, call the verifier and then return the result. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"abi_path = 'test.abi'\n",
|
||||
"sol_code_path = 'test.sol'\n",
|
||||
"input_path = 'input.json'\n",
|
||||
"\n",
|
||||
"res = await ezkl.create_evm_data_attestation(\n",
|
||||
" input_path,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" abi_path,\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can deploy the data attest verifier contract. For security reasons, this binding will only deploy to a local anvil instance, using accounts generated by anvil. \n",
|
||||
"So should only be used for testing purposes."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"addr_path_da = \"addr_da.txt\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.deploy_da_evm(\n",
|
||||
" addr_path_da,\n",
|
||||
" input_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" )\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we need to regenerate the witness, prove and then verify all within the same cell. This is because we want to reduce the amount of latency between reading on-chain state and verifying it on-chain. This is because the attest input values read from the oracle are time sensitive (their values are derived from computing on block.timestamp) and can change between the time of reading and the time of verifying.\n",
|
||||
"\n",
|
||||
"Call the view only verify method on the contract to verify the proof. Since it is a view function this is safe to use in production since you don't have to pass your private key."
|
||||
]
|
||||
},
|
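A sketch of the combined flow described above, reusing the paths and bindings from earlier cells (awaitability follows the calls used in this notebook; treat it as an assumption if your ezkl version differs):

```python
# regenerate the witness against fresh on-chain state
res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)
# prove immediately afterwards so the attested values are still current
res = ezkl.prove(witness_path, compiled_model_path, pk_path, proof_path, "single")
# ...then verify on-chain right away (see the cell below)
```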
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# !export RUST_BACKTRACE=1\n",
|
||||
"\n",
|
||||
"# print(res)\n",
|
||||
"assert os.path.isfile(proof_path)\n",
|
||||
"# read the verifier address\n",
|
||||
"addr_verifier = None\n",
|
||||
"with open(addr_path_verifier, 'r') as f:\n",
|
||||
" addr = f.read()\n",
|
||||
"#read the data attestation address\n",
|
||||
"addr_da = None\n",
|
||||
"with open(addr_path_da, 'r') as f:\n",
|
||||
" addr_da = f.read()\n",
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" RPC_URL,\n",
|
||||
" proof_path,\n",
|
||||
" addr_da,\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -458,7 +458,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.gen_settings(onnx_filename, settings_filename)\n",
|
||||
"await ezkl.calibrate_settings(\n",
|
||||
"ezkl.calibrate_settings(\n",
|
||||
" input_filename, onnx_filename, settings_filename, \"resources\", scales = [4])\n",
|
||||
"res = await ezkl.get_srs(settings_filename)\n",
|
||||
"ezkl.compile_circuit(onnx_filename, compiled_filename, settings_filename)\n",
|
||||
@@ -527,7 +527,7 @@
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(input_filename, compiled_filename, witness_path)\n",
|
||||
"res = ezkl.gen_witness(input_filename, compiled_filename, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -762,4 +762,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
}
|
||||
@@ -629,7 +629,7 @@
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(val_data, model_path, settings_path, \"resources\", scales = [4])\n",
|
||||
"res = ezkl.calibrate_settings(val_data, model_path, settings_path, \"resources\", scales = [4])\n",
|
||||
"assert res == True\n",
|
||||
"print(\"verified\")\n"
|
||||
]
|
||||
@@ -680,7 +680,7 @@
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -905,4 +905,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
@@ -1,539 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "cf69bb3f-94e6-4dba-92cd-ce08df117d67",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## World rotation\n",
|
||||
"\n",
|
||||
"Here we demonstrate how to use the EZKL package to rotate an on-chain world. \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"> **A typical ZK application flow**. For the shape rotators out there — this is an easily digestible example. A user computes a ZK-proof that they have calculated a valid rotation of a world. They submit this proof to a verifier contract which governs an on-chain world, along with a new set of coordinates, and the world rotation updates. Observe that it’s possible for one player to initiate a *global* change.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "95613ee9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# check if notebook is in colab\n",
|
||||
"try:\n",
|
||||
" # install ezkl\n",
|
||||
" import google.colab\n",
|
||||
" import subprocess\n",
|
||||
" import sys\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
|
||||
"\n",
|
||||
"# rely on local installation of ezkl if the notebook is not in colab\n",
|
||||
"except:\n",
|
||||
" pass\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from torch import nn\n",
|
||||
"import ezkl\n",
|
||||
"import os\n",
|
||||
"import json\n",
|
||||
"import torch\n",
|
||||
"import math\n",
|
||||
"\n",
|
||||
"# these are constants for the rotation\n",
|
||||
"phi = torch.tensor(5 * math.pi / 180)\n",
|
||||
"s = torch.sin(phi)\n",
|
||||
"c = torch.cos(phi)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class RotateStuff(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(RotateStuff, self).__init__()\n",
|
||||
"\n",
|
||||
" # create a rotation matrix -- the matrix is constant and is transposed for convenience\n",
|
||||
" self.rot = torch.stack([torch.stack([c, -s]),\n",
|
||||
" torch.stack([s, c])]).t()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x_rot = x @ self.rot # same as x_rot = (rot @ x.t()).t() due to rot in O(n) (SO(n) even)\n",
|
||||
" return x_rot\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"circuit = RotateStuff()"
|
||||
]
|
||||
},
|
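The `RotateStuff` module above applies the standard 2D rotation by φ = 5°; written out:

```latex
R(\varphi) = \begin{pmatrix} \cos\varphi & -\sin\varphi \\ \sin\varphi & \cos\varphi \end{pmatrix},
\qquad x_{\mathrm{rot}} = x\,R(\varphi)^{\mathsf{T}} = \bigl(R(\varphi)\,x^{\mathsf{T}}\bigr)^{\mathsf{T}}.
```

The transpose is baked into `self.rot` at construction, so `x @ self.rot` rotates each row of `x` by φ.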
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This will showcase the principle directions of rotation by plotting the rotation of a single unit vector."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from matplotlib import pyplot\n",
|
||||
"pyplot.figure(figsize=(3, 3))\n",
|
||||
"pyplot.arrow(0, 0, 1, 0, width=0.02, alpha=0.5)\n",
|
||||
"pyplot.arrow(0, 0, 0, 1, width=0.02, alpha=0.5)\n",
|
||||
"pyplot.arrow(0, 0, circuit.rot[0, 0].item(), circuit.rot[0, 1].item(), width=0.02)\n",
|
||||
"pyplot.arrow(0, 0, circuit.rot[1, 0].item(), circuit.rot[1, 1].item(), width=0.02)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b37637c4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_path = os.path.join('network.onnx')\n",
|
||||
"compiled_model_path = os.path.join('network.compiled')\n",
|
||||
"pk_path = os.path.join('test.pk')\n",
|
||||
"vk_path = os.path.join('test.vk')\n",
|
||||
"settings_path = os.path.join('settings.json')\n",
|
||||
"srs_path = os.path.join('kzg.srs')\n",
|
||||
"witness_path = os.path.join('witness.json')\n",
|
||||
"data_path = os.path.join('input.json')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "82db373a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"# initial principle vectors for the rotation are as in the plot above\n",
|
||||
"x = torch.tensor([[1, 0], [0, 1]], dtype=torch.float32)\n",
|
||||
"\n",
|
||||
"# Flips the neural net into inference mode\n",
|
||||
"circuit.eval()\n",
|
||||
"\n",
|
||||
" # Export the model\n",
|
||||
"torch.onnx.export(circuit, # model being run\n",
|
||||
" x, # model input (or a tuple for multiple inputs)\n",
|
||||
" model_path, # where to save the model (can be a file or file-like object)\n",
|
||||
" export_params=True, # store the trained parameter weights inside the model file\n",
|
||||
" opset_version=10, # the ONNX version to export the model to\n",
|
||||
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
|
||||
" input_names = ['input'], # the model's input names\n",
|
||||
" output_names = ['output'], # the model's output names\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data = dict(input_data = [data_array])\n",
|
||||
"\n",
|
||||
" # Serialize data into file:\n",
|
||||
"json.dump( data, open(data_path, 'w' ))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### World rotation in 2D on-chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For demo purposes we deploy these coordinates to a contract running locally using Anvil. This creates our on-chain world. We then rotate the world using the EZKL package and submit the proof to the contract. The contract then updates the world rotation. For demo purposes we do this repeatedly, rotating the world by 1 transform each time."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import subprocess\n",
|
||||
"import time\n",
|
||||
"import threading\n",
|
||||
"\n",
|
||||
"# make sure anvil is running locally\n",
|
||||
"# $ anvil -p 3030\n",
|
||||
"\n",
|
||||
"RPC_URL = \"http://localhost:3030\"\n",
|
||||
"\n",
|
||||
"# Save process globally\n",
|
||||
"anvil_process = None\n",
|
||||
"\n",
|
||||
"def start_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is None:\n",
|
||||
" anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
|
||||
" if anvil_process.returncode is not None:\n",
|
||||
" raise Exception(\"failed to start anvil process\")\n",
|
||||
" time.sleep(3)\n",
|
||||
"\n",
|
||||
"def stop_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is not None:\n",
|
||||
" anvil_process.terminate()\n",
|
||||
" anvil_process = None\n"
|
||||
]
|
||||
},
|
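A usage sketch for the helpers above (anvil must be installed and on your PATH; the port matches `RPC_URL`):

```python
# spin up the local node for the rest of the demo
start_anvil()
# ...run the deploy / prove / verify cells below...
# stop_anvil()  # tear the node down when finished
```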
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We define our `PyRunArgs` objects which contains the visibility parameters for out model. \n",
|
||||
"- `input_visibility` defines the visibility of the model inputs\n",
|
||||
"- `param_visibility` defines the visibility of the model weights and constants and parameters \n",
|
||||
"- `output_visibility` defines the visibility of the model outputs\n",
|
||||
"\n",
|
||||
"Here we create the following setup:\n",
|
||||
"- `input_visibility`: \"public\"\n",
|
||||
"- `param_visibility`: \"fixed\"\n",
|
||||
"- `output_visibility`: public"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d5e374a2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"py_run_args = ezkl.PyRunArgs()\n",
|
||||
"py_run_args.input_visibility = \"public\"\n",
|
||||
"py_run_args.output_visibility = \"public\"\n",
|
||||
"py_run_args.param_visibility = \"private\" # private by default\n",
|
||||
"py_run_args.scale_rebase_multiplier = 10\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=py_run_args)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3aa4f090",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We also define a contract that holds out test data. This contract will contain in its storage the data that we will read from and attest to. In production you would not need to set up a local anvil instance. Instead you would replace RPC_URL with the actual RPC endpoint of the chain you are deploying your verifiers too, reading from the data on said chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2007dc77",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ezkl.setup_test_evm_data(\n",
|
||||
" data_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" # we write the call data to the same file as the input data\n",
|
||||
" data_path,\n",
|
||||
" input_source=ezkl.PyTestDataSource.OnChain,\n",
|
||||
" output_source=ezkl.PyTestDataSource.File,\n",
|
||||
" rpc_url=RPC_URL)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ab993958",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we use Halo2 with KZG-commitments we need an SRS string from (preferably) a multi-party trusted setup ceremony. For an overview of the procedures for such a ceremony check out [this page](https://blog.ethereum.org/2023/01/16/announcing-kzg-ceremony). The `get_srs` command retrieves a correctly sized SRS given the calibrated settings file from [here](https://github.com/han0110/halo2-kzg-srs). \n",
|
||||
"\n",
|
||||
"These SRS were generated with [this](https://github.com/privacy-scaling-explorations/perpetualpowersoftau) ceremony. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8b74dcee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# srs path\n",
|
||||
"res = await ezkl.get_srs( settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "18c8b7c7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"witness = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ad58432e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we setup verifying and proving keys for the circuit. As the name suggests the proving key is needed for ... proving and the verifying key is needed for ... verifying. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b1c561a8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = ezkl.setup(\n",
|
||||
" compiled_model_path,\n",
|
||||
" vk_path,\n",
|
||||
" pk_path,\n",
|
||||
" \n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"assert os.path.isfile(vk_path)\n",
|
||||
"assert os.path.isfile(pk_path)\n",
|
||||
"assert os.path.isfile(settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1746c8d1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now create an EVM verifier contract from our circuit. This contract will be deployed to the chain we are using. In this case we are using a local anvil instance."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d1920c0f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"abi_path = 'test.abi'\n",
|
||||
"sol_code_path = 'test.sol'\n",
|
||||
"\n",
|
||||
"res = await ezkl.create_evm_verifier(\n",
|
||||
" vk_path,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" abi_path,\n",
|
||||
" )\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0fd7f22b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"addr_path_verifier = \"addr_verifier.txt\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9c0dffab",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"With the vanilla verifier deployed, we can now create the data attestation contract, which will read in the instances from the calldata to the verifier, attest to them, call the verifier and then return the result. \n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c2db14d7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"abi_path = 'test.abi'\n",
|
||||
"sol_code_path = 'test.sol'\n",
|
||||
"input_path = 'input.json'\n",
|
||||
"\n",
|
||||
"res = await ezkl.create_evm_data_attestation(\n",
|
||||
" input_path,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" abi_path,\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5a018ba6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"addr_path_da = \"addr_da.txt\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.deploy_da_evm(\n",
|
||||
" addr_path_da,\n",
|
||||
" input_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2adad845",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can pull in the data from the contract and calculate a new set of coordinates. We then rotate the world by 1 transform and submit the proof to the contract. The contract could then update the world rotation (logic not inserted here). For demo purposes we do this repeatedly, rotating the world by 1 transform. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c384cbc8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# GENERATE A PROOF\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"proof_path = os.path.join('test.pf')\n",
|
||||
"\n",
|
||||
"res = ezkl.prove(\n",
|
||||
" witness_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
"assert os.path.isfile(proof_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "90eda56e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Call the view only verify method on the contract to verify the proof. Since it is a view function this is safe to use in production since you don't have to pass your private key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "76f00d41",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# read the verifier address\n",
|
||||
"addr_verifier = None\n",
|
||||
"with open(addr_path_verifier, 'r') as f:\n",
|
||||
" addr = f.read()\n",
|
||||
"#read the data attestation address\n",
|
||||
"addr_da = None\n",
|
||||
"with open(addr_path_da, 'r') as f:\n",
|
||||
" addr_da = f.read()\n",
|
||||
"\n",
|
||||
"res = ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" RPC_URL,\n",
|
||||
" proof_path,\n",
|
||||
" addr_da,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As a sanity check lets plot the rotations of the unit vectors. We can see that the unit vectors rotate as expected by the output of the circuit. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"witness['outputs'][0][0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"settings = json.load(open(settings_path, 'r'))\n",
|
||||
"out_scale = settings[\"model_output_scales\"][0]\n",
|
||||
"\n",
|
||||
"from matplotlib import pyplot\n",
|
||||
"pyplot.figure(figsize=(3, 3))\n",
|
||||
"pyplot.arrow(0, 0, 1, 0, width=0.02, alpha=0.5)\n",
|
||||
"pyplot.arrow(0, 0, 0, 1, width=0.02, alpha=0.5)\n",
|
||||
"\n",
|
||||
"arrow_x = ezkl.felt_to_float(witness['outputs'][0][0], out_scale)\n",
|
||||
"arrow_y = ezkl.felt_to_float(witness['outputs'][0][1], out_scale)\n",
|
||||
"pyplot.arrow(0, 0, arrow_x, arrow_y, width=0.02)\n",
|
||||
"arrow_x = ezkl.felt_to_float(witness['outputs'][0][2], out_scale)\n",
|
||||
"arrow_y = ezkl.felt_to_float(witness['outputs'][0][3], out_scale)\n",
|
||||
"pyplot.arrow(0, 0, arrow_x, arrow_y, width=0.02)"
|
||||
]
|
||||
}
|
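The decoding done by `felt_to_float` above is plain fixed-point rescaling. Under ezkl's power-of-two scaling (an assumption about the internals, but consistent with how `model_output_scales` is used here), a field element f at output scale s decodes as

```latex
x \approx \frac{f}{2^{s}}, \qquad s = \texttt{model\_output\_scales}[0],
```

with negative values represented by field elements near the modulus p.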
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -193,7 +193,7 @@
|
||||
"with open(cal_path, \"w\") as f:\n",
|
||||
" json.dump(cal_data, f)\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -227,7 +227,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -104,5 +104,5 @@ json.dump(data, open("input.json", 'w'))
|
||||
# ezkl.gen_settings("network.onnx", "settings.json")
|
||||
|
||||
# !RUST_LOG = full
|
||||
# res = await ezkl.calibrate_settings(
|
||||
# res = ezkl.calibrate_settings(
|
||||
# "input.json", "network.onnx", "settings.json", "resources")
|
||||
|
||||
74
ezkl.pyi
74
ezkl.pyi
@@ -160,30 +160,6 @@ def compile_circuit(model:str | os.PathLike | pathlib.Path,compiled_circuit:str
|
||||
"""
|
||||
...
|
||||
|
||||
def create_evm_data_attestation(input_data:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,abi_path:str | os.PathLike | pathlib.Path,witness_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> typing.Any:
|
||||
r"""
|
||||
Creates an EVM compatible data attestation verifier, you will need solc installed in your environment to run this
|
||||
|
||||
Arguments
|
||||
---------
|
||||
input_data: str
|
||||
The path to the .json data file, which should contain the necessary calldata and account addresses needed to read from all the on-chain view functions that return the data that the network ingests as inputs
|
||||
|
||||
settings_path: str
|
||||
The path to the settings file
|
||||
|
||||
sol_code_path: str
|
||||
The path to create the solidity verifier
|
||||
|
||||
abi_path: str
|
||||
The path to create the ABI for the solidity verifier
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
"""
|
||||
...
|
||||
|
||||
def create_evm_verifier(vk_path:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,abi_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path],reusable:bool) -> typing.Any:
|
||||
r"""
|
||||
Creates an EVM compatible verifier, you will need solc installed in your environment to run this
|
||||
@@ -247,7 +223,7 @@ def create_evm_verifier_aggr(aggregation_settings:typing.Sequence[str | os.PathL
|
||||
"""
|
||||
...
|
||||
|
||||
def create_evm_vka(vk_path:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,abi_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> typing.Any:
|
||||
def create_evm_vka(vk_path:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,vka_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> typing.Any:
|
||||
r"""
|
||||
Creates an EVM VK artifact. This command generates a VK with circuit-specific metadata encoded in memory for use by the reusable Halo2 verifier.
|
||||
This is useful for deploying verifiers that would otherwise be too big to fit on-chain and would require aggregation.
|
||||
@@ -260,8 +236,8 @@ def create_evm_vka(vk_path:str | os.PathLike | pathlib.Path,settings_path:str |
|
||||
settings_path: str
|
||||
The path to the settings file
|
||||
|
||||
sol_code_path: str
|
||||
The path to the create the solidity verifying key.
|
||||
vka_path: str
|
||||
The path to create the vka calldata.
|
||||
|
||||
abi_path: str
|
||||
The path to create the ABI for the solidity verifier
|
||||
@@ -275,12 +251,6 @@ def create_evm_vka(vk_path:str | os.PathLike | pathlib.Path,settings_path:str |
|
||||
"""
|
||||
...
|
||||
|
||||
def deploy_da_evm(addr_path:str | os.PathLike | pathlib.Path,input_data:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,rpc_url:typing.Optional[str],optimizer_runs:int,private_key:typing.Optional[str]) -> typing.Any:
|
||||
r"""
|
||||
deploys the solidity da verifier
|
||||
"""
|
||||
...
|
||||
|
||||
def deploy_evm(addr_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,rpc_url:typing.Optional[str],contract_type:str,optimizer_runs:int,private_key:typing.Optional[str]) -> typing.Any:
|
||||
r"""
|
||||
deploys the solidity verifier
|
||||
@@ -706,35 +676,6 @@ def setup_aggregate(sample_snarks:typing.Sequence[str | os.PathLike | pathlib.Pa
|
||||
"""
|
||||
...
|
||||
|
||||
def setup_test_evm_data(data_path:str | os.PathLike | pathlib.Path,compiled_circuit_path:str | os.PathLike | pathlib.Path,test_data:str | os.PathLike | pathlib.Path,input_source:PyTestDataSource,output_source:PyTestDataSource,rpc_url:typing.Optional[str]) -> typing.Any:
|
||||
r"""
|
||||
Setup test evm data
|
||||
|
||||
Arguments
|
||||
---------
|
||||
data_path: str
|
||||
The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
|
||||
|
||||
compiled_circuit_path: str
|
||||
The path to the compiled model file (generated using the compile-circuit command)
|
||||
|
||||
test_data: str
|
||||
For testing purposes only. The optional path to the .json data file that will be generated that contains the OnChain data storage information derived from the file information in the data .json file. Should include both the network input (possibly private) and the network output (public input to the proof)
|
||||
|
||||
input_source: str
|
||||
Where the input data comes from
|
||||
|
||||
output_source: str
|
||||
Where the output data comes from
|
||||
|
||||
rpc_url: str
|
||||
RPC URL for an EVM compatible node, if None, uses Anvil as a local RPC node
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
"""
|
||||
...
|
||||
|
||||
def swap_proof_commitments(proof_path:str | os.PathLike | pathlib.Path,witness_path:str | os.PathLike | pathlib.Path) -> None:
|
||||
r"""
|
||||
@@ -823,7 +764,7 @@ def verify_aggr(proof_path:str | os.PathLike | pathlib.Path,vk_path:str | os.Pat
|
||||
"""
|
||||
...
|
||||
|
||||
def verify_evm(addr_verifier:str,proof_path:str | os.PathLike | pathlib.Path,rpc_url:typing.Optional[str],addr_da:typing.Optional[str],addr_vk:typing.Optional[str]) -> typing.Any:
|
||||
def verify_evm(addr_verifier:str,proof_path:str | os.PathLike | pathlib.Path,rpc_url:typing.Optional[str],vka_path:typing.Optional[str]) -> typing.Any:
|
||||
r"""
|
||||
verifies an evm compatible proof, you will need solc installed in your environment to run this
|
||||
|
||||
@@ -838,11 +779,8 @@ def verify_evm(addr_verifier:str,proof_path:str | os.PathLike | pathlib.Path,rpc
|
||||
rpc_url: str
|
||||
RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
|
||||
|
||||
addr_da: str
|
||||
does the verifier use data attestation ?
|
||||
|
||||
addr_vk: str
|
||||
The address of the separate VK contract (if the verifier key is rendered as a separate contract)
|
||||
vka_path: str
|
||||
The path to the VKA calldata bytes file (generated using the create_evm_vka command)
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
|
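Taken together, the updated stubs describe a vka-file flow rather than a VK-contract flow; a hedged sketch using only the signatures above (the address and file paths are illustrative, awaitability assumed from the async bindings elsewhere):

```python
# create the VKA calldata artifact for the reusable verifier
await ezkl.create_evm_vka(vk_path, settings_path, "test.vka")   # illustrative path
# verify a proof against it
res = await ezkl.verify_evm("0x...", "proof.pf", rpc_url, "test.vka")
```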
||||
@@ -1,60 +0,0 @@
|
||||
# inbrowser-evm-verify
|
||||
|
||||
We would like the Solidity verifier to be canonical and, in most cases, all you ever need. For this, we need to be able to run that verifier in the browser.
|
||||
|
||||
## How to use (Node js)
|
||||
|
||||
```ts
|
||||
import localEVMVerify from '@ezkljs/verify';
|
||||
|
||||
// Load in the proof file as a buffer
|
||||
const proofFileBuffer = fs.readFileSync(`${path}/${example}/proof.pf`)
|
||||
|
||||
// Stringified EZKL evm verifier bytecode (this is just an example don't use in production)
|
||||
const bytecode = '0x608060405234801561001057600080fd5b5060d38061001f6000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c8063cfae321714610046575b600080fd5b6100496100f1565b60405161005691906100f1565b60405180910390f35b'
|
||||
|
||||
const result = await localEVMVerify(proofFileBuffer, bytecode)
|
||||
|
||||
console.log('result', result)
|
||||
```
|
||||
|
||||
**Note**: Run `ezkl create-evm-verifier` to get the Solidity verifier, from which you can retrieve the bytecode once compiled. We recommend compiling to the Shanghai hardfork target; otherwise you will have to pass an additional parameter specifying the EVM version to the `localEVMVerify` function, like so (for the Paris hardfork):
|
||||
|
||||
```ts
|
||||
import localEVMVerify, { hardfork } from '@ezkljs/verify';
|
||||
|
||||
const result = await localEVMVerify(proofFileBuffer, bytecode, hardfork['Paris'])
|
||||
```
|
||||
|
||||
**Note**: You can also verify separated vk verifiers using the `localEVMVerify` function. Just pass the vk verifier bytecode as the third parameter like so:
|
||||
```ts
|
||||
import localEVMVerify from '@ezkljs/verify';
|
||||
|
||||
const result = await localEVMVerify(proofFileBuffer, verifierBytecode, VKBytecode)
|
||||
```
|
||||
|
||||
|
||||
## How to use (Browser)
|
||||
|
||||
```ts
|
||||
import localEVMVerify from '@ezkljs/verify';
|
||||
|
||||
// Load in the proof file as a buffer using the web apis (fetch, FileReader, etc)
|
||||
// We use fetch in this example to load the proof file as a buffer
|
||||
const proofFileBuffer = await fetch(`${path}/${example}/proof.pf`).then(res => res.arrayBuffer())
|
||||
|
||||
// Stringified EZKL evm verifier bytecode (this is just an example don't use in production)
|
||||
const bytecode = '0x608060405234801561001057600080fd5b5060d38061001f6000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c8063cfae321714610046575b600080fd5b6100496100f1565b60405161005691906100f1565b60405180910390f35b'
|
||||
|
||||
const result = await localEVMVerify(proofFileBuffer, bytecode)
|
||||
|
||||
console.log('result', result)
|
||||
```
|
||||
|
||||
Output:
|
||||
|
||||
```ts
|
||||
result: true
|
||||
```
|
||||
|
||||
|
||||
@@ -1,42 +0,0 @@
|
||||
{
|
||||
"name": "@ezkljs/verify",
|
||||
"version": "v10.4.2",
|
||||
"publishConfig": {
|
||||
"access": "public"
|
||||
},
|
||||
"description": "Evm verify EZKL proofs in the browser.",
|
||||
"main": "dist/commonjs/index.js",
|
||||
"module": "dist/esm/index.js",
|
||||
"types": "dist/commonjs/index.d.ts",
|
||||
"files": [
|
||||
"dist",
|
||||
"LICENSE",
|
||||
"README.md"
|
||||
],
|
||||
"scripts": {
|
||||
"clean": "rm -r dist || true",
|
||||
"build:commonjs": "tsc --project tsconfig.commonjs.json && resolve-tspaths -p tsconfig.commonjs.json",
|
||||
"build:esm": "tsc --project tsconfig.esm.json && resolve-tspaths -p tsconfig.esm.json",
|
||||
"build": "npm run clean && npm run build:commonjs && npm run build:esm"
|
||||
},
|
||||
"dependencies": {
|
||||
"@ethereumjs/common": "4.0.0",
|
||||
"@ethereumjs/evm": "2.0.0",
|
||||
"@ethereumjs/statemanager": "2.0.0",
|
||||
"@ethereumjs/tx": "5.0.0",
|
||||
"@ethereumjs/util": "9.0.0",
|
||||
"@ethereumjs/vm": "7.0.0",
|
||||
"@ethersproject/abi": "5.7.0",
|
||||
"@ezkljs/engine": "10.4.2",
|
||||
"ethers": "6.7.1",
|
||||
"json-bigint": "1.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20.8.3",
|
||||
"ts-loader": "^9.5.0",
|
||||
"ts-node": "^10.9.1",
|
||||
"resolve-tspaths": "^0.8.16",
|
||||
"tsconfig-paths": "^4.2.0",
|
||||
"typescript": "^5.2.2"
|
||||
}
|
||||
}
|
||||
1479
in-browser-evm-verifier/pnpm-lock.yaml
generated
1479
in-browser-evm-verifier/pnpm-lock.yaml
generated
File diff suppressed because it is too large
@@ -1,144 +0,0 @@
|
||||
import { defaultAbiCoder as AbiCoder } from '@ethersproject/abi'
|
||||
import { Address, hexToBytes } from '@ethereumjs/util'
|
||||
import { Chain, Common, Hardfork } from '@ethereumjs/common'
|
||||
import { LegacyTransaction, LegacyTxData } from '@ethereumjs/tx'
|
||||
// import { DefaultStateManager } from '@ethereumjs/statemanager'
|
||||
// import { Blockchain } from '@ethereumjs/blockchain'
|
||||
import { VM } from '@ethereumjs/vm'
|
||||
import { EVM } from '@ethereumjs/evm'
|
||||
import { buildTransaction, encodeDeployment } from './utils/tx-builder'
|
||||
import { getAccountNonce, insertAccount } from './utils/account-utils'
|
||||
import { encodeVerifierCalldata } from '../nodejs/ezkl';
|
||||
|
||||
async function deployContract(
|
||||
vm: VM,
|
||||
common: Common,
|
||||
senderPrivateKey: Uint8Array,
|
||||
deploymentBytecode: string
|
||||
): Promise<Address> {
|
||||
// Contracts are deployed by sending their deployment bytecode to the address 0
|
||||
// The contract params should be abi-encoded and appended to the deployment bytecode.
|
||||
// const data =
|
||||
const data = encodeDeployment(deploymentBytecode)
|
||||
const txData = {
|
||||
data,
|
||||
nonce: await getAccountNonce(vm, senderPrivateKey),
|
||||
}
|
||||
|
||||
const tx = LegacyTransaction.fromTxData(
|
||||
buildTransaction(txData) as LegacyTxData,
|
||||
{ common, allowUnlimitedInitCodeSize: true },
|
||||
).sign(senderPrivateKey)
|
||||
|
||||
const deploymentResult = await vm.runTx({
|
||||
tx,
|
||||
skipBlockGasLimitValidation: true,
|
||||
skipNonce: true
|
||||
})
|
||||
|
||||
if (deploymentResult.execResult.exceptionError) {
|
||||
throw deploymentResult.execResult.exceptionError
|
||||
}
|
||||
|
||||
return deploymentResult.createdAddress!
|
||||
}
|
||||
|
||||
async function verify(
|
||||
vm: VM,
|
||||
contractAddress: Address,
|
||||
caller: Address,
|
||||
proof: Uint8Array | Uint8ClampedArray,
|
||||
vkAddress?: Address | Uint8Array,
|
||||
): Promise<boolean> {
|
||||
if (proof instanceof Uint8Array) {
|
||||
proof = new Uint8ClampedArray(proof.buffer)
|
||||
}
|
||||
if (vkAddress) {
|
||||
const vkAddressBytes = hexToBytes(vkAddress.toString())
|
||||
const vkAddressArray = Array.from(vkAddressBytes)
|
||||
|
||||
let string = JSON.stringify(vkAddressArray)
|
||||
|
||||
const uint8Array = new TextEncoder().encode(string);
|
||||
|
||||
// Convert the JSON-encoded address bytes to a Uint8Array
|
||||
vkAddress = new Uint8Array(uint8Array.buffer);
|
||||
|
||||
|
||||
console.error('vkAddress', vkAddress)
|
||||
}
|
||||
const data = encodeVerifierCalldata(proof, vkAddress)
|
||||
|
||||
const verifyResult = await vm.evm.runCall({
|
||||
to: contractAddress,
|
||||
caller: caller,
|
||||
origin: caller, // The tx.origin is also the caller here
|
||||
data: data,
|
||||
})
|
||||
|
||||
if (verifyResult.execResult.exceptionError) {
|
||||
throw verifyResult.execResult.exceptionError
|
||||
}
|
||||
|
||||
const results = AbiCoder.decode(['bool'], verifyResult.execResult.returnValue)
|
||||
|
||||
return results[0]
|
||||
}
|
||||
|
||||
/**
|
||||
* Spins up an ephemeral EVM instance for executing the bytecode of a solidity verifier
|
||||
* @param proof Json serialized proof file
|
||||
* @param bytecode The bytecode of a compiled solidity verifier.
|
||||
* @param bytecode_vk The bytecode of a contract that stores the vk. (Optional, only required if the vk is stored in a separate contract)
|
||||
* @param evmVersion The evm version to use for the verification. (Default: London)
|
||||
* @returns The result of the evm verification.
|
||||
* @throws If the verify transaction reverts
|
||||
*/
|
||||
export default async function localEVMVerify(
|
||||
proof: Uint8Array | Uint8ClampedArray,
|
||||
bytecode_verifier: string,
|
||||
bytecode_vk?: string,
|
||||
evmVersion?: Hardfork,
|
||||
): Promise<boolean> {
|
||||
try {
|
||||
const hardfork = evmVersion ? evmVersion : Hardfork['Shanghai']
|
||||
const common = new Common({ chain: Chain.Mainnet, hardfork })
|
||||
const accountPk = hexToBytes(
|
||||
'0xe331b6d69882b4cb4ea581d88e0b604039a3de5967688d3dcffdd2270c0fd109', // anvil deterministic Pk
|
||||
)
|
||||
|
||||
const evm = new EVM({
|
||||
allowUnlimitedContractSize: true,
|
||||
allowUnlimitedInitCodeSize: true,
|
||||
})
|
||||
|
||||
const vm = await VM.create({ common, evm })
|
||||
const accountAddress = Address.fromPrivateKey(accountPk)
|
||||
|
||||
await insertAccount(vm, accountAddress)
|
||||
|
||||
const verifierAddress = await deployContract(
|
||||
vm,
|
||||
common,
|
||||
accountPk,
|
||||
bytecode_verifier
|
||||
)
|
||||
|
||||
if (bytecode_vk) {
|
||||
const accountPk = hexToBytes("0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"); // anvil deterministic Pk
|
||||
const accountAddress = Address.fromPrivateKey(accountPk)
|
||||
await insertAccount(vm, accountAddress)
|
||||
const output = await deployContract(vm, common, accountPk, bytecode_vk)
|
||||
const result = await verify(vm, verifierAddress, accountAddress, proof, output)
|
||||
return result
|
||||
}
|
||||
|
||||
const result = await verify(vm, verifierAddress, accountAddress, proof)
|
||||
|
||||
return result
|
||||
} catch (error) {
|
||||
// log or re-throw the error, depending on your needs
|
||||
console.error('An error occurred:', error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
import { VM } from '@ethereumjs/vm'
|
||||
import { Account, Address } from '@ethereumjs/util'
|
||||
|
||||
export const keyPair = {
|
||||
secretKey:
|
||||
'0x3cd7232cd6f3fc66a57a6bedc1a8ed6c228fff0a327e169c2bcc5e869ed49511',
|
||||
publicKey:
|
||||
'0x0406cc661590d48ee972944b35ad13ff03c7876eae3fd191e8a2f77311b0a3c6613407b5005e63d7d8d76b89d5f900cde691497688bb281e07a5052ff61edebdc0',
|
||||
}
|
||||
|
||||
export const insertAccount = async (vm: VM, address: Address) => {
|
||||
const acctData = {
|
||||
nonce: 0,
|
||||
balance: BigInt('1000000000000000000'), // 1 eth
|
||||
}
|
||||
const account = Account.fromAccountData(acctData)
|
||||
|
||||
await vm.stateManager.putAccount(address, account)
|
||||
}
|
||||
|
||||
export const getAccountNonce = async (
|
||||
vm: VM,
|
||||
accountPrivateKey: Uint8Array,
|
||||
) => {
|
||||
const address = Address.fromPrivateKey(accountPrivateKey)
|
||||
const account = await vm.stateManager.getAccount(address)
|
||||
if (account) {
|
||||
return account.nonce
|
||||
} else {
|
||||
return BigInt(0)
|
||||
}
|
||||
}
|
||||
@@ -1,59 +0,0 @@
|
||||
import { Interface, defaultAbiCoder as AbiCoder } from '@ethersproject/abi'
|
||||
import {
|
||||
AccessListEIP2930TxData,
|
||||
FeeMarketEIP1559TxData,
|
||||
TxData,
|
||||
} from '@ethereumjs/tx'
|
||||
|
||||
type TransactionsData =
|
||||
| TxData
|
||||
| AccessListEIP2930TxData
|
||||
| FeeMarketEIP1559TxData
|
||||
|
||||
export const encodeFunction = (
|
||||
method: string,
|
||||
params?: {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
types: any[]
|
||||
values: unknown[]
|
||||
},
|
||||
): string => {
|
||||
const parameters = params?.types ?? []
|
||||
const methodWithParameters = `function ${method}(${parameters.join(',')})`
|
||||
const signatureHash = new Interface([methodWithParameters]).getSighash(method)
|
||||
const encodedArgs = AbiCoder.encode(parameters, params?.values ?? [])
|
||||
|
||||
return signatureHash + encodedArgs.slice(2)
|
||||
}
|
||||
|
||||
export const encodeDeployment = (
|
||||
bytecode: string,
|
||||
params?: {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
types: any[]
|
||||
values: unknown[]
|
||||
},
|
||||
) => {
|
||||
const deploymentData = '0x' + bytecode
|
||||
if (params) {
|
||||
const argumentsEncoded = AbiCoder.encode(params.types, params.values)
|
||||
return deploymentData + argumentsEncoded.slice(2)
|
||||
}
|
||||
return deploymentData
|
||||
}
|
||||
|
||||
export const buildTransaction = (
|
||||
data: Partial<TransactionsData>,
|
||||
): TransactionsData => {
|
||||
const defaultData: Partial<TransactionsData> = {
|
||||
gasLimit: 3_000_000_000_000_000,
|
||||
gasPrice: 7,
|
||||
value: 0,
|
||||
data: '0x',
|
||||
}
|
||||
|
||||
return {
|
||||
...defaultData,
|
||||
...data,
|
||||
}
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
{
|
||||
"extends": "./tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"module": "CommonJS",
|
||||
"outDir": "./dist/commonjs"
|
||||
}
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
{
|
||||
"extends": "./tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"module": "ES2020",
|
||||
"outDir": "./dist/esm"
|
||||
}
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"rootDir": "src",
|
||||
"target": "es2017",
|
||||
"outDir": "dist",
|
||||
"declaration": true,
|
||||
"lib": [
|
||||
"dom",
|
||||
"dom.iterable",
|
||||
"esnext"
|
||||
],
|
||||
"allowJs": true,
|
||||
"checkJs": true,
|
||||
"skipLibCheck": true,
|
||||
"strict": true,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"noEmit": false,
|
||||
"esModuleInterop": true,
|
||||
"module": "CommonJS",
|
||||
"moduleResolution": "node",
|
||||
"resolveJsonModule": true,
|
||||
"isolatedModules": true,
|
||||
"jsx": "preserve",
|
||||
// "incremental": true,
|
||||
"noUncheckedIndexedAccess": true,
|
||||
"baseUrl": ".",
|
||||
"paths": {
|
||||
"@/*": [
|
||||
"./src/*"
|
||||
]
|
||||
}
|
||||
},
|
||||
"include": [
|
||||
"src/**/*.ts",
|
||||
"src/**/*.tsx",
|
||||
"src/**/*.cjs",
|
||||
"src/**/*.mjs"
|
||||
],
|
||||
"exclude": [
|
||||
"node_modules"
|
||||
],
|
||||
// NEW: Options for file/directory watching
|
||||
"watchOptions": {
|
||||
// Use native file system events for files and directories
|
||||
"watchFile": "useFsEvents",
|
||||
"watchDirectory": "useFsEvents",
|
||||
// Poll files for updates more frequently
|
||||
// when they're updated a lot.
|
||||
"fallbackPolling": "dynamicPriority",
|
||||
// Don't coalesce watch notification
|
||||
"synchronousWatchDirectory": true,
|
||||
// Finally, two additional settings for reducing the amount of possible
|
||||
// files to track work from these directories
|
||||
"excludeDirectories": [
|
||||
"**/node_modules",
|
||||
"_build"
|
||||
],
|
||||
"excludeFiles": [
|
||||
"build/fileWhichChangesOften.ts"
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -9,6 +9,6 @@ pytest==8.1.1
|
||||
tomli==2.0.1
|
||||
typing-extensions==4.10.0
|
||||
zipp==3.18.1
|
||||
onnx==1.15.0
|
||||
onnx==1.17.0
|
||||
onnxruntime==1.17.1
|
||||
numpy==1.26.4
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
pub mod python;
|
||||
/// Universal bindings for all platforms
|
||||
#[cfg(any(
|
||||
feature = "ios-bindings",
|
||||
feature = "universal-bindings",
|
||||
all(target_arch = "wasm32", target_os = "unknown")
|
||||
))]
|
||||
pub mod universal;
|
||||
|
||||
@@ -1030,7 +1030,6 @@ fn gen_random_data(
|
||||
))]
|
||||
#[gen_stub_pyfunction]
|
||||
fn calibrate_settings(
|
||||
py: Python,
|
||||
data: String,
|
||||
model: PathBuf,
|
||||
settings: PathBuf,
|
||||
@@ -1039,26 +1038,23 @@ fn calibrate_settings(
|
||||
scales: Option<Vec<crate::Scale>>,
|
||||
scale_rebase_multiplier: Vec<u32>,
|
||||
max_logrows: Option<u32>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
pyo3_async_runtimes::tokio::future_into_py(py, async move {
|
||||
crate::execute::calibrate(
|
||||
model,
|
||||
data,
|
||||
settings,
|
||||
target,
|
||||
lookup_safety_margin,
|
||||
scales,
|
||||
scale_rebase_multiplier,
|
||||
max_logrows,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let err_str = format!("Failed to calibrate settings: {}", e);
|
||||
PyRuntimeError::new_err(err_str)
|
||||
})?;
|
||||
) -> PyResult<bool> {
|
||||
crate::execute::calibrate(
|
||||
model,
|
||||
data,
|
||||
settings,
|
||||
target,
|
||||
lookup_safety_margin,
|
||||
scales,
|
||||
scale_rebase_multiplier,
|
||||
max_logrows,
|
||||
)
|
||||
.map_err(|e| {
|
||||
let err_str = format!("Failed to calibrate settings: {}", e);
|
||||
PyRuntimeError::new_err(err_str)
|
||||
})?;
|
||||
|
||||
Ok(true)
|
||||
})
|
||||
Ok(true)
|
||||
}
|
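In Python terms, this hunk turns the awaitable binding into a plain blocking call that returns a bool; a before/after sketch (argument order per the signature above, positional values illustrative):

```python
# before: async binding
# res = await ezkl.calibrate_settings(data, model, settings, "resources")

# after: blocking binding
res = ezkl.calibrate_settings(data, model, settings, "resources")
assert res == True
```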
||||
|
||||
/// Runs the forward pass operation to generate a witness
|
||||
@@ -1094,22 +1090,18 @@ fn calibrate_settings(
|
||||
))]
|
||||
#[gen_stub_pyfunction]
|
||||
fn gen_witness(
|
||||
py: Python,
|
||||
data: String,
|
||||
model: PathBuf,
|
||||
output: Option<PathBuf>,
|
||||
vk_path: Option<PathBuf>,
|
||||
srs_path: Option<PathBuf>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
pyo3_async_runtimes::tokio::future_into_py(py, async move {
|
||||
let output = crate::execute::gen_witness(model, data, output, vk_path, srs_path)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let err_str = format!("Failed to generate witness: {}", e);
|
||||
PyRuntimeError::new_err(err_str)
|
||||
})?;
|
||||
Python::with_gil(|py| Ok(output.to_object(py)))
|
||||
})
|
||||
) -> PyResult<PyObject> {
|
||||
let output =
|
||||
crate::execute::gen_witness(model, data, output, vk_path, srs_path).map_err(|e| {
|
||||
let err_str = format!("Failed to generate witness: {}", e);
|
||||
PyRuntimeError::new_err(err_str)
|
||||
})?;
|
||||
Python::with_gil(|py| Ok(output.to_object(py)))
|
||||
}
|
||||
|
||||
/// Mocks the prover
|
||||
@@ -1607,22 +1599,15 @@ fn verify_aggr(
|
||||
#[pyfunction(signature = (
|
||||
proof=PathBuf::from(DEFAULT_PROOF),
|
||||
calldata=PathBuf::from(DEFAULT_CALLDATA),
|
||||
addr_vk=None,
|
||||
vka_path=None,
|
||||
))]
|
||||
#[gen_stub_pyfunction]
|
||||
fn encode_evm_calldata<'a>(
|
||||
proof: PathBuf,
|
||||
calldata: PathBuf,
|
||||
addr_vk: Option<&'a str>,
|
||||
vka_path: Option<PathBuf>,
|
||||
) -> Result<Vec<u8>, PyErr> {
|
||||
let addr_vk = if let Some(addr_vk) = addr_vk {
|
||||
let addr_vk = H160Flag::from(addr_vk);
|
||||
Some(addr_vk)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
crate::execute::encode_evm_calldata(proof, calldata, addr_vk).map_err(|e| {
|
||||
crate::execute::encode_evm_calldata(proof, calldata, vka_path).map_err(|e| {
|
||||
let err_str = format!("Failed to generate calldata: {}", e);
|
||||
PyRuntimeError::new_err(err_str)
|
||||
})
|
||||
@@ -1691,6 +1676,7 @@ fn create_evm_verifier(
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(feature = "reusable-verifier")]
|
||||
/// Creates an EVM VK artifact. This command generates a VK with circuit-specific metadata encoded in memory for use by the reusable Halo2 verifier.
|
||||
/// This is useful for deploying verifiers that would otherwise be too big to fit on-chain and would require aggregation.
|
||||
///
|
||||
@@ -1702,15 +1688,15 @@ fn create_evm_verifier(
|
||||
/// settings_path: str
|
||||
/// The path to the settings file
|
||||
///
|
||||
/// sol_code_path: str
|
||||
/// The path to the create the solidity verifying key.
|
||||
///
|
||||
/// abi_path: str
|
||||
/// The path to create the ABI for the solidity verifier
|
||||
/// vka_path: str
|
||||
/// The path to the verification artifact calldata bytes file.
|
||||
///
|
||||
/// srs_path: str
|
||||
/// The path to the SRS file
|
||||
///
|
||||
/// decimals: int
|
||||
/// The number of decimals used for the rescaling of fixed point felt instances into on-chain floats.
|
||||
///
|
||||
/// Returns
|
||||
/// -------
|
||||
/// bool
|
||||
@@ -1718,21 +1704,21 @@ fn create_evm_verifier(
|
||||
#[pyfunction(signature = (
|
||||
vk_path=PathBuf::from(DEFAULT_VK),
|
||||
settings_path=PathBuf::from(DEFAULT_SETTINGS),
|
||||
sol_code_path=PathBuf::from(DEFAULT_VK_SOL),
|
||||
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
|
||||
srs_path=None
|
||||
vka_path=PathBuf::from(DEFAULT_VKA),
|
||||
srs_path=None,
|
||||
decimals=DEFAULT_DECIMALS.parse().unwrap(),
|
||||
))]
|
||||
#[gen_stub_pyfunction]
|
||||
fn create_evm_vka(
|
||||
py: Python,
|
||||
vk_path: PathBuf,
|
||||
settings_path: PathBuf,
|
||||
sol_code_path: PathBuf,
|
||||
abi_path: PathBuf,
|
||||
vka_path: PathBuf,
|
||||
srs_path: Option<PathBuf>,
|
||||
decimals: usize,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
pyo3_async_runtimes::tokio::future_into_py(py, async move {
|
||||
crate::execute::create_evm_vka(vk_path, srs_path, settings_path, sol_code_path, abi_path)
|
||||
crate::execute::create_evm_vka(vk_path, srs_path, settings_path, vka_path, decimals)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let err_str = format!("Failed to run create_evm_verifier: {}", e);
|
||||
@@ -1743,124 +1729,7 @@ fn create_evm_vka(
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates an EVM compatible data attestation verifier, you will need solc installed in your environment to run this
|
||||
///
|
||||
/// Arguments
|
||||
/// ---------
|
||||
/// input_data: str
|
||||
/// The path to the .json data file, which should contain the necessary calldata and account addresses needed to read from all the on-chain view functions that return the data that the network ingests as inputs
|
||||
///
|
||||
/// settings_path: str
|
||||
/// The path to the settings file
|
||||
///
|
||||
/// sol_code_path: str
|
||||
/// The path to create the solidity verifier
|
||||
///
|
||||
/// abi_path: str
|
||||
/// The path to create the ABI for the solidity verifier
|
||||
///
|
||||
/// Returns
|
||||
/// -------
|
||||
/// bool
|
||||
///
|
||||
#[pyfunction(signature = (
|
||||
input_data=String::from(DEFAULT_DATA),
|
||||
settings_path=PathBuf::from(DEFAULT_SETTINGS),
|
||||
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE_DA),
|
||||
abi_path=PathBuf::from(DEFAULT_VERIFIER_DA_ABI),
|
||||
witness_path=None,
|
||||
))]
|
||||
#[gen_stub_pyfunction]
|
||||
fn create_evm_data_attestation(
|
||||
py: Python,
|
||||
input_data: String,
|
||||
settings_path: PathBuf,
|
||||
sol_code_path: PathBuf,
|
||||
abi_path: PathBuf,
|
||||
witness_path: Option<PathBuf>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
pyo3_async_runtimes::tokio::future_into_py(py, async move {
|
||||
crate::execute::create_evm_data_attestation(
|
||||
settings_path,
|
||||
sol_code_path,
|
||||
abi_path,
|
||||
input_data,
|
||||
witness_path,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let err_str = format!("Failed to run create_evm_data_attestation: {}", e);
|
||||
PyRuntimeError::new_err(err_str)
|
||||
})?;
|
||||
|
||||
Ok(true)
|
||||
})
|
||||
}
|
||||
|
||||
/// Setup test evm witness
|
||||
///
|
||||
/// Arguments
|
||||
/// ---------
|
||||
/// data_path: str
|
||||
/// The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
|
||||
///
|
||||
/// compiled_circuit_path: str
|
||||
/// The path to the compiled model file (generated using the compile-circuit command)
|
||||
///
|
||||
/// test_data: str
|
||||
/// For testing purposes only. The optional path to the .json data file that will be generated that contains the OnChain data storage information derived from the file information in the data .json file. Should include both the network input (possibly private) and the network output (public input to the proof)
|
||||
///
|
||||
/// input_sources: str
|
||||
/// Where the input data comes from
|
||||
///
|
||||
/// output_source: str
|
||||
/// Where the output data comes from
|
||||
///
|
||||
/// rpc_url: str
|
||||
/// RPC URL for an EVM compatible node, if None, uses Anvil as a local RPC node
|
||||
///
|
||||
/// Returns
|
||||
/// -------
|
||||
/// bool
|
||||
///
|
||||
#[pyfunction(signature = (
|
||||
data_path,
|
||||
compiled_circuit_path,
|
||||
test_data,
|
||||
input_source,
|
||||
output_source,
|
||||
rpc_url,
|
||||
))]
|
||||
#[gen_stub_pyfunction]
|
||||
fn setup_test_evm_data(
|
||||
py: Python,
|
||||
data_path: String,
|
||||
compiled_circuit_path: PathBuf,
|
||||
test_data: PathBuf,
|
||||
input_source: PyTestDataSource,
|
||||
output_source: PyTestDataSource,
|
||||
rpc_url: String,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
pyo3_async_runtimes::tokio::future_into_py(py, async move {
|
||||
crate::execute::setup_test_evm_data(
|
||||
data_path,
|
||||
compiled_circuit_path,
|
||||
test_data,
|
||||
rpc_url,
|
||||
input_source.into(),
|
||||
output_source.into(),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let err_str = format!("Failed to run setup_test_evm_data: {}", e);
|
||||
PyRuntimeError::new_err(err_str)
|
||||
})?;
|
||||
|
||||
Ok(true)
|
||||
})
|
||||
}
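Correspondingly, a hedged Python sketch of this test-fixture binding; the "on-chain" strings are an assumption about how PyTestDataSource is passed from Python:

```python
import asyncio
import ezkl

# Sketch only: derive an on-chain test input file from a local data file.
async def setup():
    ok = await ezkl.setup_test_evm_data(
        data_path="input.json",
        compiled_circuit_path="model.compiled",
        test_data="test_onchain_input.json",  # generated output, testing only
        input_source="on-chain",              # assumed string form of PyTestDataSource
        output_source="on-chain",
        rpc_url="http://127.0.0.1:8545",      # e.g. a local Anvil node
    )
    assert ok

asyncio.run(setup())
```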
|
||||
|
||||
/// deploys the solidity verifier
|
||||
/// Deploys the solidity verifier
|
||||
#[pyfunction(signature = (
|
||||
addr_path,
|
||||
rpc_url,
|
||||
@@ -1898,46 +1767,65 @@ fn deploy_evm(
|
||||
})
|
||||
}
|
||||
|
||||
/// deploys the solidity da verifier
|
||||
#[cfg(feature = "reusable-verifier")]
|
||||
/// Registers a VKA on the EZKL reusable verifier contract
|
||||
///
|
||||
/// Arguments
|
||||
/// ---------
|
||||
/// addr_verifier: str
|
||||
/// The reusable verifier contract's address as a hex string
|
||||
///
|
||||
/// rpc_url: str
|
||||
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
|
||||
///
|
||||
/// vka_path: str
|
||||
/// The path to the VKA calldata bytes file (generated using the create_evm_vka command)
|
||||
///
|
||||
/// vka_digest_path: str
|
||||
/// The path to the VKA digest file, i.e. the hash of the VKA calldata bytes file
|
||||
///
|
||||
/// private_key: str
|
||||
/// The private key to use for signing the transaction. If None, will use the default private key
|
||||
///
|
||||
/// Returns
|
||||
/// -------
|
||||
/// bool
|
||||
///
|
||||
#[pyfunction(signature = (
|
||||
addr_path,
|
||||
input_data,
|
||||
addr_verifier,
|
||||
rpc_url,
|
||||
settings_path=PathBuf::from(DEFAULT_SETTINGS),
|
||||
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE_DA),
|
||||
optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
|
||||
private_key=None
|
||||
vka_path=PathBuf::from(DEFAULT_VKA),
|
||||
vka_digest_path=PathBuf::from(DEFAULT_VKA_DIGEST),
|
||||
private_key=None,
|
||||
))]
|
||||
#[gen_stub_pyfunction]
|
||||
fn deploy_da_evm(
|
||||
py: Python,
|
||||
addr_path: PathBuf,
|
||||
input_data: String,
|
||||
fn register_vka<'a>(
|
||||
py: Python<'a>,
|
||||
addr_verifier: &'a str,
|
||||
rpc_url: String,
|
||||
settings_path: PathBuf,
|
||||
sol_code_path: PathBuf,
|
||||
optimizer_runs: usize,
|
||||
vka_path: PathBuf,
|
||||
vka_digest_path: PathBuf,
|
||||
private_key: Option<String>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
) -> PyResult<Bound<'a, PyAny>> {
|
||||
let addr_verifier = H160Flag::from(addr_verifier);
|
||||
pyo3_async_runtimes::tokio::future_into_py(py, async move {
|
||||
crate::execute::deploy_da_evm(
|
||||
input_data,
|
||||
settings_path,
|
||||
sol_code_path,
|
||||
crate::execute::register_vka(
|
||||
rpc_url,
|
||||
addr_path,
|
||||
optimizer_runs,
|
||||
addr_verifier,
|
||||
vka_path,
|
||||
vka_digest_path,
|
||||
private_key,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let err_str = format!("Failed to run deploy_da_evm: {}", e);
|
||||
let err_str = format!("Failed to run register_vka: {}", e);
|
||||
PyRuntimeError::new_err(err_str)
|
||||
})?;
|
||||
|
||||
Ok(true)
|
||||
})
|
||||
}
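The hunk interleaves the removed deploy_da_evm binding with the new register_vka one; reading only the added lines, the effective signature is (addr_verifier, rpc_url, vka_path, vka_digest_path, private_key). A hedged Python sketch with a placeholder verifier address:

```python
import asyncio
import ezkl

# Sketch only: register a VKA on an already-deployed reusable verifier.
async def register():
    ok = await ezkl.register_vka(
        addr_verifier="0x0000000000000000000000000000000000000000",  # placeholder
        rpc_url="http://127.0.0.1:8545",
        vka_path="vka.bytes",          # default from the signature above
        vka_digest_path="vka.digest",  # default from the signature above
    )
    assert ok

asyncio.run(register())
```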
|
||||
|
||||
/// verifies an evm compatible proof, you will need solc installed in your environment to run this
|
||||
///
|
||||
/// Arguments
|
||||
@@ -1951,11 +1839,11 @@ fn deploy_da_evm(
|
||||
/// rpc_url: str
|
||||
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
|
||||
///
|
||||
/// addr_da: str
|
||||
/// does the verifier use data attestation ?
|
||||
/// vka_path: str
|
||||
/// The path to the VKA calldata bytes file (generated using the create_evm_vka command)
|
||||
///
|
||||
/// addr_vk: str
|
||||
/// The address of the separate VK contract (if the verifier key is rendered as a separate contract)
|
||||
/// encoded_calldata: str
|
||||
/// The path to the encoded calldata bytes file (generated using the encode-evm-calldata command)
|
||||
/// Returns
|
||||
/// -------
|
||||
/// bool
|
||||
@@ -1964,8 +1852,8 @@ fn deploy_da_evm(
|
||||
addr_verifier,
|
||||
rpc_url,
|
||||
proof_path=PathBuf::from(DEFAULT_PROOF),
|
||||
addr_da = None,
|
||||
addr_vk = None,
|
||||
vka_path = None,
|
||||
encoded_calldata = None,
|
||||
))]
|
||||
#[gen_stub_pyfunction]
|
||||
fn verify_evm<'a>(
|
||||
@@ -1973,30 +1861,24 @@ fn verify_evm<'a>(
|
||||
addr_verifier: &'a str,
|
||||
rpc_url: String,
|
||||
proof_path: PathBuf,
|
||||
addr_da: Option<&'a str>,
|
||||
addr_vk: Option<&'a str>,
|
||||
vka_path: Option<PathBuf>,
|
||||
encoded_calldata: Option<PathBuf>,
|
||||
) -> PyResult<Bound<'a, PyAny>> {
|
||||
let addr_verifier = H160Flag::from(addr_verifier);
|
||||
let addr_da = if let Some(addr_da) = addr_da {
|
||||
let addr_da = H160Flag::from(addr_da);
|
||||
Some(addr_da)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let addr_vk = if let Some(addr_vk) = addr_vk {
|
||||
let addr_vk = H160Flag::from(addr_vk);
|
||||
Some(addr_vk)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
pyo3_async_runtimes::tokio::future_into_py(py, async move {
|
||||
crate::execute::verify_evm(proof_path, addr_verifier, rpc_url, addr_da, addr_vk)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let err_str = format!("Failed to run verify_evm: {}", e);
|
||||
PyRuntimeError::new_err(err_str)
|
||||
})?;
|
||||
crate::execute::verify_evm(
|
||||
proof_path,
|
||||
addr_verifier,
|
||||
rpc_url,
|
||||
vka_path,
|
||||
encoded_calldata,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let err_str = format!("Failed to run verify_evm: {}", e);
|
||||
PyRuntimeError::new_err(err_str)
|
||||
})?;
|
||||
|
||||
Ok(true)
|
||||
})
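With addr_da and addr_vk replaced by vka_path and encoded_calldata, a hedged sketch of the new call shape from Python (placeholder address; vka_path is only meaningful on the reusable-verifier path):

```python
import asyncio
import ezkl

# Sketch only: verify a proof against a deployed reusable verifier.
async def verify():
    ok = await ezkl.verify_evm(
        addr_verifier="0x0000000000000000000000000000000000000000",  # placeholder
        rpc_url="http://127.0.0.1:8545",
        proof_path="proof.json",
        vka_path="vka.bytes",  # omit for a standalone (non-reusable) verifier
    )
    assert ok

asyncio.run(verify())
```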
|
||||
@@ -2113,14 +1995,14 @@ fn ezkl(m: &Bound<'_, PyModule>) -> PyResult<()> {
|
||||
m.add_function(wrap_pyfunction!(compile_circuit, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(verify_aggr, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(create_evm_verifier, m)?)?;
|
||||
#[cfg(feature = "reusable-verifier")]
|
||||
m.add_function(wrap_pyfunction!(create_evm_vka, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(deploy_evm, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(deploy_da_evm, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(verify_evm, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(setup_test_evm_data, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(create_evm_verifier_aggr, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(create_evm_data_attestation, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(encode_evm_calldata, m)?)?;
|
||||
#[cfg(feature = "reusable-verifier")]
|
||||
m.add_function(wrap_pyfunction!(register_vka, m)?)?;
|
||||
Ok(())
|
||||
}
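Because create_evm_vka and register_vka are registered behind the reusable-verifier feature above, they may be absent from a given build; a small Python probe:

```python
import ezkl

# The VKA entry points only exist when the extension was built with the
# `reusable-verifier` feature (see the cfg gates above), so probe first.
if hasattr(ezkl, "register_vka") and hasattr(ezkl, "create_evm_vka"):
    print("reusable-verifier bindings available")
else:
    print("built without the reusable-verifier feature")
```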
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ use crate::{
|
||||
circuit::region::RegionSettings,
|
||||
graph::GraphSettings,
|
||||
pfsys::{
|
||||
create_proof_circuit,
|
||||
create_proof_circuit, encode_calldata,
|
||||
evm::aggregation_kzg::{AggregationCircuit, PoseidonTranscript},
|
||||
verify_proof_circuit, TranscriptType,
|
||||
},
|
||||
@@ -31,8 +31,12 @@ use crate::{
|
||||
CheckMode, Commitments, EZKLError as InnerEZKLError,
|
||||
};
|
||||
|
||||
use crate::circuit::modules::poseidon::{
|
||||
spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH},
|
||||
PoseidonChip,
|
||||
};
|
||||
use crate::circuit::modules::Module;
|
||||
use crate::graph::{GraphCircuit, GraphWitness};
|
||||
use halo2_solidity_verifier::encode_calldata;
|
||||
use halo2curves::{
|
||||
bn256::{Bn256, Fr, G1Affine},
|
||||
ff::{FromUniformBytes, PrimeField},
|
||||
@@ -61,49 +65,74 @@ impl From<InnerEZKLError> for EZKLError {
|
||||
}
|
||||
}
|
||||
|
||||
/// Hash the input message with poseidon
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub fn poseidon_hash(message: Vec<u8>) -> Result<Vec<u8>, EZKLError> {
|
||||
let message: Vec<Fr> = serde_json::from_slice(&message[..]).map_err(InnerEZKLError::from)?;
|
||||
|
||||
let output = PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::run(message.clone())
|
||||
.map_err(InnerEZKLError::from)?;
|
||||
|
||||
Ok(serde_json::to_vec(&output).map_err(InnerEZKLError::from)?)
|
||||
}
|
||||
|
||||
/// Hash the input message with poseidon without converting to Fr
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub fn poseidon_hash_no_felt(message: Vec<u8>) -> Result<Vec<u8>, EZKLError> {
|
||||
let message: Vec<Fr> = message.iter().map(|x| Fr::from(*x as u64)).collect();
|
||||
|
||||
let output = PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::run(message.clone())
|
||||
.map_err(InnerEZKLError::from)?;
|
||||
|
||||
Ok(serde_json::to_vec(&output).map_err(InnerEZKLError::from)?)
|
||||
}
|
||||
|
||||
/// Encode verifier calldata from proof and ethereum vk_address
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn encode_verifier_calldata(
|
||||
// TODO - should it be pub(crate) or pub or pub(super)?
|
||||
pub fn encode_verifier_calldata(
|
||||
// TODO - should it be pub or pub(crate) or pub(super)?
|
||||
proof: Vec<u8>,
|
||||
vk_address: Option<Vec<u8>>,
|
||||
vka: Option<Vec<u8>>,
|
||||
) -> Result<Vec<u8>, EZKLError> {
|
||||
let snark: crate::pfsys::Snark<Fr, G1Affine> =
|
||||
serde_json::from_slice(&proof[..]).map_err(InnerEZKLError::from)?;
|
||||
|
||||
let vk_address: Option<[u8; 20]> = if let Some(vk_address) = vk_address {
|
||||
let array: [u8; 20] =
|
||||
serde_json::from_slice(&vk_address[..]).map_err(InnerEZKLError::from)?;
|
||||
let vka_buf: Option<Vec<[u8; 32]>> = if let Some(vka) = vka {
|
||||
let array: Vec<[u8; 32]> =
|
||||
serde_json::from_slice(&vka[..]).map_err(InnerEZKLError::from)?;
|
||||
Some(array)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let vka: Option<&[[u8; 32]]> = vka_buf.as_deref();
|
||||
|
||||
let flattened_instances = snark.instances.into_iter().flatten();
|
||||
|
||||
let encoded = encode_calldata(
|
||||
vk_address,
|
||||
&snark.proof,
|
||||
&flattened_instances.collect::<Vec<_>>(),
|
||||
);
|
||||
let encoded = encode_calldata(vka, &snark.proof, &flattened_instances.collect::<Vec<_>>());
|
||||
|
||||
Ok(encoded)
|
||||
}
|
||||
|
||||
/// Generate witness from compiled circuit and input json
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn gen_witness(compiled_circuit: Vec<u8>, input: Vec<u8>) -> Result<Vec<u8>, EZKLError> {
|
||||
pub fn gen_witness(compiled_circuit: Vec<u8>, input: Vec<u8>) -> Result<Vec<u8>, EZKLError> {
|
||||
println!("[circuit]");
|
||||
let mut circuit: crate::graph::GraphCircuit = bincode::deserialize(&compiled_circuit[..])
|
||||
.map_err(|e| {
|
||||
EZKLError::InternalError(format!("Failed to deserialize compiled model: {}", e))
|
||||
})?;
|
||||
|
||||
println!("[input]");
|
||||
let input: crate::graph::input::GraphData = serde_json::from_slice(&input[..])
|
||||
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize input: {}", e)))?;
|
||||
|
||||
println!("[load graph input]");
|
||||
let mut input = circuit
|
||||
.load_graph_input(&input)
|
||||
.map_err(|e| EZKLError::InternalError(format!("{}", e)))?;
|
||||
|
||||
println!("[load graph witness]");
|
||||
let witness = circuit
|
||||
.forward::<KZGCommitmentScheme<Bn256>>(
|
||||
&mut input,
|
||||
@@ -116,13 +145,14 @@ pub(crate) fn gen_witness(compiled_circuit: Vec<u8>, input: Vec<u8>) -> Result<V
|
||||
)
|
||||
.map_err(|e| EZKLError::InternalError(format!("{}", e)))?;
|
||||
|
||||
println!("[serialize witness]");
|
||||
serde_json::to_vec(&witness)
|
||||
.map_err(|e| EZKLError::InternalError(format!("Failed to serialize witness: {}", e)))
|
||||
}
|
||||
|
||||
/// Generate verifying key from compiled circuit, and parameters srs
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn gen_vk(
|
||||
pub fn gen_vk(
|
||||
compiled_circuit: Vec<u8>,
|
||||
srs: Vec<u8>,
|
||||
compress_selectors: bool,
|
||||
@@ -152,11 +182,7 @@ pub(crate) fn gen_vk(
|
||||
|
||||
/// Generate proving key from vk, compiled circuit and parameters srs
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn gen_pk(
|
||||
vk: Vec<u8>,
|
||||
compiled_circuit: Vec<u8>,
|
||||
srs: Vec<u8>,
|
||||
) -> Result<Vec<u8>, EZKLError> {
|
||||
pub fn gen_pk(vk: Vec<u8>, compiled_circuit: Vec<u8>, srs: Vec<u8>) -> Result<Vec<u8>, EZKLError> {
|
||||
let mut reader = BufReader::new(&srs[..]);
|
||||
let params: ParamsKZG<Bn256> = get_params(&mut reader)?;
|
||||
|
||||
@@ -183,7 +209,7 @@ pub(crate) fn gen_pk(
|
||||
|
||||
/// Verify proof with vk, proof json, circuit settings json and srs
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn verify(
|
||||
pub fn verify(
|
||||
proof: Vec<u8>,
|
||||
vk: Vec<u8>,
|
||||
settings: Vec<u8>,
|
||||
@@ -265,7 +291,7 @@ pub(crate) fn verify(
|
||||
|
||||
/// Verify aggregate proof with vk, proof, circuit settings and srs
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn verify_aggr(
|
||||
pub fn verify_aggr(
|
||||
proof: Vec<u8>,
|
||||
vk: Vec<u8>,
|
||||
logrows: u64,
|
||||
@@ -347,7 +373,7 @@ pub(crate) fn verify_aggr(
|
||||
|
||||
/// Prove in browser with compiled circuit, witness json, proving key, and srs
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn prove(
|
||||
pub fn prove(
|
||||
witness: Vec<u8>,
|
||||
pk: Vec<u8>,
|
||||
compiled_circuit: Vec<u8>,
|
||||
@@ -445,7 +471,7 @@ pub(crate) fn prove(
|
||||
|
||||
/// Validate the witness json
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn witness_validation(witness: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
pub fn witness_validation(witness: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
let _: GraphWitness = serde_json::from_slice(&witness[..]).map_err(InnerEZKLError::from)?;
|
||||
|
||||
Ok(true)
|
||||
@@ -453,7 +479,7 @@ pub(crate) fn witness_validation(witness: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
|
||||
/// Validate the compiled circuit
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn compiled_circuit_validation(compiled_circuit: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
pub fn compiled_circuit_validation(compiled_circuit: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
let _: GraphCircuit = bincode::deserialize(&compiled_circuit[..]).map_err(|e| {
|
||||
EZKLError::InternalError(format!("Failed to deserialize compiled circuit: {}", e))
|
||||
})?;
|
||||
@@ -463,7 +489,7 @@ pub(crate) fn compiled_circuit_validation(compiled_circuit: Vec<u8>) -> Result<b
|
||||
|
||||
/// Validate the input json
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn input_validation(input: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
pub fn input_validation(input: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
let _: crate::graph::input::GraphData =
|
||||
serde_json::from_slice(&input[..]).map_err(InnerEZKLError::from)?;
|
||||
|
||||
@@ -472,7 +498,7 @@ pub(crate) fn input_validation(input: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
|
||||
/// Validate the proof json
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn proof_validation(proof: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
pub fn proof_validation(proof: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
let _: crate::pfsys::Snark<Fr, G1Affine> =
|
||||
serde_json::from_slice(&proof[..]).map_err(InnerEZKLError::from)?;
|
||||
|
||||
@@ -481,7 +507,7 @@ pub(crate) fn proof_validation(proof: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
|
||||
/// Validate the verifying key given the settings json
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn vk_validation(vk: Vec<u8>, settings: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
pub fn vk_validation(vk: Vec<u8>, settings: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
let circuit_settings: GraphSettings =
|
||||
serde_json::from_slice(&settings[..]).map_err(InnerEZKLError::from)?;
|
||||
|
||||
@@ -498,7 +524,7 @@ pub(crate) fn vk_validation(vk: Vec<u8>, settings: Vec<u8>) -> Result<bool, EZKL
|
||||
|
||||
/// Validate the proving key given the settings json
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn pk_validation(pk: Vec<u8>, settings: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
pub fn pk_validation(pk: Vec<u8>, settings: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
let circuit_settings: GraphSettings =
|
||||
serde_json::from_slice(&settings[..]).map_err(InnerEZKLError::from)?;
|
||||
|
||||
@@ -515,7 +541,7 @@ pub(crate) fn pk_validation(pk: Vec<u8>, settings: Vec<u8>) -> Result<bool, EZKL
|
||||
|
||||
/// Validate the settings json
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn settings_validation(settings: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
pub fn settings_validation(settings: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
let _: GraphSettings = serde_json::from_slice(&settings[..]).map_err(InnerEZKLError::from)?;
|
||||
|
||||
Ok(true)
|
||||
@@ -523,7 +549,7 @@ pub(crate) fn settings_validation(settings: Vec<u8>) -> Result<bool, EZKLError>
|
||||
|
||||
/// Validate the srs
|
||||
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
|
||||
pub(crate) fn srs_validation(srs: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
pub fn srs_validation(srs: Vec<u8>) -> Result<bool, EZKLError> {
|
||||
let mut reader = BufReader::new(&srs[..]);
|
||||
let _: ParamsKZG<Bn256> =
|
||||
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader).map_err(|e| {
|
||||
|
||||
@@ -1,12 +1,5 @@
|
||||
use crate::{
|
||||
circuit::modules::{
|
||||
polycommit::PolyCommitChip,
|
||||
poseidon::{
|
||||
spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH},
|
||||
PoseidonChip,
|
||||
},
|
||||
Module,
|
||||
},
|
||||
circuit::modules::polycommit::PolyCommitChip,
|
||||
fieldutils::{felt_to_integer_rep, integer_rep_to_felt},
|
||||
graph::{quantize_float, scale_to_multiplier, GraphCircuit, GraphSettings},
|
||||
};
|
||||
@@ -15,6 +8,7 @@ use halo2_proofs::{
|
||||
plonk::*,
|
||||
poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG},
|
||||
};
|
||||
use halo2_solidity_verifier::Evm;
|
||||
use halo2curves::{
|
||||
bn256::{Bn256, Fr, G1Affine},
|
||||
ff::PrimeField,
|
||||
@@ -225,15 +219,9 @@ pub fn bufferToVecOfFelt(
|
||||
pub fn poseidonHash(
|
||||
message: wasm_bindgen::Clamped<Vec<u8>>,
|
||||
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
|
||||
let message: Vec<Fr> = serde_json::from_slice(&message[..])
|
||||
.map_err(|e| JsError::new(&format!("Failed to deserialize message: {}", e)))?;
|
||||
|
||||
let output = PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::run(message.clone())
|
||||
.map_err(|e| JsError::new(&format!("{}", e)))?;
|
||||
|
||||
Ok(wasm_bindgen::Clamped(serde_json::to_vec(&output).map_err(
|
||||
|e| JsError::new(&format!("Failed to serialize poseidon hash output: {}", e)),
|
||||
)?))
|
||||
super::universal::poseidon_hash(message.0)
|
||||
.map_err(JsError::from)
|
||||
.map(wasm_bindgen::Clamped)
|
||||
}
|
||||
|
||||
/// Generate a witness file from input.json, compiled model and a settings.json file.
|
||||
@@ -279,6 +267,33 @@ pub fn verify(
|
||||
super::universal::verify(proof_js.0, vk.0, settings.0, srs.0).map_err(JsError::from)
|
||||
}
|
||||
|
||||
/// Verify proof in browser evm using wasm
|
||||
#[wasm_bindgen]
|
||||
#[allow(non_snake_case)]
|
||||
pub fn verifyEVM(
|
||||
proof_js: wasm_bindgen::Clamped<Vec<u8>>,
|
||||
bytecode_verifier: Vec<u8>,
|
||||
bytecode_vka: Option<Vec<u8>>,
|
||||
) -> Result<bool, JsError> {
|
||||
let mut evm = Evm::unlimited();
|
||||
let decoded_verifier = utf8_bytes_to_hex_decoded(&bytecode_verifier)?;
|
||||
let (verifier_address, _) = evm.create(decoded_verifier);
|
||||
// if bytecode_vka is Some, then create the VKA contract
|
||||
let vk_address = if let Some(bytecode_vka) = bytecode_vka {
|
||||
let decoded_vka = utf8_bytes_to_hex_decoded(&bytecode_vka)?;
|
||||
let (address, _) = evm.create(decoded_vka);
|
||||
Some(address.as_slice().to_vec())
|
||||
// otherwise the verifier is standalone and no VKA contract is needed
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let calldata = encode_verifier_calldata(proof_js.0, vk_address).map_err(JsError::from);
|
||||
let output = evm.call(verifier_address, calldata?).1;
|
||||
// ABI-encoded boolean `true`: a 32-byte word whose final byte is 1
let true_word = [vec![0; 31], vec![1]].concat();
|
||||
Ok(output == true_word)
|
||||
}
|
||||
|
||||
/// Verify aggregate proof in browser using wasm
|
||||
#[wasm_bindgen]
|
||||
#[allow(non_snake_case)]
|
||||
@@ -371,3 +386,13 @@ pub fn u8_array_to_u128_le(arr: [u8; 16]) -> u128 {
|
||||
}
|
||||
n
|
||||
}
|
||||
/// Decodes a UTF-8 string of hex characters (with an optional `0x` prefix and surrounding whitespace) into raw bytes
|
||||
pub fn utf8_bytes_to_hex_decoded(input: &[u8]) -> Result<Vec<u8>, JsError> {
|
||||
let string = std::str::from_utf8(input)?.trim();
|
||||
let hex_string = if string.starts_with("0x") {
|
||||
&string[2..]
|
||||
} else {
|
||||
string
|
||||
};
|
||||
hex::decode(hex_string).map_err(JsError::from)
|
||||
}
|
||||
|
||||
173
src/commands.rs
173
src/commands.rs
@@ -1,3 +1,4 @@
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
use alloy::primitives::Address as H160;
|
||||
use clap::{Command, Parser, Subcommand};
|
||||
use clap_complete::{generate, Generator, Shell};
|
||||
@@ -11,7 +12,6 @@ use tosubcommand::{ToFlags, ToSubcommand};
|
||||
use crate::{pfsys::ProofType, Commitments, RunArgs};
|
||||
|
||||
use crate::circuit::CheckMode;
|
||||
use crate::graph::TestDataSource;
|
||||
use crate::pfsys::TranscriptType;
|
||||
|
||||
/// The default path to the .json data file
|
||||
@@ -42,20 +42,14 @@ pub const DEFAULT_SPLIT: &str = "false";
|
||||
pub const DEFAULT_VERIFIER_ABI: &str = "verifier_abi.json";
|
||||
/// Default verifier abi for aggregated proofs
|
||||
pub const DEFAULT_VERIFIER_AGGREGATED_ABI: &str = "verifier_aggr_abi.json";
|
||||
/// Default verifier abi for data attestation
|
||||
pub const DEFAULT_VERIFIER_DA_ABI: &str = "verifier_da_abi.json";
|
||||
/// Default solidity code
|
||||
pub const DEFAULT_SOL_CODE: &str = "evm_deploy.sol";
|
||||
/// Default calldata path
|
||||
pub const DEFAULT_CALLDATA: &str = "calldata.bytes";
|
||||
/// Default solidity code for aggregated proofs
|
||||
pub const DEFAULT_SOL_CODE_AGGREGATED: &str = "evm_deploy_aggr.sol";
|
||||
/// Default solidity code for data attestation
|
||||
pub const DEFAULT_SOL_CODE_DA: &str = "evm_deploy_da.sol";
|
||||
/// Default contract address
|
||||
pub const DEFAULT_CONTRACT_ADDRESS: &str = "contract.address";
|
||||
/// Default contract address for data attestation
|
||||
pub const DEFAULT_CONTRACT_ADDRESS_DA: &str = "contract_da.address";
|
||||
/// Default contract address for vk
|
||||
pub const DEFAULT_CONTRACT_ADDRESS_VK: &str = "contract_vk.address";
|
||||
/// Default check mode
|
||||
@@ -78,8 +72,8 @@ pub const DEFAULT_DISABLE_SELECTOR_COMPRESSION: &str = "false";
|
||||
pub const DEFAULT_RENDER_REUSABLE: &str = "false";
|
||||
/// Default contract deployment type
|
||||
pub const DEFAULT_CONTRACT_DEPLOYMENT_TYPE: &str = "verifier";
|
||||
/// Default VK sol path
|
||||
pub const DEFAULT_VK_SOL: &str = "vk.sol";
|
||||
/// Default VKA calldata path
|
||||
pub const DEFAULT_VKA: &str = "vka.bytes";
|
||||
/// Default VK abi path
|
||||
pub const DEFAULT_VK_ABI: &str = "vk.abi";
|
||||
/// Default scale rebase multipliers for calibration
|
||||
@@ -92,6 +86,10 @@ pub const DEFAULT_ONLY_RANGE_CHECK_REBASE: &str = "false";
|
||||
pub const DEFAULT_COMMITMENT: &str = "kzg";
|
||||
/// Default seed used to generate random data
|
||||
pub const DEFAULT_SEED: &str = "21242";
|
||||
/// Default number of decimals for instances rescaling on-chain.
|
||||
pub const DEFAULT_DECIMALS: &str = "18";
|
||||
/// Default path for the vka digest file
|
||||
pub const DEFAULT_VKA_DIGEST: &str = "vka.digest";
|
||||
|
||||
#[cfg(feature = "python-bindings")]
|
||||
/// Converts TranscriptType into a PyObject (Required for TranscriptType to be compatible with Python)
|
||||
@@ -187,8 +185,6 @@ pub enum ContractType {
|
||||
/// Can also be used as an alternative to aggregation for verifiers that are otherwise too large to fit on-chain.
|
||||
reusable: bool,
|
||||
},
|
||||
/// Deploys a verifying key artifact that the reusable verifier loads into memory during runtime. Encodes the circuit specific data that was otherwise hardcoded onto the stack.
|
||||
VerifyingKeyArtifact,
|
||||
}
|
||||
|
||||
impl Default for ContractType {
|
||||
@@ -207,7 +203,6 @@ impl std::fmt::Display for ContractType {
|
||||
"verifier/reusable".to_string()
|
||||
}
|
||||
ContractType::Verifier { reusable: false } => "verifier".to_string(),
|
||||
ContractType::VerifyingKeyArtifact => "vka".to_string(),
|
||||
}
|
||||
)
|
||||
}
|
||||
@@ -224,7 +219,6 @@ impl From<&str> for ContractType {
|
||||
match s {
|
||||
"verifier" => ContractType::Verifier { reusable: false },
|
||||
"verifier/reusable" => ContractType::Verifier { reusable: true },
|
||||
"vka" => ContractType::VerifyingKeyArtifact,
|
||||
_ => {
|
||||
log::error!("Invalid value for ContractType");
|
||||
log::warn!("Defaulting to verifier");
|
||||
@@ -234,24 +228,25 @@ impl From<&str> for ContractType {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, PartialOrd)]
|
||||
/// wrapper for H160 to make it easy to parse into flag vals
|
||||
pub struct H160Flag {
|
||||
inner: H160,
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
impl From<H160Flag> for H160 {
|
||||
fn from(val: H160Flag) -> H160 {
|
||||
val.inner
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
impl ToFlags for H160Flag {
|
||||
fn to_flags(&self) -> Vec<String> {
|
||||
vec![format!("{:#x}", self.inner)]
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
impl From<&str> for H160Flag {
|
||||
fn from(s: &str) -> Self {
|
||||
Self {
|
||||
@@ -299,7 +294,6 @@ impl IntoPy<PyObject> for ContractType {
|
||||
match self {
|
||||
ContractType::Verifier { reusable: true } => "verifier/reusable".to_object(py),
|
||||
ContractType::Verifier { reusable: false } => "verifier".to_object(py),
|
||||
ContractType::VerifyingKeyArtifact => "vka".to_object(py),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -312,7 +306,6 @@ impl<'source> FromPyObject<'source> for ContractType {
|
||||
match strval.to_lowercase().as_str() {
|
||||
"verifier" => Ok(ContractType::Verifier { reusable: false }),
|
||||
"verifier/reusable" => Ok(ContractType::Verifier { reusable: true }),
|
||||
"vka" => Ok(ContractType::VerifyingKeyArtifact),
|
||||
_ => Err(PyValueError::new_err("Invalid value for ContractType")),
|
||||
}
|
||||
}
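Illustrative only: per the IntoPy/FromPyObject impls above, ContractType crosses the Python boundary as the plain strings "verifier", "verifier/reusable", and "vka". The deploy_evm keyword arguments below are assumptions rather than a confirmed signature, since the binding's full parameter list sits outside this hunk:

```python
import asyncio
import ezkl

# Hypothetical sketch: deploy a verifying-key artifact by selecting the
# "vka" contract type string.
async def deploy_vka():
    await ezkl.deploy_evm(
        sol_code_path="vk.sol",           # assumed parameter names
        rpc_url="http://127.0.0.1:8545",
        contract="vka",                   # parsed into ContractType::VerifyingKeyArtifact
    )

asyncio.run(deploy_vka())
```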
|
||||
@@ -669,30 +662,6 @@ pub enum Commands {
|
||||
#[arg(long, default_value = DEFAULT_DISABLE_SELECTOR_COMPRESSION, action = clap::ArgAction::SetTrue)]
|
||||
disable_selector_compression: Option<bool>,
|
||||
},
|
||||
/// Deploys a test contract that the data attester reads from and creates a data attestation formatted input.json file that contains call data information
|
||||
#[command(arg_required_else_help = true)]
|
||||
SetupTestEvmData {
|
||||
/// The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
|
||||
#[arg(short = 'D', long, value_hint = clap::ValueHint::FilePath)]
|
||||
data: Option<String>,
|
||||
/// The path to the compiled model file (generated using the compile-circuit command)
|
||||
#[arg(short = 'M', long, value_hint = clap::ValueHint::FilePath)]
|
||||
compiled_circuit: Option<PathBuf>,
|
||||
/// For testing purposes only. The optional path to the .json data file that will be generated that contains the OnChain data storage information
|
||||
/// derived from the file information in the data .json file.
|
||||
/// Should include both the network input (possibly private) and the network output (public input to the proof)
|
||||
#[arg(short = 'T', long, value_hint = clap::ValueHint::FilePath)]
|
||||
test_data: PathBuf,
|
||||
/// RPC URL for an Ethereum node
|
||||
#[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
|
||||
rpc_url: String,
|
||||
/// Where the input data comes from
|
||||
#[arg(long, default_value = "on-chain", value_hint = clap::ValueHint::Other)]
|
||||
input_source: TestDataSource,
|
||||
/// Where the output data comes from
|
||||
#[arg(long, default_value = "on-chain", value_hint = clap::ValueHint::Other)]
|
||||
output_source: TestDataSource,
|
||||
},
|
||||
/// Swaps the positions in the transcript that correspond to commitments
|
||||
SwapProofCommitments {
|
||||
/// The path to the proof file
|
||||
@@ -735,6 +704,7 @@ pub enum Commands {
|
||||
},
|
||||
/// Encodes a proof into evm calldata
|
||||
#[command(name = "encode-evm-calldata")]
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
EncodeEvmCalldata {
|
||||
/// The path to the proof file (generated using the prove command)
|
||||
#[arg(long, default_value = DEFAULT_PROOF, value_hint = clap::ValueHint::FilePath)]
|
||||
@@ -742,12 +712,13 @@ pub enum Commands {
|
||||
/// The path to save the calldata to
|
||||
#[arg(long, default_value = DEFAULT_CALLDATA, value_hint = clap::ValueHint::FilePath)]
|
||||
calldata_path: Option<PathBuf>,
|
||||
/// The path to the verification key address (only used if the vk is rendered as a separate contract)
|
||||
#[arg(long, value_hint = clap::ValueHint::Other)]
|
||||
addr_vk: Option<H160Flag>,
|
||||
/// The path to the serialized VKA file
|
||||
#[cfg_attr(all(feature = "reusable-verifier", not(target_arch = "wasm32")), arg(long, value_hint = clap::ValueHint::Other))]
|
||||
vka_path: Option<PathBuf>,
|
||||
},
|
||||
/// Creates an Evm verifier for a single proof
|
||||
#[command(name = "create-evm-verifier")]
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
CreateEvmVerifier {
|
||||
/// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
|
||||
#[arg(long, value_hint = clap::ValueHint::FilePath)]
|
||||
@@ -764,12 +735,17 @@ pub enum Commands {
|
||||
/// The path to output the Solidity verifier ABI
|
||||
#[arg(long, default_value = DEFAULT_VERIFIER_ABI, value_hint = clap::ValueHint::FilePath)]
|
||||
abi_path: Option<PathBuf>,
|
||||
/// Whether the to render the verifier as reusable or not. If true, you will need to deploy a VK artifact, passing it as part of the calldata to the verifier.
|
||||
#[arg(long, default_value = DEFAULT_RENDER_REUSABLE, action = clap::ArgAction::SetTrue)]
|
||||
/// Whether to render the verifier as reusable or not. If true, you will need to deploy a VK artifact, passing it as part of the calldata to the verifier.
|
||||
#[cfg_attr(all(feature = "reusable-verifier", not(target_arch = "wasm32")), arg(short = 'R', long, default_value = DEFAULT_RENDER_REUSABLE, action = clap::ArgAction::SetTrue))]
|
||||
reusable: Option<bool>,
|
||||
},
|
||||
/// Creates an Evm verifier artifact for a single proof to be used by the reusable verifier
|
||||
/// Creates an evm verifier artifact to be used by the reusable verifier
|
||||
#[command(name = "create-evm-vka")]
|
||||
#[cfg(all(
|
||||
feature = "eth",
|
||||
feature = "reusable-verifier",
|
||||
not(target_arch = "wasm32")
|
||||
))]
|
||||
CreateEvmVka {
|
||||
/// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
|
||||
#[arg(long, value_hint = clap::ValueHint::FilePath)]
|
||||
@@ -780,39 +756,18 @@ pub enum Commands {
|
||||
/// The path to load the desired verification key file
|
||||
#[arg(long, default_value = DEFAULT_VK, value_hint = clap::ValueHint::FilePath)]
|
||||
vk_path: Option<PathBuf>,
|
||||
/// The path to output the Solidity code
|
||||
#[arg(long, default_value = DEFAULT_VK_SOL, value_hint = clap::ValueHint::FilePath)]
|
||||
sol_code_path: Option<PathBuf>,
|
||||
/// The path to output the Solidity verifier ABI
|
||||
#[arg(long, default_value = DEFAULT_VK_ABI, value_hint = clap::ValueHint::FilePath)]
|
||||
abi_path: Option<PathBuf>,
|
||||
},
|
||||
/// Creates an Evm verifier that attests to on-chain inputs for a single proof
|
||||
#[command(name = "create-evm-da")]
|
||||
CreateEvmDa {
|
||||
/// The path to load circuit settings .json file from (generated using the gen-settings command)
|
||||
#[arg(short = 'S', long, default_value = DEFAULT_SETTINGS, value_hint = clap::ValueHint::FilePath)]
|
||||
settings_path: Option<PathBuf>,
|
||||
/// The path to output the Solidity code
|
||||
#[arg(long, default_value = DEFAULT_SOL_CODE_DA, value_hint = clap::ValueHint::FilePath)]
|
||||
sol_code_path: Option<PathBuf>,
|
||||
/// The path to output the Solidity verifier ABI
|
||||
#[arg(long, default_value = DEFAULT_VERIFIER_DA_ABI, value_hint = clap::ValueHint::FilePath)]
|
||||
abi_path: Option<PathBuf>,
|
||||
/// The path to the .json data file, which should
|
||||
/// contain the necessary calldata and account addresses
|
||||
/// needed to read from all the on-chain
|
||||
/// view functions that return the data that the network
|
||||
/// ingests as inputs.
|
||||
#[arg(short = 'D', long, default_value = DEFAULT_DATA, value_hint = clap::ValueHint::FilePath)]
|
||||
data: Option<String>,
|
||||
/// The path to the witness file. This is needed for proof swapping for kzg commitments.
|
||||
#[arg(short = 'W', long, default_value = DEFAULT_WITNESS, value_hint = clap::ValueHint::FilePath)]
|
||||
witness: Option<PathBuf>,
|
||||
/// The path to output the vka calldata
|
||||
#[arg(long, default_value = DEFAULT_VKA, value_hint = clap::ValueHint::FilePath)]
|
||||
vka_path: Option<PathBuf>,
|
||||
/// The number of decimals we want to use for the rescaling of the instances into on-chain floats
|
||||
/// Default is 18, which is the number of decimals used by most ERC20 tokens
|
||||
#[arg(long, default_value = DEFAULT_DECIMALS, value_hint = clap::ValueHint::Other)]
|
||||
decimals: Option<usize>,
|
||||
},
|
||||
|
||||
/// Creates an Evm verifier for an aggregate proof
|
||||
#[command(name = "create-evm-verifier-aggr")]
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
CreateEvmVerifierAggr {
|
||||
/// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
|
||||
#[arg(long, value_hint = clap::ValueHint::FilePath)]
|
||||
@@ -832,8 +787,8 @@ pub enum Commands {
|
||||
// logrows used for aggregation circuit
|
||||
#[arg(long, default_value = DEFAULT_AGGREGATED_LOGROWS, value_hint = clap::ValueHint::Other)]
|
||||
logrows: Option<u32>,
|
||||
/// Whether the to render the verifier as reusable or not. If true, you will need to deploy a VK artifact, passing it as part of the calldata to the verifier.
|
||||
#[arg(long, default_value = DEFAULT_RENDER_REUSABLE, action = clap::ArgAction::SetTrue)]
|
||||
/// Whether to render the verifier as reusable or not. If true, you will need to deploy a VK artifact, passing it as part of the calldata to the verifier.
|
||||
#[cfg_attr(all(feature = "reusable-verifier", not(target_arch = "wasm32")), arg(short = 'R', long, action = clap::ArgAction::SetTrue))]
|
||||
reusable: Option<bool>,
|
||||
},
|
||||
/// Verifies a proof, returning accept or reject
|
||||
@@ -876,6 +831,7 @@ pub enum Commands {
|
||||
commitment: Option<Commitments>,
|
||||
},
|
||||
/// Deploys an evm contract (verifier, reusable verifier, or vk artifact) that is generated by ezkl
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
DeployEvm {
|
||||
/// The path to the Solidity code (generated using the create-evm-verifier command)
|
||||
#[arg(long, default_value = DEFAULT_SOL_CODE, value_hint = clap::ValueHint::FilePath)]
|
||||
@@ -893,36 +849,13 @@ pub enum Commands {
|
||||
#[arg(short = 'P', long, value_hint = clap::ValueHint::Other)]
|
||||
private_key: Option<String>,
|
||||
/// Contract type to be deployed
|
||||
#[cfg(all(feature = "reusable-verifier", not(target_arch = "wasm32")))]
|
||||
#[arg(long = "contract-type", short = 'C', default_value = DEFAULT_CONTRACT_DEPLOYMENT_TYPE, value_hint = clap::ValueHint::Other)]
|
||||
contract: ContractType,
|
||||
},
|
||||
/// Deploys an evm verifier that allows for data attestation
|
||||
#[command(name = "deploy-evm-da")]
|
||||
DeployEvmDa {
|
||||
/// The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
|
||||
#[arg(short = 'D', long, default_value = DEFAULT_DATA, value_hint = clap::ValueHint::FilePath)]
|
||||
data: Option<String>,
|
||||
/// The path to load circuit settings .json file from (generated using the gen-settings command)
|
||||
#[arg(long, default_value = DEFAULT_SETTINGS, value_hint = clap::ValueHint::FilePath)]
|
||||
settings_path: Option<PathBuf>,
|
||||
/// The path to the Solidity code
|
||||
#[arg(long, default_value = DEFAULT_SOL_CODE_DA, value_hint = clap::ValueHint::FilePath)]
|
||||
sol_code_path: Option<PathBuf>,
|
||||
/// RPC URL for an Ethereum node
|
||||
#[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
|
||||
rpc_url: String,
|
||||
#[arg(long, default_value = DEFAULT_CONTRACT_ADDRESS_DA, value_hint = clap::ValueHint::FilePath)]
|
||||
/// The path to output the contract address
|
||||
addr_path: Option<PathBuf>,
|
||||
/// The optimizer runs to set on the verifier. (Lower values optimize for deployment, while higher values optimize for execution)
|
||||
#[arg(long, default_value = DEFAULT_OPTIMIZER_RUNS, value_hint = clap::ValueHint::Other)]
|
||||
optimizer_runs: usize,
|
||||
/// Private secp256k1 key in hex format, 64 chars, no 0x prefix, of the account signing transactions. If None the private key will be generated by Anvil
|
||||
#[arg(short = 'P', long, value_hint = clap::ValueHint::Other)]
|
||||
private_key: Option<String>,
|
||||
},
|
||||
/// Verifies a proof using a local Evm executor, returning accept or reject
|
||||
#[command(name = "verify-evm")]
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
VerifyEvm {
|
||||
/// The path to the proof file (generated using the prove command)
|
||||
#[arg(long, default_value = DEFAULT_PROOF, value_hint = clap::ValueHint::FilePath)]
|
||||
@@ -933,12 +866,32 @@ pub enum Commands {
|
||||
/// RPC URL for an Ethereum node
|
||||
#[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
|
||||
rpc_url: String,
|
||||
/// does the verifier use data attestation ?
|
||||
#[arg(long, value_hint = clap::ValueHint::Other)]
|
||||
addr_da: Option<H160Flag>,
|
||||
// is the vk rendered separately? if so, specify an address
|
||||
#[arg(long, value_hint = clap::ValueHint::Other)]
|
||||
addr_vk: Option<H160Flag>,
|
||||
/// The path to the serialized vka file
|
||||
#[cfg_attr(all(feature = "reusable-verifier", not(target_arch = "wasm32")), arg(long, value_hint = clap::ValueHint::FilePath))]
|
||||
vka_path: Option<PathBuf>,
|
||||
/// The path to the serialized encoded calldata file generated via the encode-evm-calldata command
|
||||
#[arg(long, value_hint = clap::ValueHint::FilePath)]
|
||||
encoded_calldata: Option<PathBuf>,
|
||||
},
|
||||
/// Registers a VKA, returning its digest, which is used to identify it on-chain.
|
||||
#[command(name = "register-vka")]
|
||||
#[cfg(feature = "reusable-verifier")]
|
||||
RegisterVka {
|
||||
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
|
||||
#[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
|
||||
rpc_url: String,
|
||||
/// The path to the reusable verifier contract's address
|
||||
#[arg(long, default_value = DEFAULT_CONTRACT_ADDRESS, value_hint = clap::ValueHint::Other)]
|
||||
addr_verifier: H160Flag,
|
||||
/// The path to the serialized VKA file
|
||||
#[arg(long, default_value = DEFAULT_VKA, value_hint = clap::ValueHint::FilePath)]
|
||||
vka_path: Option<PathBuf>,
|
||||
/// The path to output the VKA digest to
|
||||
#[arg(long, default_value = DEFAULT_VKA_DIGEST, value_hint = clap::ValueHint::FilePath)]
|
||||
vka_digest_path: Option<PathBuf>,
|
||||
/// Private secp256k1 key in hex format, 64 chars, no 0x prefix, of the account signing transactions. If None the private key will be generated by Anvil
|
||||
#[arg(short = 'P', long, value_hint = clap::ValueHint::Other)]
|
||||
private_key: Option<String>,
|
||||
},
|
||||
#[cfg(not(feature = "no-update"))]
|
||||
/// Updates ezkl binary to version specified (or latest if not specified)
|
||||
|
||||
942
src/eth.rs
942
src/eth.rs
File diff suppressed because one or more lines are too long
425
src/execute.rs
425
src/execute.rs
@@ -1,12 +1,15 @@
|
||||
use crate::circuit::region::RegionSettings;
|
||||
use crate::circuit::CheckMode;
|
||||
use crate::commands::CalibrationTarget;
|
||||
use crate::eth::{deploy_contract_via_solidity, deploy_da_verifier_via_solidity, fix_da_sol};
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
use crate::eth::deploy_contract_via_solidity;
|
||||
#[cfg(all(feature = "reusable-verifier", not(target_arch = "wasm32")))]
|
||||
use crate::eth::register_vka_via_rv;
|
||||
#[allow(unused_imports)]
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
use crate::eth::{get_contract_artifacts, verify_proof_via_solidity};
|
||||
use crate::graph::input::GraphData;
|
||||
use crate::graph::{GraphCircuit, GraphSettings, GraphWitness, Model};
|
||||
use crate::graph::{TestDataSource, TestSources};
|
||||
use crate::pfsys::evm::aggregation_kzg::{AggregationCircuit, PoseidonTranscript};
|
||||
use crate::pfsys::{
|
||||
create_keys, load_pk, load_vk, save_params, save_pk, Snark, StrategyType, TranscriptType,
|
||||
@@ -14,7 +17,7 @@ use crate::pfsys::{
|
||||
use crate::pfsys::{
|
||||
create_proof_circuit, swap_proof_commitments_polycommit, verify_proof_circuit, ProofSplitCommit,
|
||||
};
|
||||
use crate::pfsys::{save_vk, srs::*};
|
||||
use crate::pfsys::{encode_calldata, save_vk, srs::*};
|
||||
use crate::tensor::TensorError;
|
||||
use crate::EZKL_BUF_CAPACITY;
|
||||
use crate::{commands::*, EZKLError};
|
||||
@@ -38,6 +41,7 @@ use halo2_proofs::poly::kzg::{
|
||||
};
|
||||
use halo2_proofs::poly::VerificationStrategy;
|
||||
use halo2_proofs::transcript::{EncodedChallenge, TranscriptReadBuffer};
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
use halo2_solidity_verifier;
|
||||
use halo2curves::bn256::{Bn256, Fr, G1Affine};
|
||||
use halo2curves::ff::{FromUniformBytes, WithSmallOrderMulGroup};
|
||||
@@ -54,9 +58,12 @@ use snark_verifier::loader::native::NativeLoader;
|
||||
use snark_verifier::system::halo2::compile;
|
||||
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
|
||||
use snark_verifier::system::halo2::Config;
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
use std::fs::File;
|
||||
use std::io::BufWriter;
|
||||
use std::io::{Cursor, Write};
|
||||
use std::io::Cursor;
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
use std::io::Write;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
@@ -166,7 +173,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
scale_rebase_multiplier,
|
||||
max_logrows,
|
||||
)
|
||||
.await
|
||||
.map(|e| serde_json::to_string(&e).unwrap()),
|
||||
Commands::GenWitness {
|
||||
data,
|
||||
@@ -181,12 +187,12 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
vk_path,
|
||||
srs_path,
|
||||
)
|
||||
.await
|
||||
.map(|e| serde_json::to_string(&e).unwrap()),
|
||||
Commands::Mock { model, witness } => mock(
|
||||
model.unwrap_or(DEFAULT_MODEL.into()),
|
||||
witness.unwrap_or(DEFAULT_WITNESS.into()),
|
||||
),
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
Commands::CreateEvmVerifier {
|
||||
vk_path,
|
||||
srs_path,
|
||||
@@ -205,48 +211,39 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
)
|
||||
.await
|
||||
}
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
Commands::EncodeEvmCalldata {
|
||||
proof_path,
|
||||
calldata_path,
|
||||
addr_vk,
|
||||
vka_path,
|
||||
} => encode_evm_calldata(
|
||||
proof_path.unwrap_or(DEFAULT_PROOF.into()),
|
||||
calldata_path.unwrap_or(DEFAULT_CALLDATA.into()),
|
||||
addr_vk,
|
||||
vka_path,
|
||||
)
|
||||
.map(|e| serde_json::to_string(&e).unwrap()),
|
||||
#[cfg(all(
|
||||
feature = "eth",
|
||||
feature = "reusable-verifier",
|
||||
not(target_arch = "wasm32")
|
||||
))]
|
||||
Commands::CreateEvmVka {
|
||||
vk_path,
|
||||
srs_path,
|
||||
settings_path,
|
||||
sol_code_path,
|
||||
abi_path,
|
||||
vka_path,
|
||||
decimals,
|
||||
} => {
|
||||
create_evm_vka(
|
||||
vk_path.unwrap_or(DEFAULT_VK.into()),
|
||||
srs_path,
|
||||
settings_path.unwrap_or(DEFAULT_SETTINGS.into()),
|
||||
sol_code_path.unwrap_or(DEFAULT_VK_SOL.into()),
|
||||
abi_path.unwrap_or(DEFAULT_VK_ABI.into()),
|
||||
)
|
||||
.await
|
||||
}
|
||||
Commands::CreateEvmDa {
|
||||
settings_path,
|
||||
sol_code_path,
|
||||
abi_path,
|
||||
data,
|
||||
witness,
|
||||
} => {
|
||||
create_evm_data_attestation(
|
||||
settings_path.unwrap_or(DEFAULT_SETTINGS.into()),
|
||||
sol_code_path.unwrap_or(DEFAULT_SOL_CODE_DA.into()),
|
||||
abi_path.unwrap_or(DEFAULT_VERIFIER_DA_ABI.into()),
|
||||
data.unwrap_or(DEFAULT_DATA.into()),
|
||||
witness,
|
||||
vka_path.unwrap_or(DEFAULT_VKA.into()),
|
||||
decimals.unwrap_or(DEFAULT_DECIMALS.parse().unwrap()),
|
||||
)
|
||||
.await
|
||||
}
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
Commands::CreateEvmVerifierAggr {
|
||||
vk_path,
|
||||
srs_path,
|
||||
@@ -292,24 +289,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
disable_selector_compression
|
||||
.unwrap_or(DEFAULT_DISABLE_SELECTOR_COMPRESSION.parse().unwrap()),
|
||||
),
|
||||
Commands::SetupTestEvmData {
|
||||
data,
|
||||
compiled_circuit,
|
||||
test_data,
|
||||
rpc_url,
|
||||
input_source,
|
||||
output_source,
|
||||
} => {
|
||||
setup_test_evm_data(
|
||||
data.unwrap_or(DEFAULT_DATA.into()),
|
||||
compiled_circuit.unwrap_or(DEFAULT_COMPILED_CIRCUIT.into()),
|
||||
test_data,
|
||||
rpc_url,
|
||||
input_source,
|
||||
output_source,
|
||||
)
|
||||
.await
|
||||
}
|
||||
Commands::SwapProofCommitments {
|
||||
proof_path,
|
||||
witness_path,
|
||||
@@ -418,12 +397,14 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
commitment.into(),
|
||||
)
|
||||
.map(|e| serde_json::to_string(&e).unwrap()),
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
Commands::DeployEvm {
|
||||
sol_code_path,
|
||||
rpc_url,
|
||||
addr_path,
|
||||
optimizer_runs,
|
||||
private_key,
|
||||
#[cfg(all(feature = "reusable-verifier", not(target_arch = "wasm32")))]
|
||||
contract,
|
||||
} => {
|
||||
deploy_evm(
|
||||
@@ -432,43 +413,44 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
addr_path.unwrap_or(DEFAULT_CONTRACT_ADDRESS.into()),
|
||||
optimizer_runs,
|
||||
private_key,
|
||||
#[cfg(all(feature = "reusable-verifier", not(target_arch = "wasm32")))]
|
||||
contract,
|
||||
#[cfg(not(all(feature = "reusable-verifier", not(target_arch = "wasm32"))))]
|
||||
ContractType::default(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
Commands::DeployEvmDa {
|
||||
data,
|
||||
settings_path,
|
||||
sol_code_path,
|
||||
rpc_url,
|
||||
addr_path,
|
||||
optimizer_runs,
|
||||
private_key,
|
||||
} => {
|
||||
deploy_da_evm(
|
||||
data.unwrap_or(DEFAULT_DATA.into()),
|
||||
settings_path.unwrap_or(DEFAULT_SETTINGS.into()),
|
||||
sol_code_path.unwrap_or(DEFAULT_SOL_CODE_DA.into()),
|
||||
rpc_url,
|
||||
addr_path.unwrap_or(DEFAULT_CONTRACT_ADDRESS_DA.into()),
|
||||
optimizer_runs,
|
||||
private_key,
|
||||
)
|
||||
.await
|
||||
}
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
Commands::VerifyEvm {
|
||||
proof_path,
|
||||
addr_verifier,
|
||||
rpc_url,
|
||||
addr_da,
|
||||
addr_vk,
|
||||
vka_path,
|
||||
encoded_calldata,
|
||||
} => {
|
||||
verify_evm(
|
||||
proof_path.unwrap_or(DEFAULT_PROOF.into()),
|
||||
addr_verifier,
|
||||
rpc_url,
|
||||
addr_da,
|
||||
addr_vk,
|
||||
vka_path,
|
||||
encoded_calldata,
|
||||
)
|
||||
.await
|
||||
}
|
||||
#[cfg(feature = "reusable-verifier")]
|
||||
Commands::RegisterVka {
|
||||
addr_verifier,
|
||||
vka_path,
|
||||
rpc_url,
|
||||
vka_digest_path,
|
||||
private_key,
|
||||
} => {
|
||||
register_vka(
|
||||
rpc_url,
|
||||
addr_verifier,
|
||||
vka_path.unwrap_or(DEFAULT_VKA.into()),
|
||||
vka_digest_path.unwrap_or(DEFAULT_VKA_DIGEST.into()),
|
||||
private_key,
|
||||
)
|
||||
.await
|
||||
}
|
||||
@@ -719,7 +701,7 @@ pub(crate) fn table(model: PathBuf, run_args: RunArgs) -> Result<String, EZKLErr
|
||||
Ok(String::new())
|
||||
}
|
||||
|
||||
pub(crate) async fn gen_witness(
|
||||
pub(crate) fn gen_witness(
|
||||
compiled_circuit_path: PathBuf,
|
||||
data: String,
|
||||
output: Option<PathBuf>,
|
||||
@@ -741,7 +723,7 @@ pub(crate) async fn gen_witness(
|
||||
None
|
||||
};
|
||||
|
||||
let mut input = circuit.load_graph_input(&data).await?;
|
||||
let mut input = circuit.load_graph_input(&data)?;
|
||||
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
|
||||
let mut input = circuit.load_graph_input(&data)?;
|
||||
|
||||
@@ -1052,7 +1034,7 @@ impl AccuracyResults {
|
||||
/// Calibrate the circuit parameters to a given a dataset
|
||||
#[allow(trivial_casts)]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) async fn calibrate(
|
||||
pub(crate) fn calibrate(
|
||||
model_path: PathBuf,
|
||||
data: String,
|
||||
settings_path: PathBuf,
|
||||
@@ -1078,7 +1060,7 @@ pub(crate) async fn calibrate(
|
||||
|
||||
let input_shapes = model.graph.input_shapes()?;
|
||||
|
||||
let chunks = data.split_into_batches(input_shapes).await?;
|
||||
let chunks = data.split_into_batches(input_shapes)?;
|
||||
info!("num calibration batches: {}", chunks.len());
|
||||
|
||||
debug!("running onnx predictions...");
|
||||
@@ -1191,7 +1173,7 @@ pub(crate) async fn calibrate(
|
||||
let chunk = chunk.clone();
|
||||
|
||||
let data = circuit
|
||||
.load_graph_from_file_exclusively(&chunk)
|
||||
.load_graph_input(&chunk)
|
||||
.map_err(|e| format!("failed to load circuit inputs: {}", e))?;
|
||||
|
||||
let forward_res = circuit
|
||||
@@ -1467,6 +1449,7 @@ pub(crate) fn mock(
|
||||
Ok(String::new())
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
|
||||
pub(crate) async fn create_evm_verifier(
|
||||
vk_path: PathBuf,
|
||||
srs_path: Option<PathBuf>,
|
||||
@@ -1484,7 +1467,9 @@ pub(crate) async fn create_evm_verifier(
|
||||
)?;
|
||||
|
||||
let num_instance = settings.total_instances();
|
||||
let num_instance: usize = num_instance.iter().sum::<usize>();
|
||||
// create a scales array that is the same length as the number of instances, all populated with 0
|
||||
let scales = vec![0; num_instance.len()];
|
||||
// let poseidon_instance = settings.module_sizes.num_instances().iter().sum::<usize>();
|
||||
|
||||
let vk = load_vk::<KZGCommitmentScheme<Bn256>, GraphCircuit>(vk_path, settings)?;
|
||||
trace!("params computed");
|
||||
@@ -1493,7 +1478,10 @@ pub(crate) async fn create_evm_verifier(
|
||||
¶ms,
|
||||
&vk,
|
||||
halo2_solidity_verifier::BatchOpenScheme::Bdfg21,
|
||||
num_instance,
|
||||
&num_instance,
|
||||
&scales,
|
||||
0,
|
||||
0,
|
||||
);
|
||||
let (verifier_solidity, name) = if reusable {
|
||||
(generator.render_separately()?.0, "Halo2VerifierReusable") // ignore the rendered vk artifact for now and generate it in create_evm_vka
|
||||
@@ -1511,13 +1499,15 @@ pub(crate) async fn create_evm_verifier(
|
||||
Ok(String::new())
|
||||
}
|
||||
|
||||
#[cfg(feature = "reusable-verifier")]
|
||||
pub(crate) async fn create_evm_vka(
|
||||
vk_path: PathBuf,
|
||||
srs_path: Option<PathBuf>,
|
||||
settings_path: PathBuf,
|
||||
sol_code_path: PathBuf,
|
||||
abi_path: PathBuf,
|
||||
vka_path: PathBuf,
|
||||
decimals: usize,
|
||||
) -> Result<String, EZKLError> {
|
||||
log::warn!("Reusable verifier support is experimental and may change in the future. Use at your own risk.");
|
||||
let settings = GraphSettings::load(&settings_path)?;
|
||||
let commitment: Commitments = settings.run_args.commitment.into();
|
||||
let params = load_params_verifier::<KZGCommitmentScheme<Bn256>>(
|
||||
@@ -1526,133 +1516,52 @@ pub(crate) async fn create_evm_vka(
|
||||
commitment,
|
||||
)?;
|
||||
|
||||
let num_instance = settings.total_instances();
|
||||
let num_instance: usize = num_instance.iter().sum::<usize>();
|
||||
let num_poseidon_instance = settings.module_sizes.num_instances().iter().sum::<usize>();
|
||||
let num_fixed_point_instance = settings
|
||||
.model_instance_shapes
|
||||
.iter()
|
||||
.map(|x| x.iter().product::<usize>())
|
||||
.collect_vec();
|
||||
|
||||
let scales = settings.get_model_instance_scales();
|
||||
let vk = load_vk::<KZGCommitmentScheme<Bn256>, GraphCircuit>(vk_path, settings)?;
|
||||
trace!("params computed");
|
||||
// decimals must be at most 38 to prevent overflow
|
||||
if decimals > 38 {
|
||||
return Err("decimals must be less than or equal to 38".into());
|
||||
}
|
||||
|
||||
let generator = halo2_solidity_verifier::SolidityGenerator::new(
|
||||
¶ms,
|
||||
&vk,
|
||||
halo2_solidity_verifier::BatchOpenScheme::Bdfg21,
|
||||
num_instance,
|
||||
&num_fixed_point_instance,
|
||||
&scales,
|
||||
decimals,
|
||||
num_poseidon_instance,
|
||||
);
|
||||
|
||||
let vk_solidity = generator.render_separately()?.1;
|
||||
let vka_words: Vec<[u8; 32]> = generator.render_separately_vka_words()?.1;
|
||||
let serialized_vka_words = bincode::serialize(&vka_words)
    .map_err(|e| EZKLError::from(format!("Failed to serialize vka words: {}", e)))?;
|
||||
|
||||
File::create(sol_code_path.clone())?.write_all(vk_solidity.as_bytes())?;
|
||||
File::create(vka_path.clone())?.write_all(&serialized_vka_words)?;
|
||||
|
||||
// fetch abi of the contract
|
||||
let (abi, _, _) = get_contract_artifacts(sol_code_path, "Halo2VerifyingArtifact", 0).await?;
|
||||
// save abi to file
|
||||
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
|
||||
// Load in the vka words and deserialize them and check that they match the original
|
||||
let bytes = std::fs::read(vka_path)?;
|
||||
let vka_buf: Vec<[u8; 32]> = bincode::deserialize(&bytes)
|
||||
.map_err(|e| EZKLError::from(format!("Failed to deserialize vka words: {e}")))?;
|
||||
if vka_buf != vka_words {
|
||||
return Err("vka words do not match".into());
|
||||
};
|
||||
|
||||
Ok(String::new())
|
||||
}
|
||||
|
||||
pub(crate) async fn create_evm_data_attestation(
|
||||
settings_path: PathBuf,
|
||||
sol_code_path: PathBuf,
|
||||
abi_path: PathBuf,
|
||||
input: String,
|
||||
witness: Option<PathBuf>,
|
||||
) -> Result<String, EZKLError> {
|
||||
#[allow(unused_imports)]
|
||||
use crate::graph::{DataSource, VarVisibility};
|
||||
use crate::{graph::Visibility, pfsys::get_proof_commitments};
|
||||
|
||||
let settings = GraphSettings::load(&settings_path)?;
|
||||
|
||||
let visibility = VarVisibility::from_args(&settings.run_args)?;
|
||||
trace!("params computed");
|
||||
|
||||
// if input is not provided, we just instantiate dummy input data
|
||||
let data =
|
||||
GraphData::from_str(&input).unwrap_or_else(|_| GraphData::new(DataSource::File(vec![])));
|
||||
|
||||
debug!("data attestation data: {:?}", data);
|
||||
|
||||
// The number of input and output instances we attest to for the single call data attestation
|
||||
let mut input_len = None;
|
||||
let mut output_len = None;
|
||||
|
||||
if let Some(DataSource::OnChain(source)) = data.output_data {
|
||||
if visibility.output.is_private() {
|
||||
return Err("private output data on chain is not supported on chain".into());
|
||||
}
|
||||
output_len = Some(source.call.decimals.len());
|
||||
};
|
||||
|
||||
if let DataSource::OnChain(source) = data.input_data {
|
||||
if visibility.input.is_private() {
|
||||
return Err("private input data on chain is not supported on chain".into());
|
||||
}
|
||||
input_len = Some(source.call.decimals.len());
|
||||
};
|
||||
|
||||
// If both model inputs and outputs are attested to then we
|
||||
|
||||
// Read the settings file. Check whether run_args.input_visibility, run_args.output_visibility or run_args.param_visibility is KZGCommit
|
||||
// if so, then we need to load the witness
|
||||
|
||||
let commitment_bytes = if settings.run_args.input_visibility == Visibility::KZGCommit
|
||||
|| settings.run_args.output_visibility == Visibility::KZGCommit
|
||||
|| settings.run_args.param_visibility == Visibility::KZGCommit
|
||||
{
|
||||
let witness = GraphWitness::from_path(witness.unwrap_or(DEFAULT_WITNESS.into()))?;
|
||||
let commitments = witness.get_polycommitments();
|
||||
let proof_first_bytes = get_proof_commitments::<
|
||||
KZGCommitmentScheme<Bn256>,
|
||||
_,
|
||||
EvmTranscript<G1Affine, _, _, _>,
|
||||
>(&commitments);
|
||||
|
||||
Some(proof_first_bytes.unwrap())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let output: String = fix_da_sol(
|
||||
commitment_bytes,
|
||||
input_len.is_none() && output_len.is_none(),
|
||||
)?;
|
||||
let mut f = File::create(sol_code_path.clone())?;
|
||||
let _ = f.write(output.as_bytes());
|
||||
// fetch abi of the contract
|
||||
let (abi, _, _) = get_contract_artifacts(sol_code_path, "DataAttestation", 0).await?;
|
||||
// save abi to file
|
||||
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
|
||||
|
||||
Ok(String::new())
|
||||
}
pub(crate) async fn deploy_da_evm(
    data: String,
    settings_path: PathBuf,
    sol_code_path: PathBuf,
    rpc_url: String,
    addr_path: PathBuf,
    runs: usize,
    private_key: Option<String>,
) -> Result<String, EZKLError> {
    let contract_address = deploy_da_verifier_via_solidity(
        settings_path,
        data,
        sol_code_path,
        &rpc_url,
        runs,
        private_key.as_deref(),
    )
    .await?;
    info!("Contract deployed at: {}", contract_address);

    let mut f = File::create(addr_path)?;
    write!(f, "{:#?}", contract_address)?;

    Ok(String::new())
}
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
pub(crate) async fn deploy_evm(
    sol_code_path: PathBuf,
    rpc_url: String,
@@ -1664,7 +1573,6 @@ pub(crate) async fn deploy_evm(
    let contract_name = match contract {
        ContractType::Verifier { reusable: false } => "Halo2Verifier",
        ContractType::Verifier { reusable: true } => "Halo2VerifierReusable",
        ContractType::VerifyingKeyArtifact => "Halo2VerifyingArtifact",
    };
    let contract_address = deploy_contract_via_solidity(
        sol_code_path,
@@ -1682,24 +1590,61 @@ pub(crate) async fn deploy_evm(
    Ok(String::new())
}

#[cfg(all(feature = "reusable-verifier", not(target_arch = "wasm32")))]
pub(crate) async fn register_vka(
    rpc_url: String,
    rv_addr: H160Flag,
    vka_path: PathBuf,
    vka_digest_path: PathBuf,
    private_key: Option<String>,
) -> Result<String, EZKLError> {
    log::warn!("Reusable verifier support is experimental and may change in the future. Use at your own risk.");
    // Load the vka, which is bincode serialized, from the vka_path
    let bytes = std::fs::read(vka_path)?;
    let vka_buf: Vec<[u8; 32]> = bincode::deserialize(&bytes)
        .map_err(|e| EZKLError::from(format!("Failed to deserialize vka words: {e}")))?;
    let vka_digest = register_vka_via_rv(
        rpc_url.as_ref(),
        private_key.as_deref(),
        rv_addr.into(),
        &vka_buf,
    )
    .await?;

    info!("VKA digest: {:#?}", vka_digest);

    let mut f = File::create(vka_digest_path)?;
    write!(f, "{:#?}", vka_digest)?;
    Ok(String::new())
}
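The registry returns a digest that later identifies the registered VKA. As a rough mental model only — the reusable verifier contract, not this sketch, defines the actual rule — such a digest could be a Keccak-256 hash over the concatenated 32-byte words (this assumes the `tiny_keccak` crate):

```rust
use tiny_keccak::{Hasher, Keccak};

// Hypothetical digest rule for illustration only: Keccak-256 over the
// concatenated VKA words. The deployed registry contract is authoritative.
fn hypothetical_vka_digest(words: &[[u8; 32]]) -> [u8; 32] {
    let mut hasher = Keccak::v256();
    for word in words {
        hasher.update(word);
    }
    let mut digest = [0u8; 32];
    hasher.finalize(&mut digest);
    digest
}
```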
/// Encodes the calldata for the EVM verifier (both aggregated and single proof)
/// TODO: Add a "RV address param" which will query the "RegisteredVKA" events to fetch the
/// VKA from the vka_digest.
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
pub(crate) fn encode_evm_calldata(
    proof_path: PathBuf,
    calldata_path: PathBuf,
    addr_vk: Option<H160Flag>,
    vka_path: Option<PathBuf>,
) -> Result<Vec<u8>, EZKLError> {
    let snark = Snark::load::<IPACommitmentScheme<G1Affine>>(&proof_path)?;

    let flattened_instances = snark.instances.into_iter().flatten();

    let encoded = halo2_solidity_verifier::encode_calldata(
        addr_vk
            .as_ref()
            .map(|x| alloy::primitives::Address::from(*x).0)
            .map(|x| x.0),
        &snark.proof,
        &flattened_instances.collect::<Vec<_>>(),
    );
    // Load the vka, which is bincode serialized, from the vka_path
    let vka_buf: Option<Vec<[u8; 32]>> = match vka_path {
        Some(path) => {
            let bytes = std::fs::read(path)?;
            Some(bincode::deserialize(&bytes).map_err(|e| {
                EZKLError::from(format!("Failed to deserialize vka words: {e}"))
            })?)
        }
        None => None,
    };

    let vka: Option<&[[u8; 32]]> = vka_buf.as_deref();
    let encoded = encode_calldata(vka, &snark.proof, &flattened_instances.collect::<Vec<_>>());

    log::debug!("Encoded calldata: {:?}", encoded);

@@ -1708,35 +1653,26 @@ pub(crate) fn encode_evm_calldata(
    Ok(encoded)
}
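A minimal sketch of the two encoding paths above, using the `encode_calldata` helper added to `pfsys` later in this diff; the proof bytes and instances are placeholders, not a real proof:

```rust
use halo2curves::bn256::Fr;

// Placeholder inputs for illustration; not a real proof.
fn sketch_encodings(proof: &[u8], instances: &[Fr]) {
    // Embedded-key verifier: selector + proof + instances only.
    let plain = encode_calldata(None, proof, instances);

    // Reusable verifier: the registered VKA words ride along in calldata.
    let vka_words: Vec<[u8; 32]> = vec![[0u8; 32]; 4]; // placeholder words
    let with_vka = encode_calldata(Some(vka_words.as_slice()), proof, instances);

    // The VKA variant adds a head word, a length word and the words themselves.
    assert!(with_vka.len() > plain.len());
}
```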
/// TODO: Add an optional vka_digest param that will allow us to fetch the associated VKA
/// from the RegisteredVKA events on the RV.
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
pub(crate) async fn verify_evm(
    proof_path: PathBuf,
    addr_verifier: H160Flag,
    rpc_url: String,
    addr_da: Option<H160Flag>,
    addr_vk: Option<H160Flag>,
    vka_path: Option<PathBuf>,
    encoded_calldata: Option<PathBuf>,
) -> Result<String, EZKLError> {
    use crate::eth::verify_proof_with_data_attestation;

    let proof = Snark::load::<KZGCommitmentScheme<Bn256>>(&proof_path)?;

    let result = if let Some(addr_da) = addr_da {
        verify_proof_with_data_attestation(
            proof.clone(),
            addr_verifier.into(),
            addr_da.into(),
            addr_vk.map(|s| s.into()),
            &rpc_url,
        )
        .await?
    } else {
        verify_proof_via_solidity(
            proof.clone(),
            addr_verifier.into(),
            addr_vk.map(|s| s.into()),
            &rpc_url,
        )
        .await?
    };
    let result = verify_proof_via_solidity(
        proof.clone(),
        addr_verifier.into(),
        vka_path.map(|s| s.into()),
        rpc_url.as_ref(),
        encoded_calldata.map(|s| s.into()),
    )
    .await?;

    info!("Solidity verification result: {}", result);

@@ -1747,6 +1683,7 @@ pub(crate) async fn verify_evm(
    Ok(String::new())
}
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
pub(crate) async fn create_evm_aggregate_verifier(
    vk_path: PathBuf,
    srs_path: Option<PathBuf>,
@@ -1772,8 +1709,8 @@ pub(crate) async fn create_evm_aggregate_verifier(
        .sum();

    let num_instance = AggregationCircuit::num_instance(num_instance);
    let scales = vec![0; num_instance.len()];
    assert_eq!(num_instance.len(), 1);
    let num_instance = num_instance[0];

    let agg_vk = load_vk::<KZGCommitmentScheme<Bn256>, AggregationCircuit>(vk_path, ())?;

@@ -1781,7 +1718,10 @@ pub(crate) async fn create_evm_aggregate_verifier(
        &params,
        &agg_vk,
        halo2_solidity_verifier::BatchOpenScheme::Bdfg21,
        num_instance,
        &num_instance,
        &scales,
        0,
        0,
    );

    let acc_encoding = halo2_solidity_verifier::AccumulatorEncoding::new(
@@ -1870,41 +1810,6 @@ pub(crate) fn setup(
    Ok(String::new())
}

pub(crate) async fn setup_test_evm_data(
    data_path: String,
    compiled_circuit_path: PathBuf,
    test_data: PathBuf,
    rpc_url: String,
    input_source: TestDataSource,
    output_source: TestDataSource,
) -> Result<String, EZKLError> {
    use crate::graph::TestOnChainData;

    let mut data = GraphData::from_str(&data_path)?;
    let mut circuit = GraphCircuit::load(compiled_circuit_path)?;

    // if both input and output are from files fail
    if matches!(input_source, TestDataSource::File) && matches!(output_source, TestDataSource::File)
    {
        return Err("Both input and output cannot be from files".into());
    }

    let test_on_chain_data = TestOnChainData {
        data: test_data.clone(),
        rpc: rpc_url,
        data_sources: TestSources {
            input: input_source,
            output: output_source,
        },
    };

    circuit
        .populate_on_chain_test_data(&mut data, test_on_chain_data)
        .await?;

    Ok(String::new())
}
use crate::pfsys::ProofType;

#[allow(clippy::too_many_arguments)]

@@ -107,6 +107,7 @@ pub enum GraphError {
        not(all(target_arch = "wasm32", target_os = "unknown"))
    ))]
    #[error("[eth] {0}")]
    #[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
    EthError(#[from] crate::eth::EthError),
    /// Json error
    #[error("[json] {0}")]
@@ -182,46 +182,6 @@ impl OnChainSource {
    pub fn new(call: CallToAccount, rpc: RPCUrl) -> Self {
        OnChainSource { call, rpc }
    }

    #[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
    /// Creates test data for the OnChain data source
    /// Used for testing and development purposes
    ///
    /// # Arguments
    /// * `data` - Sample file data to use
    /// * `scales` - Scaling factors for each input
    /// * `shapes` - Shapes of the input tensors
    /// * `rpc` - Optional RPC endpoint override
    pub async fn test_from_file_data(
        data: &FileSource,
        scales: Vec<crate::Scale>,
        mut shapes: Vec<Vec<usize>>,
        rpc: &str,
    ) -> Result<Self, GraphError> {
        use crate::eth::{read_on_chain_inputs, test_on_chain_data};
        use log::debug;

        // Set up local anvil instance for reading on-chain data
        let (client, client_address) = crate::eth::setup_eth_backend(rpc, None).await?;

        let mut scales = scales;
        // set the scale to 0 (a scaling factor of 1) where the data is a field element
        for (idx, i) in data.iter().enumerate() {
            if i.iter().all(|e| e.is_field()) {
                scales[idx] = 0;
                shapes[idx] = vec![i.len()];
            }
        }
        let used_rpc = rpc.to_string();

        let call_to_account = test_on_chain_data(client.clone(), data).await?;
        debug!("Call to account: {:?}", call_to_account);
        let inputs = read_on_chain_inputs(client.clone(), client_address, &call_to_account).await?;
        debug!("Inputs: {:?}", inputs);

        // Fill the input_data field of the GraphData struct
        Ok(OnChainSource::new(call_to_account, used_rpc))
    }
}
/// Specification for view-only calls to fetch on-chain data
@@ -249,29 +209,30 @@ pub struct CallToAccount {

/// Represents different sources of input/output data for the EZKL model
#[derive(Clone, Debug, Serialize, PartialOrd, PartialEq)]
#[serde(untagged)]
pub enum DataSource {
    /// Data from a JSON file containing arrays of values
    File(FileSource),
    /// Data fetched from blockchain contracts
    OnChain(OnChainSource),
pub struct DataSource(FileSource);

impl DataSource {
    /// Gets the underlying file source data
    pub fn values(&self) -> &FileSource {
        &self.0
    }
}

impl Default for DataSource {
    fn default() -> Self {
        DataSource::File(vec![vec![]])
        DataSource(vec![vec![]])
    }
}

impl From<FileSource> for DataSource {
    fn from(data: FileSource) -> Self {
        DataSource::File(data)
        DataSource(data)
    }
}
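With `DataSource` collapsed from an untagged enum to a newtype over `FileSource`, construction becomes uniform. A small sketch under the new shape (it assumes the `ezkl::graph::input` types from this diff are in scope):

```rust
// Nested float data converts straight into the newtype via the From impls
// shown here; values() exposes the underlying FileSource again.
fn sketch_datasource_conversion() {
    let floats: Vec<Vec<f64>> = vec![vec![0.5, -1.25], vec![2.0]];
    let source: DataSource = floats.into(); // wraps values as FileSourceInner::Float
    assert_eq!(source.values().len(), 2); // two input tensors
}
```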
impl From<Vec<Vec<Fp>>> for DataSource {
    fn from(data: Vec<Vec<Fp>>) -> Self {
        DataSource::File(
        DataSource(
            data.iter()
                .map(|e| e.iter().map(|e| FileSourceInner::Field(*e)).collect())
                .collect(),
@@ -281,7 +242,7 @@ impl From<Vec<Vec<Fp>>> for DataSource {

impl From<Vec<Vec<f64>>> for DataSource {
    fn from(data: Vec<Vec<f64>>) -> Self {
        DataSource::File(
        DataSource(
            data.iter()
                .map(|e| e.iter().map(|e| FileSourceInner::Float(*e)).collect())
                .collect(),
@@ -289,12 +250,6 @@ impl From<Vec<Vec<f64>>> for DataSource {
    }
}

impl From<OnChainSource> for DataSource {
    fn from(data: OnChainSource) -> Self {
        DataSource::OnChain(data)
    }
}

// Note: Always use JSON serialization for untagged enums
impl<'de> Deserialize<'de> for DataSource {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
@@ -306,13 +261,7 @@ impl<'de> Deserialize<'de> for DataSource {
        // Try deserializing as FileSource first
        let first_try: Result<FileSource, _> = serde_json::from_str(this_json.get());
        if let Ok(t) = first_try {
            return Ok(DataSource::File(t));
        }

        // Try deserializing as OnChainSource
        let second_try: Result<OnChainSource, _> = serde_json::from_str(this_json.get());
        if let Ok(t) = second_try {
            return Ok(DataSource::OnChain(t));
            return Ok(DataSource(t));
        }

        Err(serde::de::Error::custom("failed to deserialize DataSource"))
@@ -348,25 +297,16 @@ impl GraphData {
        datum_types: &[tract_onnx::prelude::DatumType],
    ) -> Result<TVec<TValue>, GraphError> {
        let mut inputs = TVec::new();
        match &self.input_data {
            DataSource::File(data) => {
                for (i, input) in data.iter().enumerate() {
                    if !input.is_empty() {
                        let dt = datum_types[i];
                        let input = input.iter().map(|e| e.to_float()).collect::<Vec<f64>>();
                        let tt = TractTensor::from_shape(&shapes[i], &input)?;
                        let tt = tt.cast_to_dt(dt)?;
                        inputs.push(tt.into_owned().into());
                    }
                }
            }
            _ => {
                return Err(GraphError::InvalidDims(
                    0,
                    "non file data cannot be split into batches".to_string(),
                ));
        for (i, input) in self.input_data.values().iter().enumerate() {
            if !input.is_empty() {
                let dt = datum_types[i];
                let input = input.iter().map(|e| e.to_float()).collect::<Vec<f64>>();
                let tt = TractTensor::from_shape(&shapes[i], &input)?;
                let tt = tt.cast_to_dt(dt)?;
                inputs.push(tt.into_owned().into());
            }
        }

        Ok(inputs)
    }

@@ -398,7 +338,7 @@ impl GraphData {
            }
        }
        Ok(GraphData {
            input_data: DataSource::File(input_data),
            input_data: DataSource(input_data),
            output_data: None,
        })
    }
@@ -478,7 +418,7 @@ impl GraphData {
    /// Returns error if:
    /// - Data is from on-chain source
    /// - Input size is not evenly divisible by batch size
    pub async fn split_into_batches(
    pub fn split_into_batches(
        &self,
        input_shapes: Vec<Vec<usize>>,
    ) -> Result<Vec<Self>, GraphError> {
@@ -486,18 +426,9 @@ impl GraphData {

        let iterable = match self {
            GraphData {
                input_data: DataSource::File(data),
                input_data: DataSource(data),
                output_data: _,
            } => data.clone(),
            GraphData {
                input_data: DataSource::OnChain(_),
                output_data: _,
            } => {
                return Err(GraphError::InvalidDims(
                    0,
                    "on-chain data cannot be split into batches".to_string(),
                ));
            }
        };

        // Process each input tensor according to its shape
@@ -543,12 +474,12 @@ impl GraphData {
            for input in batched_inputs.iter() {
                batch.push(input[i].clone());
            }
            input_batches.push(DataSource::File(batch));
            input_batches.push(DataSource(batch));
        }

        // Ensure at least one batch exists
        if input_batches.is_empty() {
            input_batches.push(DataSource::File(vec![vec![]]));
            input_batches.push(DataSource(vec![vec![]]));
        }

        // Create GraphData instance for each batch
@@ -623,16 +554,7 @@ impl ToPyObject for CallToAccount {
#[cfg(feature = "python-bindings")]
impl ToPyObject for DataSource {
    fn to_object(&self, py: Python) -> PyObject {
        match self {
            DataSource::File(data) => data.to_object(py),
            DataSource::OnChain(source) => {
                let dict = PyDict::new(py);
                dict.set_item("rpc_url", &source.rpc).unwrap();
                dict.set_item("calls_to_accounts", &source.call.to_object(py))
                    .unwrap();
                dict.to_object(py)
            }
        }
        self.0.to_object(py)
    }
}
219 src/graph/mod.rs
@@ -25,9 +25,11 @@ use itertools::Itertools;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tosubcommand::ToFlags;

#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
use self::input::{FileSource, GraphData};

use self::errors::GraphError;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use self::input::OnChainSource;
use self::input::{FileSource, GraphData};
use self::modules::{GraphModules, ModuleConfigs, ModuleForwardResult, ModuleSizes};
use crate::circuit::lookup::LookupOp;
@@ -538,16 +540,38 @@ impl GraphSettings {

    /// calculate the total number of instances
    pub fn total_instances(&self) -> Vec<usize> {
        let mut instances: Vec<usize> = self
            .model_instance_shapes
            .iter()
            .map(|x| x.iter().product())
            .collect();
        instances.extend(self.module_sizes.num_instances());
        let mut instances: Vec<usize> = self.module_sizes.num_instances();
        instances.extend(
            self.model_instance_shapes
                .iter()
                .map(|x| x.iter().product::<usize>()),
        );

        instances
    }
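The rewrite above reverses the instance ordering: module instances (e.g. Poseidon outputs) now come first, followed by the flattened model instance shapes. A worked sketch with made-up sizes:

```rust
// Hypothetical sizes: two module instance columns of one value each, and
// model instance shapes [[2, 3], [4]] flattening to 6 and 4 values.
let module_instances: Vec<usize> = vec![1, 1];
let model_instance_shapes: Vec<Vec<usize>> = vec![vec![2, 3], vec![4]];

let mut instances = module_instances.clone();
instances.extend(model_instance_shapes.iter().map(|x| x.iter().product::<usize>()));
assert_eq!(instances, vec![1, 1, 6, 4]); // modules first, then the model
```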
    /// get the scale data for instances
    pub fn get_model_instance_scales(&self) -> Vec<crate::Scale> {
        let mut scales = vec![];
        if self.run_args.input_visibility.is_public() {
            scales.extend(
                self.model_input_scales
                    .iter()
                    .map(|x| x.clone())
                    .collect::<Vec<crate::Scale>>(),
            );
        };
        if self.run_args.output_visibility.is_public() {
            scales.extend(
                self.model_output_scales
                    .iter()
                    .map(|x| x.clone())
                    .collect::<Vec<crate::Scale>>(),
            );
        };
        scales
    }

    /// calculate the log2 of the total number of instances
    pub fn log2_total_instances(&self) -> u32 {
        let sum = self.total_instances().iter().sum::<usize>();
@@ -928,111 +952,11 @@ impl GraphCircuit {
    }

    ///
    #[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
    pub fn load_graph_input(&mut self, data: &GraphData) -> Result<Vec<Tensor<Fp>>, GraphError> {
        let shapes = self.model().graph.input_shapes()?;
        let scales = self.model().graph.get_input_scales();
        let input_types = self.model().graph.get_input_types()?;
        self.process_data_source(&data.input_data, shapes, scales, input_types)
    }

    ///
    pub fn load_graph_from_file_exclusively(
        &mut self,
        data: &GraphData,
    ) -> Result<Vec<Tensor<Fp>>, GraphError> {
        let shapes = self.model().graph.input_shapes()?;
        let scales = self.model().graph.get_input_scales();
        let input_types = self.model().graph.get_input_types()?;
        debug!("input scales: {:?}", scales);

        match &data.input_data {
            DataSource::File(file_data) => {
                self.load_file_data(file_data, &shapes, scales, input_types)
            }
            _ => Err(GraphError::OnChainDataSource),
        }
    }

    ///
    #[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
    pub async fn load_graph_input(
        &mut self,
        data: &GraphData,
    ) -> Result<Vec<Tensor<Fp>>, GraphError> {
        let shapes = self.model().graph.input_shapes()?;
        let scales = self.model().graph.get_input_scales();
        let input_types = self.model().graph.get_input_types()?;
        debug!("input scales: {:?}", scales);

        self.process_data_source(&data.input_data, shapes, scales, input_types)
            .await
    }

    #[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
    /// Process the data source for the model
    fn process_data_source(
        &mut self,
        data: &DataSource,
        shapes: Vec<Vec<usize>>,
        scales: Vec<crate::Scale>,
        input_types: Vec<InputType>,
    ) -> Result<Vec<Tensor<Fp>>, GraphError> {
        match &data {
            DataSource::File(file_data) => {
                self.load_file_data(file_data, &shapes, scales, input_types)
            }
            DataSource::OnChain(_) => Err(GraphError::OnChainDataSource),
        }
    }

    #[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
    /// Process the data source for the model
    async fn process_data_source(
        &mut self,
        data: &DataSource,
        shapes: Vec<Vec<usize>>,
        scales: Vec<crate::Scale>,
        input_types: Vec<InputType>,
    ) -> Result<Vec<Tensor<Fp>>, GraphError> {
        match &data {
            DataSource::OnChain(source) => {
                let mut per_item_scale = vec![];
                for (i, shape) in shapes.iter().enumerate() {
                    per_item_scale.extend(vec![scales[i]; shape.iter().product::<usize>()]);
                }

                self.load_on_chain_data(source.clone(), &shapes, per_item_scale)
                    .await
            }
            DataSource::File(file_data) => {
                self.load_file_data(file_data, &shapes, scales, input_types)
            }
        }
    }

    /// Prepare on chain test data
    #[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
    pub async fn load_on_chain_data(
        &mut self,
        source: OnChainSource,
        shapes: &Vec<Vec<usize>>,
        scales: Vec<crate::Scale>,
    ) -> Result<Vec<Tensor<Fp>>, GraphError> {
        use crate::eth::{evm_quantize, read_on_chain_inputs, setup_eth_backend};
        let (client, client_address) = setup_eth_backend(&source.rpc, None).await?;
        let input = read_on_chain_inputs(client.clone(), client_address, &source.call).await?;
        let quantized_evm_inputs =
            evm_quantize(client, scales, &input, &source.call.decimals).await?;
        // on-chain data has already been quantized at this point. Just need to reshape it and push into tensor vector
        let mut inputs: Vec<Tensor<Fp>> = vec![];
        for (input, shape) in [quantized_evm_inputs].iter().zip(shapes) {
            let mut t: Tensor<Fp> = input.iter().cloned().collect();
            t.reshape(shape)?;
            inputs.push(t);
        }

        Ok(inputs)
        self.load_file_data(data.input_data.values(), &shapes, scales, input_types)
    }
    ///
@@ -1404,85 +1328,6 @@ impl GraphCircuit {
        let model = Model::from_run_args(&params.run_args, model_path)?;
        Self::new_from_settings(model, params.clone(), check_mode)
    }

    ///
    #[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
    pub async fn populate_on_chain_test_data(
        &mut self,
        data: &mut GraphData,
        test_on_chain_data: TestOnChainData,
    ) -> Result<(), GraphError> {
        // Set up local anvil instance for reading on-chain data

        let input_scales = self.model().graph.get_input_scales();
        let output_scales = self.model().graph.get_output_scales()?;
        let input_shapes = self.model().graph.input_shapes()?;
        let output_shapes = self.model().graph.output_shapes()?;
        let mut input_data = None;
        let mut output_data = None;

        if matches!(
            test_on_chain_data.data_sources.input,
            TestDataSource::OnChain
        ) {
            // if not public then fail
            if self.settings().run_args.input_visibility.is_private() {
                return Err(GraphError::OnChainDataSource);
            }

            input_data = match &data.input_data {
                DataSource::File(input_data) => Some(input_data),
                _ => {
                    return Err(GraphError::MissingDataSource);
                }
            };
        }
        if matches!(
            test_on_chain_data.data_sources.output,
            TestDataSource::OnChain
        ) {
            // if not public then fail
            if self.settings().run_args.output_visibility.is_private() {
                return Err(GraphError::OnChainDataSource);
            }

            output_data = match &data.output_data {
                Some(DataSource::File(output_data)) => Some(output_data),
                _ => return Err(GraphError::MissingDataSource),
            };
        }
        // Merge the input and output data
        let mut file_data: Vec<Vec<input::FileSourceInner>> = vec![];
        let mut scales: Vec<crate::Scale> = vec![];
        let mut shapes: Vec<Vec<usize>> = vec![];
        if let Some(input_data) = input_data {
            file_data.extend(input_data.clone());
            scales.extend(input_scales.clone());
            shapes.extend(input_shapes.clone());
        }
        if let Some(output_data) = output_data {
            file_data.extend(output_data.clone());
            scales.extend(output_scales.clone());
            shapes.extend(output_shapes.clone());
        };
        // print file data
        debug!("file data: {:?}", file_data);

        let on_chain_data: OnChainSource =
            OnChainSource::test_from_file_data(&file_data, scales, shapes, &test_on_chain_data.rpc)
                .await?;
        // Here we update the GraphData struct with the on-chain data
        if input_data.is_some() {
            data.input_data = on_chain_data.clone().into();
        }
        if output_data.is_some() {
            data.output_data = Some(on_chain_data.into());
        }
        debug!("test on-chain data: {:?}", data);
        // Save the updated GraphData struct to the data_path
        data.save(test_on_chain_data.data)?;
        Ok(())
    }
}
#[derive(Clone, Debug, Default, Serialize, Deserialize)]

@@ -23,7 +23,6 @@
)]
// we allow this for our dynamic range based indexing scheme
#![allow(clippy::single_range_in_vec_init)]
#![feature(buf_read_has_data_left)]
#![feature(stmt_expr_attributes)]

//! A library for turning computational graphs, such as neural networks, into ZK-circuits.
@@ -50,6 +49,7 @@ pub enum EZKLError {
        not(all(target_arch = "wasm32", target_os = "unknown"))
    ))]
    #[error("[eth] {0}")]
    #[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
    EthError(#[from] eth::EthError),
    #[error("[graph] {0}")]
    GraphError(#[from] graph::errors::GraphError),
@@ -130,7 +130,7 @@ pub fn version() -> &'static str {

/// Bindings management
#[cfg(any(
    feature = "ios-bindings",
    feature = "universal-bindings",
    all(target_arch = "wasm32", target_os = "unknown"),
    feature = "python-bindings"
))]
@@ -140,7 +140,7 @@ pub mod circuit;
/// CLI commands.
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub mod commands;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
#[cfg(all(feature = "eth", not(target_arch = "wasm32")))]
// abigen doesn't generate docs for this module
#[allow(missing_docs)]
/// Utility functions for contracts
@@ -157,7 +157,7 @@ pub mod fieldutils;
pub mod graph;
/// beautiful logging
#[cfg(all(
    feature = "ezkl",
    feature = "logging",
    not(all(target_arch = "wasm32", target_os = "unknown"))
))]
pub mod logger;

@@ -8,6 +8,8 @@ pub mod srs;
pub mod errors;

pub use errors::PfsysError;
use itertools::chain;
use std::borrow::Borrow;

use crate::circuit::CheckMode;
use crate::graph::GraphWitness;
@@ -26,7 +28,7 @@ use halo2_proofs::poly::VerificationStrategy;
use halo2_proofs::transcript::{EncodedChallenge, TranscriptReadBuffer, TranscriptWriterBuffer};
use halo2curves::ff::{FromUniformBytes, PrimeField, WithSmallOrderMulGroup};
use halo2curves::serde::SerdeObject;
use halo2curves::CurveAffine;
use halo2curves::{bn256, CurveAffine};
use instant::Instant;
use log::{debug, info, trace};
#[cfg(not(feature = "det-prove"))]
@@ -63,6 +65,81 @@ fn serde_format_from_str(s: &str) -> halo2_proofs::SerdeFormat {
    }
}
/// Function signature of `verifyProof(bytes,uint256[])`.
pub const FN_SIG_VERIFY_PROOF: [u8; 4] = [0x1e, 0x8e, 0x1e, 0x13];

/// Function signature of `verifyProof(bytes,uint256[],bytes32[])`.
pub const FN_SIG_VERIFY_PROOF_WITH_VKA: [u8; 4] = [0x34, 0x09, 0xfc, 0x9f];

/// Function signature of `verifyWithDataAttestation(address,bytes)`.
pub const FN_SIG_VERIFY_WITH_DATA_ATTESTATION: [u8; 4] = [0x4c, 0x79, 0x85, 0xd0];

/// Function signature of `registeredVkas(bytes32[])` (0xdc8b4094).
pub const FN_SIG_REGISTER_VKA: [u8; 4] = [0xdc, 0x8b, 0x40, 0x94];
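Each selector is the first four bytes of the Keccak-256 hash of the canonical signature string. A sketch of recomputing one, assuming the `tiny_keccak` crate as a dev dependency:

```rust
use tiny_keccak::{Hasher, Keccak};

/// First four bytes of keccak256(signature) — the Solidity selector.
fn selector(signature: &str) -> [u8; 4] {
    let mut hasher = Keccak::v256();
    hasher.update(signature.as_bytes());
    let mut hash = [0u8; 32];
    hasher.finalize(&mut hash);
    [hash[0], hash[1], hash[2], hash[3]]
}

// selector("verifyProof(bytes,uint256[])") should reproduce [0x1e, 0x8e, 0x1e, 0x13].
```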
/// Encode proof into calldata to invoke `Halo2Verifier.verifyProof`.
///
/// For `vka`:
/// - Pass `None` if the verifying key is embedded in `Halo2Verifier`
/// - Pass `Some(vka)` if the verifying key is separated and already registered
pub fn encode_calldata(vka: Option<&[[u8; 32]]>, proof: &[u8], instances: &[bn256::Fr]) -> Vec<u8> {
    let (fn_sig, offset) = if vka.is_some() {
        (FN_SIG_VERIFY_PROOF_WITH_VKA, 0x60)
    } else {
        (FN_SIG_VERIFY_PROOF, 0x40)
    };
    let num_instances = instances.len();
    let (vka_offset, vka_data) = if let Some(vka) = vka {
        (
            to_be_bytes_32(offset + 0x40 + proof.len() + (num_instances * 0x20)).to_vec(),
            vka.to_vec(),
        )
    } else {
        (Vec::new(), Vec::new())
    };
    let num_vka_words = vka_data.len();
    chain![
        fn_sig,                                        // function signature
        to_be_bytes_32(offset),                        // offset of proof
        to_be_bytes_32(offset + 0x20 + proof.len()),   // offset of instances
        vka_offset,                                    // offset of vka
        to_be_bytes_32(proof.len()),                   // length of proof
        proof.iter().cloned(),                         // proof
        to_be_bytes_32(num_instances),                 // length of instances
        instances.iter().map(fr_to_bytes32).flatten(), // instances
        to_be_bytes_32(num_vka_words),                 // vka length
        vka_data.iter().flat_map(|arr| arr.iter().cloned()) // vka words
    ]
    .collect()
}
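The offsets follow the ABI head/tail convention: with a VKA there are three head words (0x60 bytes), and each dynamic argument's tail starts with its length word. A worked check with a hypothetical 0x80-byte proof, two instances and three VKA words:

```rust
// Mirror of the offset arithmetic in encode_calldata, with concrete numbers.
let proof_len = 0x80usize;
let num_instances = 2usize;

let proof_offset = 0x60; // three 32-byte head words precede the tails
let instances_offset = proof_offset + 0x20 + proof_len; // skip proof length word + proof
let vka_offset = proof_offset + 0x40 + proof_len + num_instances * 0x20;

assert_eq!(instances_offset, 0x100);
assert_eq!(vka_offset, 0x160); // instances tail: 1 length word + 2 instance words
```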
fn to_be_bytes_32(value: usize) -> [u8; 32] {
    let mut bytes = [0u8; 32];
    // Convert the usize to big-endian bytes in the last 8 bytes (or however many are needed)
    let value_bytes = value.to_be_bytes();
    let start_idx = 32 - value_bytes.len();
    bytes[start_idx..].copy_from_slice(&value_bytes);
    bytes
}

fn fr_to_bytes32(fe: impl Borrow<bn256::Fr>) -> [u8; 32] {
    fe_to_bytes32(fe)
}

fn fe_to_bytes32<F>(fe: impl Borrow<F>) -> [u8; 32]
where
    F: PrimeField<Repr = halo2_proofs::halo2curves::serde::Repr<32>>,
{
    let repr = fe.borrow().to_repr();
    // Note: we're converting from the little-endian representation to big-endian bytes
    let mut bytes = [0u8; 32];
    let inner = repr.inner();
    for i in 0..32 {
        bytes[31 - i] = inner[i];
    }
    bytes
}
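The loop reverses the field element's little-endian `Repr` into the big-endian `uint256` layout the EVM expects. The same reversal on plain bytes, as a standalone sketch:

```rust
/// Reverse a 32-byte little-endian value into big-endian word order.
fn le_to_be(le: [u8; 32]) -> [u8; 32] {
    let mut be = [0u8; 32];
    for i in 0..32 {
        be[31 - i] = le[i];
    }
    be
}

// The value 1 stored little-endian ([1, 0, ..., 0]) becomes
// [0, ..., 0, 1]: the EVM's uint256 encoding of 1.
```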
#[allow(missing_docs)]
#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Deserialize, Serialize, PartialOrd)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), derive(ValueEnum))]

@@ -19,8 +19,7 @@ use maybe_rayon::{
    slice::ParallelSliceMut,
};
use serde::{Deserialize, Serialize};
use std::io::BufRead;
use std::io::Write;
use std::io::{BufRead, Write};
use std::path::PathBuf;
pub use val::*;
pub use var::*;
@@ -58,7 +57,7 @@ pub trait TensorType: Clone + Debug {
}

macro_rules! tensor_type {
    ($rust_type:ty, $tensor_type:ident, $zero:expr_2021, $one:expr_2021) => {
    ($rust_type:ty, $tensor_type:ident, $zero:expr, $one:expr) => {
        impl TensorType for $rust_type {
            fn zero() -> Option<Self> {
                Some($zero)
@@ -322,19 +321,51 @@ impl<T: Clone + TensorType + PrimeField> Tensor<T> {
        let mut buf_reader = std::io::BufReader::new(reader);

        let mut inner = Vec::new();
        while let Ok(true) = buf_reader.has_data_left() {
        loop {
            // Check if there's more data available
            let has_data = match buf_reader.fill_buf() {
                Ok(buffer) => !buffer.is_empty(),
                Err(e) => {
                    return Err(TensorError::FileLoadError(format!(
                        "IO error while checking for data: {}",
                        e
                    )));
                }
            };

            // If no data left, we're done
            if !has_data {
                break;
            }

            // Try to read a complete T::Repr
            let mut repr = T::Repr::default();

            match buf_reader.read_exact(repr.as_mut()) {
                Ok(_) => {
                    inner.push(T::from_repr(repr).unwrap());
                    // Successfully read a complete representation
                    let tensor = T::from_repr(repr);

                    // Check if the conversion was successful
                    if tensor.is_some().into() {
                        // Unwrap the value safely (we already checked it's Some)
                        inner.push(tensor.unwrap());
                    } else {
                        return Err(TensorError::FileLoadError(
                            "Failed to convert representation to tensor".to_string(),
                        ));
                    }
                }
                Err(_) => {
                    // Any error during read_exact is treated as a failure
                    // This matches the original implementation
                    return Err(TensorError::FileLoadError(
                        "Failed to read tensor".to_string(),
                    ));
                }
            }
        }

        Ok(Tensor::new(Some(&inner), &[inner.len()]).unwrap())
    }
}
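The rewritten loop drops the unstable `has_data_left` feature in favour of a stable `fill_buf` emptiness check. The same pattern in miniature, reading fixed-size four-byte records from any `Read` source:

```rust
use std::io::{BufRead, BufReader, Read};

/// Read as many complete 4-byte records as the source holds.
fn read_records<R: Read>(reader: R) -> std::io::Result<Vec<[u8; 4]>> {
    let mut buf_reader = BufReader::new(reader);
    let mut records = Vec::new();
    loop {
        // Stable replacement for has_data_left(): peek at the buffer.
        if buf_reader.fill_buf()?.is_empty() {
            break; // clean EOF
        }
        let mut record = [0u8; 4];
        buf_reader.read_exact(&mut record)?;
        records.push(record);
    }
    Ok(records)
}
```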
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -22,7 +22,6 @@
  "input_visibility": "Private",
  "output_visibility": "Public",
  "param_visibility": "Private",
  "div_rebasing": false,
  "rebase_frac_zero_constants": false,
  "check_mode": "UNSAFE",
  "commitment": "KZG",
@@ -74,5 +73,5 @@
  "check_mode": "UNSAFE",
  "version": "0.0.0",
  "num_blinding_factors": null,
  "timestamp": 1726429587279
  "timestamp": 1741214578354
}
Binary file not shown.
1 tests/assets/wasm.code Normal file
File diff suppressed because one or more lines are too long
14 tests/foundry/.gitignore vendored
@@ -1,14 +0,0 @@
# Compiler files
cache/
out/

# Ignores development broadcast logs
!/broadcast
/broadcast/*/31337/
/broadcast/**/dry-run/

# Docs
docs/

# Dotenv file
.env
@@ -1,66 +0,0 @@
## Foundry

**Foundry is a blazing fast, portable and modular toolkit for Ethereum application development written in Rust.**

Foundry consists of:

- **Forge**: Ethereum testing framework (like Truffle, Hardhat and DappTools).
- **Cast**: Swiss army knife for interacting with EVM smart contracts, sending transactions and getting chain data.
- **Anvil**: Local Ethereum node, akin to Ganache, Hardhat Network.
- **Chisel**: Fast, utilitarian, and verbose solidity REPL.

## Documentation

https://book.getfoundry.sh/

## Usage

### Build

```shell
$ forge build
```

### Test

```shell
$ forge test
```

### Format

```shell
$ forge fmt
```

### Gas Snapshots

```shell
$ forge snapshot
```

### Anvil

```shell
$ anvil
```

### Deploy

```shell
$ forge script script/Counter.s.sol:CounterScript --rpc-url <your_rpc_url> --private-key <your_private_key>
```

### Cast

```shell
$ cast <subcommand>
```

### Help

```shell
$ forge --help
$ anvil --help
$ cast --help
```
@@ -1,6 +0,0 @@
[profile.default]
src = "../../contracts"
out = "out"
libs = ["lib"]

# See more config options https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options
@@ -1 +0,0 @@
contracts/=../../contracts/
@@ -1,429 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.20;

import "forge-std/Test.sol";
import {console} from "forge-std/console.sol";
import "contracts/AttestData.sol" as AttestData;

contract MockVKA {
    constructor() {}
}

contract MockVerifier {
    bool public shouldVerify;

    constructor(bool _shouldVerify) {
        shouldVerify = _shouldVerify;
    }

    function verifyProof(
        bytes calldata,
        uint256[] calldata
    ) external view returns (bool) {
        require(shouldVerify, "Verification failed");
        return shouldVerify;
    }
}

contract MockVerifierSeperate {
    bool public shouldVerify;

    constructor(bool _shouldVerify) {
        shouldVerify = _shouldVerify;
    }

    function verifyProof(
        address,
        bytes calldata,
        uint256[] calldata
    ) external view returns (bool) {
        require(shouldVerify, "Verification failed");
        return shouldVerify;
    }
}

contract MockTargetContract {
    int256[] public data;

    constructor(int256[] memory _data) {
        data = _data;
    }

    function setData(int256[] memory _data) external {
        data = _data;
    }

    function getData() external view returns (int256[] memory) {
        return data;
    }
}

contract DataAttestationTest is Test {
    AttestData.DataAttestation das;
    MockVerifier verifier;
    MockVerifierSeperate verifierSeperate;
    MockVKA vka;
    MockTargetContract target;
    int256[] mockData = [int256(1e18), -int256(5e17)];
    uint256[] decimals = [18, 18];
    uint256[] bits = [13, 13];
    uint8 instanceOffset = 0;
    bytes callData;

    function setUp() public {
        target = new MockTargetContract(mockData);
        verifier = new MockVerifier(true);
        verifierSeperate = new MockVerifierSeperate(true);
        vka = new MockVKA();

        callData = abi.encodeWithSignature("getData()");

        das = new AttestData.DataAttestation(
            address(target),
            callData,
            decimals,
            bits,
            instanceOffset
        );
    }

    // Fork of mulDivRound which doesn't revert on overflow and returns a boolean instead to indicate overflow
    function mulDivRound(
        uint256 x,
        uint256 y,
        uint256 denominator
    ) public pure returns (uint256 result, bool overflow) {
        unchecked {
            uint256 prod0;
            uint256 prod1;
            assembly {
                let mm := mulmod(x, y, not(0))
                prod0 := mul(x, y)
                prod1 := sub(sub(mm, prod0), lt(mm, prod0))
            }
            uint256 remainder = mulmod(x, y, denominator);
            bool addOne;
            if (remainder * 2 >= denominator) {
                addOne = true;
            }

            if (prod1 == 0) {
                if (addOne) {
                    return ((prod0 / denominator) + 1, false);
                }
                return (prod0 / denominator, false);
            }

            if (denominator > prod1) {
                return (0, true);
            }

            assembly {
                prod1 := sub(prod1, gt(remainder, prod0))
                prod0 := sub(prod0, remainder)
            }

            uint256 twos = denominator & (~denominator + 1);
            assembly {
                denominator := div(denominator, twos)
                prod0 := div(prod0, twos)
                twos := add(div(sub(0, twos), twos), 1)
            }

            prod0 |= prod1 * twos;

            uint256 inverse = (3 * denominator) ^ 2;

            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;

            result = prod0 * inverse;
            if (addOne) {
                result += 1;
            }
            return (result, false);
        }
    }
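`mulDivRound` computes round(x * y / denominator) with a 512-bit intermediate product; the attestation contract uses it to quantize attested values as round(data * 2^bits / 10^decimals). A Rust sketch of the same formula at i128 width — valid only within the bounds the fuzz test assumes below, and assuming round-half-away-from-zero behaviour:

```rust
/// Quantize a fixed-point decimal value: round(data * 2^bits / 10^decimals).
/// i128 stands in for the contract's 512-bit mulDiv; it only holds for
/// inputs inside the fuzz test's assumed bounds.
fn quantize(data: i128, decimals: u32, bits: u32) -> i128 {
    let denominator = 10i128.pow(decimals);
    let scaled = data * (1i128 << bits);
    let half = denominator / 2;
    if scaled >= 0 {
        (scaled + half) / denominator
    } else {
        (scaled - half) / denominator
    }
}

// e.g. quantize(1_000_000_000_000_000_000, 18, 13) == 1 << 13,
// matching testQuantizeData further down.
```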
    struct SampleAttestation {
        int256 mockData;
        uint8 decimals;
        uint8 bits;
    }
    function test_fuzzAttestedData(
        SampleAttestation[] memory _attestations
    ) public {
        vm.assume(_attestations.length == 1);
        int256[] memory _mockData = new int256[](1);
        uint256[] memory _decimals = new uint256[](1);
        uint256[] memory _bits = new uint256[](1);
        uint256[] memory _instances = new uint256[](1);
        for (uint256 i = 0; i < 1; i++) {
            SampleAttestation memory attestation = _attestations[i];
            _mockData[i] = attestation.mockData;
            vm.assume(attestation.mockData != type(int256).min); /// Will overflow int256 during negation op
            vm.assume(attestation.decimals < 77); /// Else will exceed uint256 bounds
            vm.assume(attestation.bits < 128); /// Else will exceed EZKL fixed point bounds for int128 type
            bool neg = attestation.mockData < 0;
            if (neg) {
                attestation.mockData = -attestation.mockData;
            }
            (uint256 _result, bool overflow) = mulDivRound(
                uint256(attestation.mockData),
                uint256(1 << attestation.bits),
                uint256(10 ** attestation.decimals)
            );
            vm.assume(!overflow);
            vm.assume(_result < das.HALF_ORDER());
            if (neg) {
                // No possibility of overflow here since output is less than or equal to HALF_ORDER
                // and therefore falls within the max range of int256 without overflow
                vm.assume(-int256(_result) > type(int128).min);
                _instances[i] =
                    uint256(int(das.ORDER()) - int256(_result)) %
                    das.ORDER();
            } else {
                vm.assume(_result < uint128(type(int128).max));
                _instances[i] = _result;
            }
            _decimals[i] = attestation.decimals;
            _bits[i] = attestation.bits;
        }
        // Update the attested data
        target.setData(_mockData);
        // Deploy the new data attestation contract
        AttestData.DataAttestation dasNew = new AttestData.DataAttestation(
            address(target),
            callData,
            _decimals,
            _bits,
            instanceOffset
        );
        bytes memory proof = hex"1234"; // Would normally contain commitments
        bytes memory encoded = abi.encodeWithSignature(
            "verifyProof(bytes,uint256[])",
            proof,
            _instances
        );

        AttestData.DataAttestation.Scalars memory _scalars = AttestData
            .DataAttestation
            .Scalars(10 ** _decimals[0], 1 << _bits[0]);

        int256 output = dasNew.quantizeData(_mockData[0], _scalars);
        console.log("output: ", output);
        uint256 fieldElement = dasNew.toFieldElement(output);
        // output should equal _instances[0]
        assertEq(fieldElement, _instances[0]);

        bool verificationResult = dasNew.verifyWithDataAttestation(
            address(verifier),
            encoded
        );
        assertTrue(verificationResult);
    }
    // Test deployment parameters
    function testDeployment() public view {
        assertEq(das.contractAddress(), address(target));
        assertEq(das.callData(), abi.encodeWithSignature("getData()"));
        assertEq(das.instanceOffset(), instanceOffset);

        AttestData.DataAttestation.Scalars memory scalar = das.getScalars(0);
        assertEq(scalar.decimals, 1e18);
        assertEq(scalar.bits, 1 << 13);
    }

    // Test quantizeData function
    function testQuantizeData() public view {
        AttestData.DataAttestation.Scalars memory scalar = das.getScalars(0);

        int256 positive = das.quantizeData(1e18, scalar);
        assertEq(positive, int256(scalar.bits));

        int256 negative = das.quantizeData(-1e18, scalar);
        assertEq(negative, -int256(scalar.bits));

        // Test rounding
        int half = int(0.5e18 / scalar.bits);
        int256 rounded = das.quantizeData(half, scalar);
        assertEq(rounded, 1);
    }

    // Test staticCall functionality
    function testStaticCall() public view {
        bytes memory result = das.staticCall(
            address(target),
            abi.encodeWithSignature("getData()")
        );
        int256[] memory decoded = abi.decode(result, (int256[]));
        assertEq(decoded[0], mockData[0]);
        assertEq(decoded[1], mockData[1]);
    }

    // Test attestData validation
    function testAttestDataSuccess() public view {
        uint256[] memory instances = new uint256[](2);
        AttestData.DataAttestation.Scalars memory scalar = das.getScalars(0);
        instances[0] = das.toFieldElement(int(scalar.bits));
        instances[1] = das.toFieldElement(-int(scalar.bits >> 1));
        das.attestData(instances); // Should not revert
    }

    function testAttestDataFailure() public {
        uint256[] memory instances = new uint256[](2);
        instances[0] = das.toFieldElement(1e18); // Incorrect value
        instances[1] = das.toFieldElement(5e17);

        vm.expectRevert("Public input does not match");
        das.attestData(instances);
    }

    // Test full verification flow
    function testSuccessfulVerification() public view {
        // Prepare valid instances
        uint256[] memory instances = new uint256[](2);
        AttestData.DataAttestation.Scalars memory scalar = das.getScalars(0);
        instances[0] = das.toFieldElement(int(scalar.bits));
        instances[1] = das.toFieldElement(-int(scalar.bits >> 1));

        // Create valid calldata (mock)
        bytes memory proof = hex"1234"; // Would normally contain commitments
        bytes memory encoded = abi.encodeWithSignature(
            "verifyProof(bytes,uint256[])",
            proof,
            instances
        );
        bytes memory encoded_vka = abi.encodeWithSignature(
            "verifyProof(address,bytes,uint256[])",
            address(vka),
            proof,
            instances
        );

        bool result = das.verifyWithDataAttestation(address(verifier), encoded);
        assertTrue(result);
        result = das.verifyWithDataAttestation(
            address(verifierSeperate),
            encoded_vka
        );
        assertTrue(result);
    }

    function testLoadInstances() public view {
        uint256[] memory instances = new uint256[](2);
        AttestData.DataAttestation.Scalars memory scalar = das.getScalars(0);
        instances[0] = das.toFieldElement(int(scalar.bits));
        instances[1] = das.toFieldElement(-int(scalar.bits >> 1));

        // Create valid calldata (mock)
        bytes memory proof = hex"1234"; // Would normally contain commitments
        bytes memory encoded = abi.encodeWithSignature(
            "verifyProof(bytes,uint256[])",
            proof,
            instances
        );
        bytes memory encoded_vka = abi.encodeWithSignature(
            "verifyProof(address,bytes,uint256[])",
            address(vka),
            proof,
            instances
        );

        // Load encoded instances from calldata
        uint256[] memory extracted_instances_calldata = das
            .getInstancesCalldata(encoded);
        assertEq(extracted_instances_calldata[0], instances[0]);
        assertEq(extracted_instances_calldata[1], instances[1]);
        // Load encoded instances from memory
        uint256[] memory extracted_instances_memory = das.getInstancesMemory(
            encoded
        );
        assertEq(extracted_instances_memory[0], instances[0]);
        assertEq(extracted_instances_memory[1], instances[1]);
        // Load encoded with vk instances from calldata
        uint256[] memory extracted_instances_calldata_vk = das
            .getInstancesCalldata(encoded_vka);
        assertEq(extracted_instances_calldata_vk[0], instances[0]);
        assertEq(extracted_instances_calldata_vk[1], instances[1]);
        // Load encoded with vk instances from memory
        uint256[] memory extracted_instances_memory_vk = das.getInstancesMemory(
            encoded_vka
        );
        assertEq(extracted_instances_memory_vk[0], instances[0]);
        assertEq(extracted_instances_memory_vk[1], instances[1]);
    }

    function testInvalidCommitments() public {
        // Create calldata with invalid commitments
        bytes memory invalidProof = hex"5678";
        uint256[] memory instances = new uint256[](2);
        AttestData.DataAttestation.Scalars memory scalar = das.getScalars(0);
        instances[0] = das.toFieldElement(int(scalar.bits));
        instances[1] = das.toFieldElement(-int(scalar.bits >> 1));
        bytes memory encoded = abi.encodeWithSignature(
            "verifyProof(bytes,uint256[])",
            invalidProof,
            instances
        );

        vm.expectRevert("Invalid KZG commitments");
        das.verifyWithDataAttestation(address(verifier), encoded);
    }

    function testInvalidVerifier() public {
        MockVerifier invalidVerifier = new MockVerifier(false);
        uint256[] memory instances = new uint256[](2);
        AttestData.DataAttestation.Scalars memory scalar = das.getScalars(0);
        instances[0] = das.toFieldElement(int(scalar.bits));
        instances[1] = das.toFieldElement(-int(scalar.bits >> 1));
        bytes memory encoded = abi.encodeWithSignature(
            "verifyProof(bytes,uint256[])",
            hex"1234",
            instances
        );

        vm.expectRevert("low-level call to verifier failed");
        das.verifyWithDataAttestation(address(invalidVerifier), encoded);
    }

    // Test edge cases
    function testZeroValueQuantization() public view {
        AttestData.DataAttestation.Scalars memory scalar = das.getScalars(0);
        int256 zero = das.quantizeData(0, scalar);
        assertEq(zero, 0);
    }

    function testOverflowProtection() public {
        int256 order = int(
            uint256(
                0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
            )
        );
        // int256 half_order = int(order >> 1);
        AttestData.DataAttestation.Scalars memory scalar = AttestData
            .DataAttestation
            .Scalars(1, 1 << 2);

        vm.expectRevert("Overflow field modulus");
        das.quantizeData(order, scalar); // Value that would overflow
    }

    function testInvalidFunctionSignature() public {
        uint256[] memory instances = new uint256[](2);
        AttestData.DataAttestation.Scalars memory scalar = das.getScalars(0);
        instances[0] = das.toFieldElement(int(scalar.bits));
        instances[1] = das.toFieldElement(-int(scalar.bits >> 1));
        bytes memory encoded_invalid_sig = abi.encodeWithSignature(
            "verifyProofff(bytes,uint256[])",
            hex"1234",
            instances
        );

        vm.expectRevert("Invalid function signature");
        das.verifyWithDataAttestation(address(verifier), encoded_invalid_sig);
    }
}
@@ -3,8 +3,8 @@
|
||||
mod native_tests {
|
||||
|
||||
// use ezkl::circuit::table::RESERVED_BLINDING_ROWS_PAD;
|
||||
use ezkl::graph::input::{FileSource, FileSourceInner, GraphData};
|
||||
use ezkl::graph::{DataSource, GraphSettings, GraphWitness};
|
||||
use ezkl::graph::input::{FileSource, GraphData};
|
||||
use ezkl::graph::GraphSettings;
|
||||
use ezkl::pfsys::Snark;
|
||||
use ezkl::Commitments;
|
||||
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
|
||||
@@ -163,17 +163,14 @@ mod native_tests {
|
||||
let data = GraphData::from_path(format!("{}/{}/input.json", test_dir, test).into())
|
||||
.expect("failed to load input data");
|
||||
|
||||
let input_data = match data.input_data {
|
||||
DataSource::File(data) => data,
|
||||
_ => panic!("Only File data sources support batching"),
|
||||
};
|
||||
|
||||
let duplicated_input_data: FileSource = input_data
|
||||
let duplicated_input_data: FileSource = data
|
||||
.input_data
|
||||
.values()
|
||||
.iter()
|
||||
.map(|data| (0..num_batches).flat_map(|_| data.clone()).collect())
|
||||
.collect();
|
||||
|
||||
let duplicated_data = GraphData::new(DataSource::File(duplicated_input_data));
|
||||
let duplicated_data = GraphData::new(duplicated_input_data.into());
|
||||
|
||||
let res =
|
||||
duplicated_data.save(format!("{}/{}/input.json", test_dir, output_dir).into());
|
||||
@@ -991,7 +988,6 @@ mod native_tests {
|
||||
use crate::native_tests::kzg_evm_prove_and_verify;
|
||||
use crate::native_tests::kzg_evm_prove_and_verify_reusable_verifier;
|
||||
|
||||
use crate::native_tests::kzg_evm_on_chain_input_prove_and_verify;
|
||||
use crate::native_tests::kzg_evm_aggr_prove_and_verify;
|
||||
use tempdir::TempDir;
|
||||
use crate::native_tests::Hardfork;
@@ -1006,101 +1002,6 @@ mod native_tests {
}


/// Currently only on chain inputs that return a non-negative value are supported.
const TESTS_ON_CHAIN_INPUT: [&str; 17] = [
"1l_mlp",
"1l_average",
"1l_reshape",
"1l_sigmoid",
"1l_div",
"1l_sqrt",
"1l_prelu",
"1l_var",
"1l_leakyrelu",
"1l_gelu_noappx",
"1l_relu",
"1l_tanh",
"2l_relu_sigmoid_small",
"2l_relu_small",
"2l_relu_fc",
"min",
"max"
];

seq!(N in 0..=16 {
#(#[test_case((TESTS_ON_CHAIN_INPUT[N],Hardfork::Latest))])*
#(#[test_case((TESTS_ON_CHAIN_INPUT[N],Hardfork::Paris))])*
#(#[test_case((TESTS_ON_CHAIN_INPUT[N],Hardfork::London))])*
#(#[test_case((TESTS_ON_CHAIN_INPUT[N],Hardfork::Shanghai))])*
fn kzg_evm_on_chain_input_prove_and_verify_(test: (&str,Hardfork)) {
let (test,hardfork) = test;
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(true, hardfork);
kzg_evm_on_chain_input_prove_and_verify(path, test.to_string(), "on-chain", "file", "public", "private", "private");
// test_dir.close().unwrap();
}

#(#[test_case(TESTS_ON_CHAIN_INPUT[N])])*
fn kzg_evm_on_chain_output_prove_and_verify_(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(true, Hardfork::Latest);
kzg_evm_on_chain_input_prove_and_verify(path, test.to_string(), "file", "on-chain", "private", "public", "private");
// test_dir.close().unwrap();
}

#(#[test_case(TESTS_ON_CHAIN_INPUT[N])])*
fn kzg_evm_on_chain_input_output_prove_and_verify_(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(true, Hardfork::Latest);
kzg_evm_on_chain_input_prove_and_verify(path, test.to_string(), "on-chain", "on-chain", "public", "public", "private");
test_dir.close().unwrap();
}

#(#[test_case(TESTS_ON_CHAIN_INPUT[N])])*
fn kzg_evm_on_chain_input_output_hashed_prove_and_verify_(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(true, Hardfork::Latest);
kzg_evm_on_chain_input_prove_and_verify(path, test.to_string(), "on-chain", "on-chain", "hashed", "hashed", "private");
test_dir.close().unwrap();
}
#(#[test_case(TESTS_ON_CHAIN_INPUT[N])])*
fn kzg_evm_on_chain_input_kzg_output_kzg_params_prove_and_verify_(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(true, Hardfork::Latest);
kzg_evm_on_chain_input_prove_and_verify(path, test.to_string(), "on-chain", "file", "public", "polycommit", "polycommit");
test_dir.close().unwrap();
}
#(#[test_case(TESTS_ON_CHAIN_INPUT[N])])*
fn kzg_evm_on_chain_output_kzg_input_kzg_params_prove_and_verify_(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(true, Hardfork::Latest);
kzg_evm_on_chain_input_prove_and_verify(path, test.to_string(), "file", "on-chain", "polycommit", "public", "polycommit");
test_dir.close().unwrap();
}
#(#[test_case(TESTS_ON_CHAIN_INPUT[N])])*
fn kzg_evm_on_chain_all_kzg_params_prove_and_verify_(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(true, Hardfork::Latest);
kzg_evm_on_chain_input_prove_and_verify(path, test.to_string(), "file", "file", "polycommit", "polycommit", "polycommit");
test_dir.close().unwrap();
}
});
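For orientation, the seq! block above sweeps one helper over every source/visibility combination. A minimal standalone sketch (editor's illustration, not part of the diff) that simply enumerates the tuples the tests pass to kzg_evm_on_chain_input_prove_and_verify:

    fn main() {
        // (input_source, output_source, input_vis, output_vis, param_vis),
        // copied from the seven test bodies above
        let cases: [(&str, &str, &str, &str, &str); 7] = [
            ("on-chain", "file", "public", "private", "private"),
            ("file", "on-chain", "private", "public", "private"),
            ("on-chain", "on-chain", "public", "public", "private"),
            ("on-chain", "on-chain", "hashed", "hashed", "private"),
            ("on-chain", "file", "public", "polycommit", "polycommit"),
            ("file", "on-chain", "polycommit", "public", "polycommit"),
            ("file", "file", "polycommit", "polycommit", "polycommit"),
        ];
        for (input_src, output_src, input_vis, output_vis, param_vis) in cases {
            println!("{input_src} {output_src} {input_vis} {output_vis} {param_vis}");
        }
    }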

seq!(N in 0..=17 {
// these take a particularly long time to run
#(#[test_case(TESTS_EVM_AGGR[N])])*
@@ -1116,7 +1017,7 @@ mod native_tests {

});

seq!(N in 0..4 {
seq!(N in 0..=98 {
#(#[test_case(TESTS[N])])*
fn kzg_evm_prove_and_verify_reusable_verifier_(test: &str) {
crate::native_tests::init_binary();
@@ -1126,7 +1027,7 @@ mod native_tests {
init_logger();
log::error!("Running kzg_evm_prove_and_verify_reusable_verifier_ for test: {}", test);
// default vis
let reusable_verifier_address: String = kzg_evm_prove_and_verify_reusable_verifier(2, path, test.to_string(), "private", "private", "public", &mut REUSABLE_VERIFIER_ADDR.lock().unwrap(), false);
let reusable_verifier_address: String = kzg_evm_prove_and_verify_reusable_verifier(2, path, test.to_string(), "public", "private", "private", &mut REUSABLE_VERIFIER_ADDR.lock().unwrap(), false);
// public/public vis
let reusable_verifier_address: String = kzg_evm_prove_and_verify_reusable_verifier(2, path, test.to_string(), "public", "private", "public", &mut Some(reusable_verifier_address), false);
// hashed input
@@ -1148,7 +1049,7 @@ mod native_tests {

#(#[test_case(TESTS[N])])*
fn kzg_evm_prove_and_verify_reusable_verifier_with_overflow_(test: &str) {
// verifier too big to fit on chain with overflow calibration target
// verifier too big to fit on chain with overflow calibration target or num instances exceed 0x10000
if test == "1l_eltwise_div" || test == "lenet_5" || test == "ltsf" || test == "lstm_large" {
return;
}
@@ -2037,6 +1938,7 @@ mod native_tests {
let rpc_arg = format!("--rpc-url={}", anvil_url);
let addr_path_arg = format!("--addr-path={}/{}/addr.txt", test_dir, example_name);
let settings_arg = format!("--settings-path={}", settings_path);
let calldata_path = format!("{}/{}/calldata.bytes", test_dir, example_name);

// create encoded calldata
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
@@ -2044,6 +1946,8 @@ mod native_tests {
"encode-evm-calldata",
"--proof-path",
&format!("{}/{}/proof.pf", test_dir, example_name),
"--calldata-path",
calldata_path.as_str(),
])
.status()
.expect("failed to execute process");
@@ -2083,6 +1987,24 @@ mod native_tests {

let deployed_addr_arg = format!("--addr-verifier={}", addr);

// verify the proof using the calldata bytes file
let pf_arg = format!("{}/{}/proof.pf", test_dir, example_name);
let args = vec![
"verify-evm",
"--proof-path",
pf_arg.as_str(),
rpc_arg.as_str(),
deployed_addr_arg.as_str(),
"--encoded-calldata",
calldata_path.as_str(),
];

let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());

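The two added steps give the test a calldata round-trip: encode-evm-calldata writes the proof as raw EVM calldata, and verify-evm replays exactly those bytes against the deployed contract. A self-contained sketch of the same flow (editor's illustration with hypothetical paths and address; assumes an ezkl binary on PATH):

    use std::process::Command;

    fn main() {
        // encode the proof into a calldata bytes file (same flags as the diff above)
        let status = Command::new("ezkl")
            .args([
                "encode-evm-calldata",
                "--proof-path", "proof.pf",
                "--calldata-path", "calldata.bytes",
            ])
            .status()
            .expect("failed to execute process");
        assert!(status.success());

        // verify on chain using the pre-encoded calldata instead of re-encoding the proof
        let status = Command::new("ezkl")
            .args([
                "verify-evm",
                "--proof-path", "proof.pf",
                "--rpc-url=http://127.0.0.1:8545",
                "--addr-verifier=0x0000000000000000000000000000000000000000",
                "--encoded-calldata", "calldata.bytes",
            ])
            .status()
            .expect("failed to execute process");
        assert!(status.success());
    }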
// now verify the proof
let pf_arg = format!("{}/{}/proof.pf", test_dir, example_name);
let mut args = vec![
@@ -2171,7 +2093,7 @@ mod native_tests {
rpc_arg.as_str(),
addr_path_arg.as_str(),
sol_arg.as_str(),
"-C=verifier/reusable",
"--contract-type=verifier/reusable",
];

let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
@@ -2192,15 +2114,14 @@ mod native_tests {
}
};

let addr_path_arg_vk = format!("--addr-path={}/{}/addr_vk.txt", test_dir, example_name);
let sol_arg_vk: String = format!("--sol-code-path={}/{}/vk.sol", test_dir, example_name);
let arg_vka: String = format!("--vka-path={}/{}/vka.bytes", test_dir, example_name);
// create the vka
let args = vec![
"create-evm-vka",
"--vk-path",
&vk_arg,
&settings_arg,
&sol_arg_vk,
&arg_vka,
];

let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
@@ -2209,13 +2130,12 @@ mod native_tests {
.expect("failed to execute process");
assert!(status.success());

// deploy the vka
// register the vka
let args = vec![
"deploy-evm",
"register-vka",
rpc_arg.as_str(),
addr_path_arg_vk.as_str(),
sol_arg_vk.as_str(),
"-C=vka",
arg_vka.as_str(),
deployed_addr_arg.as_str(),
];

let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
@@ -2224,19 +2144,13 @@ mod native_tests {
.expect("failed to execute process");
assert!(status.success());

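Net effect of this hunk: instead of deploying the verifying key as its own contract (deploy-evm), the test now creates a vka artifact with create-evm-vka and registers it on the already-deployed reusable verifier via register-vka. A condensed sketch of the new flow (editor's illustration; paths and address hypothetical, flags as they appear in the diff):

    use std::process::Command;

    fn main() {
        // produce vk.sol and the vka bytes from the verifying key and settings
        let status = Command::new("ezkl")
            .args([
                "create-evm-vka",
                "--vk-path", "key.vk",
                "--settings-path=settings.json",
                "--sol-code-path=vk.sol",
                "--vka-path=vka.bytes",
            ])
            .status()
            .expect("failed to execute process");
        assert!(status.success());

        // register the vka with the reusable verifier contract
        let status = Command::new("ezkl")
            .args([
                "register-vka",
                "--rpc-url=http://127.0.0.1:8545",
                "--addr-path=addr_vk.txt",
                "--sol-code-path=vk.sol",
                "-C=vka",
                "--vka-path=vka.bytes",
                "--addr-verifier=0x0000000000000000000000000000000000000000",
            ])
            .status()
            .expect("failed to execute process");
        assert!(status.success());
    }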
// read in the address
let addr_vk = std::fs::read_to_string(format!("{}/{}/addr_vk.txt", test_dir, example_name))
.expect("failed to read address file");

let deployed_addr_arg_vk = format!("--addr-vk={}", addr_vk);

// create encoded calldata
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"encode-evm-calldata",
"--proof-path",
&format!("{}/{}/proof.pf", test_dir, example_name),
&deployed_addr_arg_vk,
&arg_vka,
])
.status()
.expect("failed to execute process");
@@ -2251,7 +2165,7 @@ mod native_tests {
pf_arg.as_str(),
rpc_arg.as_str(),
deployed_addr_arg.as_str(),
deployed_addr_arg_vk.as_str(),
arg_vka.as_str(),
];

let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
@@ -2329,298 +2243,6 @@ mod native_tests {
assert!(status.success());
}

fn kzg_evm_on_chain_input_prove_and_verify(
test_dir: &str,
example_name: String,
input_source: &str,
output_source: &str,
input_visibility: &str,
output_visibility: &str,
param_visibility: &str,
) {
gen_circuit_settings_and_witness(
test_dir,
example_name.clone(),
input_visibility,
param_visibility,
output_visibility,
1,
"resources",
// we need the accuracy
Some(vec![4]),
1,
Commitments::KZG,
2,
false,
None,
None,
);

let model_path = format!("{}/{}/network.compiled", test_dir, example_name);
let settings_path = format!("{}/{}/settings.json", test_dir, example_name);
init_params(settings_path.clone().into());

let data_path = format!("{}/{}/input.json", test_dir, example_name);
let witness_path = format!("{}/{}/witness.json", test_dir, example_name);
let test_on_chain_data_path = format!("{}/{}/on_chain_input.json", test_dir, example_name);
let rpc_arg = format!("--rpc-url={}", LIMITLESS_ANVIL_URL.as_str());
let private_key = format!("--private-key={}", *ANVIL_DEFAULT_PRIVATE_KEY);

let test_input_source = format!("--input-source={}", input_source);
let test_output_source = format!("--output-source={}", output_source);

let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"setup",
"-M",
&model_path,
"--pk-path",
&format!("{}/{}/key.pk", test_dir, example_name),
"--vk-path",
&format!("{}/{}/key.vk", test_dir, example_name),
])
.status()
.expect("failed to execute process");
assert!(status.success());

// generate the witness, passing the vk path to generate the necessary kzg commits
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"gen-witness",
"-D",
&data_path,
"-M",
&model_path,
"-O",
&witness_path,
"--vk-path",
&format!("{}/{}/key.vk", test_dir, example_name),
])
.status()
.expect("failed to execute process");
assert!(status.success());

// load witness
let witness: GraphWitness = GraphWitness::from_path(witness_path.clone().into()).unwrap();
// print out the witness
println!("WITNESS: {:?}", witness);
let mut input: GraphData = GraphData::from_path(data_path.clone().into()).unwrap();
if input_source != "file" || output_source != "file" {
println!("on chain input");
if input_visibility == "hashed" {
let hashes = witness.processed_inputs.unwrap().poseidon_hash.unwrap();
input.input_data = DataSource::File(
hashes
.iter()
.map(|h| vec![FileSourceInner::Field(*h)])
.collect(),
);
}
if output_visibility == "hashed" {
let hashes = witness.processed_outputs.unwrap().poseidon_hash.unwrap();
input.output_data = Some(DataSource::File(
hashes
.iter()
.map(|h| vec![FileSourceInner::Field(*h)])
.collect(),
));
} else {
input.output_data = Some(DataSource::File(
witness
.pretty_elements
.unwrap()
.rescaled_outputs
.iter()
.map(|o| {
o.iter()
.map(|f| FileSourceInner::Float(f.parse().unwrap()))
.collect()
})
.collect(),
));
}
input.save(data_path.clone().into()).unwrap();
let args = vec![
"setup-test-evm-data",
"-D",
data_path.as_str(),
"-M",
&model_path,
"--test-data",
test_on_chain_data_path.as_str(),
rpc_arg.as_str(),
test_input_source.as_str(),
test_output_source.as_str(),
];

let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(args)
.status()
.expect("failed to execute process");
assert!(status.success());
// generate the witness, passing the vk path to generate the necessary kzg commits only
// if input visibility is NOT hashed
if input_visibility != "hashed" {
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"gen-witness",
"-D",
&test_on_chain_data_path,
"-M",
&model_path,
"-O",
&witness_path,
"--vk-path",
&format!("{}/{}/key.vk", test_dir, example_name),
])
.status()
.expect("failed to execute process");
assert!(status.success());
}
}
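The block above rewires the input/output JSON before the on-chain data is set up: when a tensor is declared hashed, its raw values are replaced by the Poseidon hashes the witness already computed. Stripped of the test plumbing, the input half reduces to roughly the following (editor's sketch; exact module paths for the ezkl types are assumed):

    // module paths assumed; the types themselves appear in the test above
    use ezkl::graph::input::{DataSource, FileSourceInner, GraphData};
    use ezkl::graph::GraphWitness;

    // a minimal sketch: substitute the witness's input hashes for the raw input data
    fn rewire_hashed_input(witness: &GraphWitness, input: &mut GraphData) {
        if let Some(processed) = &witness.processed_inputs {
            let hashes = processed.poseidon_hash.as_ref().unwrap();
            input.input_data = DataSource::File(
                hashes
                    .iter()
                    .map(|h| vec![FileSourceInner::Field(*h)])
                    .collect(),
            );
        }
    }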

let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"prove",
"-W",
&witness_path,
"-M",
&model_path,
"--proof-path",
&format!("{}/{}/proof.pf", test_dir, example_name),
"--pk-path",
&format!("{}/{}/key.pk", test_dir, example_name),
])
.status()
.expect("failed to execute process");
assert!(status.success());

let vk_arg = format!("{}/{}/key.vk", test_dir, example_name);

let settings_arg = format!("--settings-path={}", settings_path);

// create encoded calldata
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"encode-evm-calldata",
"--proof-path",
&format!("{}/{}/proof.pf", test_dir, example_name),
])
.status()
.expect("failed to execute process");

assert!(status.success());

// create the verifier
let mut args = vec!["create-evm-verifier", "--vk-path", &vk_arg, &settings_arg];

let sol_arg = format!("{}/{}/kzg.sol", test_dir, example_name);

args.push("--sol-code-path");
args.push(sol_arg.as_str());

let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());

let addr_path_verifier_arg = format!(
"--addr-path={}/{}/addr_verifier.txt",
test_dir, example_name
);

// deploy the verifier
let mut args = vec![
"deploy-evm",
rpc_arg.as_str(),
addr_path_verifier_arg.as_str(),
];

args.push("--sol-code-path");
args.push(sol_arg.as_str());

let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());

let sol_arg = format!("{}/{}/kzg.sol", test_dir, example_name);

let mut create_da_args = vec![
"create-evm-da",
&settings_arg,
"--sol-code-path",
sol_arg.as_str(),
"-W",
&witness_path,
];

// if there is an on-chain source we add the data
if input_source != "file" || output_source != "file" {
create_da_args.push("-D");
create_da_args.push(test_on_chain_data_path.as_str());
}

let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&create_da_args)
.status()
.expect("failed to execute process");
assert!(status.success());

let deploy_evm_data_path = if input_source != "file" || output_source != "file" {
test_on_chain_data_path.clone()
} else {
data_path.clone()
};

let addr_path_da_arg = format!("--addr-path={}/{}/addr_da.txt", test_dir, example_name);
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args([
"deploy-evm-da",
format!("--settings-path={}", settings_path).as_str(),
"-D",
deploy_evm_data_path.as_str(),
"--sol-code-path",
sol_arg.as_str(),
rpc_arg.as_str(),
addr_path_da_arg.as_str(),
private_key.as_str(),
])
.status()
.expect("failed to execute process");
assert!(status.success());

let pf_arg = format!("{}/{}/proof.pf", test_dir, example_name);
// read in the verifier address
let addr_verifier =
std::fs::read_to_string(format!("{}/{}/addr_verifier.txt", test_dir, example_name))
.expect("failed to read address file");

let deployed_addr_verifier_arg = format!("--addr-verifier={}", addr_verifier);

// read in the da address
let addr_da = std::fs::read_to_string(format!("{}/{}/addr_da.txt", test_dir, example_name))
.expect("failed to read address file");

let deployed_addr_da_arg = format!("--addr-da={}", addr_da);

let args = vec![
"verify-evm",
"--proof-path",
pf_arg.as_str(),
deployed_addr_verifier_arg.as_str(),
deployed_addr_da_arg.as_str(),
rpc_arg.as_str(),
];
let status = Command::new(format!("{}/{}", *CARGO_TARGET_DIR, TEST_BINARY))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
}
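With data attestation in the loop, the final verification names three on-chain actors: the verifier contract, the data-attestation contract, and the RPC endpoint. The closing verify-evm call above, reduced to a standalone sketch (editor's illustration; addresses and path hypothetical):

    use std::process::Command;

    fn main() {
        // verify the proof against the verifier, attesting the data via the DA contract
        let status = Command::new("ezkl")
            .args([
                "verify-evm",
                "--proof-path", "proof.pf",
                "--addr-verifier=0x0000000000000000000000000000000000000000",
                "--addr-da=0x0000000000000000000000000000000000000000",
                "--rpc-url=http://127.0.0.1:8545",
            ])
            .status()
            .expect("failed to execute process");
        assert!(status.success());
    }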

fn build_ezkl() {
#[cfg(feature = "icicle")]
let args = [
@@ -2643,7 +2265,7 @@ mod native_tests {
// not macos-metal and not icicle
#[cfg(all(not(feature = "icicle"), not(feature = "macos-metal")))]
let args = ["build", "--profile=test-runs", "--bin", "ezkl"];
#[cfg(not(feature = "mv-lookup"))]
#[cfg(feature = "eth-original-lookup")]
let args = [
"build",
"--profile=test-runs",
@@ -2651,7 +2273,16 @@ mod native_tests {
"ezkl",
"--no-default-features",
"--features",
"ezkl,solidity-verifier,eth",
];
#[cfg(feature = "reusable-verifier")]
let args = [
"build",
"--profile=test-runs",
"--bin",
"ezkl",
"--features",
"reusable-verifier",
];

let status = Command::new("cargo")

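The added cfg branch means a reusable-verifier test binary is compiled with its own feature flag. The arm above amounts to this cargo invocation (editor's sketch; arguments taken verbatim from the diff):

    use std::process::Command;

    fn main() {
        // equivalent of the cfg(feature = "reusable-verifier") arm above
        let status = Command::new("cargo")
            .args([
                "build",
                "--profile=test-runs",
                "--bin",
                "ezkl",
                "--features",
                "reusable-verifier",
            ])
            .status()
            .expect("failed to execute cargo");
        assert!(status.success());
    }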
@@ -146,7 +146,7 @@ mod py_tests {
}
}

const TESTS: [&str; 35] = [
const TESTS: [&str; 31] = [
"mnist_gan.ipynb", // 0
"ezkl_demo_batch.ipynb", // 1
"proof_splitting.ipynb", // 2
@@ -155,33 +155,29 @@ mod py_tests {
"mnist_gan_proof_splitting.ipynb", // 5
"hashed_vis.ipynb", // 6
"simple_demo_all_public.ipynb", // 7
"data_attest.ipynb", // 8
"little_transformer.ipynb", // 9
"simple_demo_aggregated_proofs.ipynb", // 10
"ezkl_demo.ipynb", // 11
"lstm.ipynb", // 12
"set_membership.ipynb", // 13
"decision_tree.ipynb", // 14
"random_forest.ipynb", // 15
"gradient_boosted_trees.ipynb", // 16
"xgboost.ipynb", // 17
"lightgbm.ipynb", // 18
"svm.ipynb", // 19
"simple_demo_public_input_output.ipynb", // 20
"simple_demo_public_network_output.ipynb", // 21
"gcn.ipynb", // 22
"linear_regression.ipynb", // 23
"stacked_regression.ipynb", // 24
"data_attest_hashed.ipynb", // 25
"kzg_vis.ipynb", // 26
"kmeans.ipynb", // 27
"solvency.ipynb", // 28
"sklearn_mlp.ipynb", // 29
"generalized_inverse.ipynb", // 30
"mnist_classifier.ipynb", // 31
"world_rotation.ipynb", // 32
"logistic_regression.ipynb", // 33
"univ3-da.ipynb", // 34
"little_transformer.ipynb", // 8
"simple_demo_aggregated_proofs.ipynb", // 9
"ezkl_demo.ipynb", // 10
"lstm.ipynb", // 11
"set_membership.ipynb", // 12
"decision_tree.ipynb", // 13
"random_forest.ipynb", // 14
"gradient_boosted_trees.ipynb", // 15
"xgboost.ipynb", // 16
"lightgbm.ipynb", // 17
"svm.ipynb", // 18
"simple_demo_public_input_output.ipynb", // 19
"simple_demo_public_network_output.ipynb", // 20
"gcn.ipynb", // 21
"linear_regression.ipynb", // 22
"stacked_regression.ipynb", // 23
"kzg_vis.ipynb", // 24
"kmeans.ipynb", // 25
"solvency.ipynb", // 26
"sklearn_mlp.ipynb", // 27
"generalized_inverse.ipynb", // 28
"mnist_classifier.ipynb", // 29
"logistic_regression.ipynb", // 30
];

macro_rules! test_func {
@@ -194,7 +190,7 @@ mod py_tests {
use super::*;


seq!(N in 0..=32 {
seq!(N in 0..=30 {

#(#[test_case(TESTS[N])])*
fn run_notebook_(test: &str) {

@@ -135,7 +135,7 @@ async def test_calibrate_over_user_range():
model_path, output_path, py_run_args=run_args)
assert res == True

res = await ezkl.calibrate_settings(
res = ezkl.calibrate_settings(
data_path, model_path, output_path, "resources", 1, [0, 1, 2])
assert res == True
assert os.path.isfile(output_path)
@@ -169,7 +169,7 @@ async def test_calibrate():
model_path, output_path, py_run_args=run_args)
assert res == True

res = await ezkl.calibrate_settings(
res = ezkl.calibrate_settings(
data_path, model_path, output_path, "resources")
assert res == True
assert os.path.isfile(output_path)
@@ -216,7 +216,7 @@ async def test_forward():
'witness.json'
)

res = await ezkl.gen_witness(data_path, model_path, output_path)
res = ezkl.gen_witness(data_path, model_path, output_path)

with open(output_path, "r") as f:
data = json.load(f)
@@ -430,7 +430,7 @@ async def test_create_evm_verifier_separate_vk():
vk_path = os.path.join(folder_path, 'test_evm.vk')
settings_path = os.path.join(folder_path, 'settings.json')
sol_code_path = os.path.join(folder_path, 'test_separate.sol')
vk_code_path = os.path.join(folder_path, 'test_vk.sol')
vka_path = os.path.join(folder_path, 'vka.calldata')
abi_path = os.path.join(folder_path, 'test_separate.abi')
abi_vk_path = os.path.join(folder_path, 'test_vk_separate.abi')
proof_path = os.path.join(folder_path, 'test_evm.pf')
@@ -455,9 +455,8 @@ async def test_create_evm_verifier_separate_vk():
res = await ezkl.create_evm_vka(
vk_path,
settings_path,
vk_code_path,
abi_vk_path,
srs_path=srs_path,
vka_path,
srs_path=srs_path
)

assert res == True
@@ -472,7 +471,7 @@ async def test_deploy_evm_reusable_and_vka():
addr_path_verifier = os.path.join(folder_path, 'address_separate.json')
addr_path_vk = os.path.join(folder_path, 'address_vk.json')
sol_code_path = os.path.join(folder_path, 'test_separate.sol')
vk_code_path = os.path.join(folder_path, 'test_vk.sol')
vka_path = os.path.join(folder_path, 'vka.calldata')

# TODO: without optimization there will be out of gas errors
# sol_code_path = os.path.join(folder_path, 'test.sol')
@@ -484,11 +483,14 @@ async def test_deploy_evm_reusable_and_vka():
"verifier/reusable",
)

res = await ezkl.deploy_evm(
addr_path_vk,
with open(addr_path_verifier, 'r') as file:
addr_verifier = file.read().rstrip()

# TODO fix: we need to call register vka instead of deploy evm
res = await ezkl.register_vka(
addr_verifier,
anvil_url,
vk_code_path,
"vka",
vka_path=vka_path,
)

assert res == True
@@ -579,7 +581,7 @@ async def test_verify_evm_separate_vk():
"""
proof_path = os.path.join(folder_path, 'test_evm.pf')
addr_path_verifier = os.path.join(folder_path, 'address_separate.json')
addr_path_vk = os.path.join(folder_path, 'address_vk.json')
vka_path = os.path.join(folder_path, 'vka.calldata')
proof_path = os.path.join(folder_path, 'test_evm.pf')
calldata_path = os.path.join(folder_path, 'calldata_separate.bytes')

@@ -588,13 +590,8 @@ async def test_verify_evm_separate_vk():

print(addr_verifier)

with open(addr_path_vk, 'r') as file:
addr_vk = file.read().rstrip()

print(addr_vk)

# res is now a vector of bytes
res = ezkl.encode_evm_calldata(proof_path, calldata_path, addr_vk=addr_vk)
res = ezkl.encode_evm_calldata(proof_path, calldata_path, vka_path=vka_path)

assert os.path.isfile(calldata_path)
assert len(res) > 0
@@ -606,7 +603,7 @@ async def test_verify_evm_separate_vk():
addr_verifier,
anvil_url,
proof_path,
addr_vk=addr_vk,
vka_path=vka_path,
# sol_code_path
# optimizer_runs
)
@@ -643,7 +640,7 @@ async def test_aggregate_and_verify_aggr():
res = ezkl.gen_settings(model_path, settings_path)
assert res == True

res = await ezkl.calibrate_settings(
res = ezkl.calibrate_settings(
data_path, model_path, settings_path, "resources")
assert res == True
assert os.path.isfile(settings_path)
@@ -665,7 +662,7 @@ async def test_aggregate_and_verify_aggr():
'1l_relu_aggr_witness.json'
)

res = await ezkl.gen_witness(data_path, compiled_model_path,
res = ezkl.gen_witness(data_path, compiled_model_path,
output_path)

ezkl.prove(
@@ -745,7 +742,7 @@ async def test_evm_aggregate_and_verify_aggr():
settings_path,
)

await ezkl.calibrate_settings(
ezkl.calibrate_settings(
data_path,
model_path,
settings_path,
@@ -774,7 +771,7 @@ async def test_evm_aggregate_and_verify_aggr():
'1l_relu_aggr_evm_witness.json'
)

res = await ezkl.gen_witness(data_path, compiled_model_path,
res = ezkl.gen_witness(data_path, compiled_model_path,
output_path)

ezkl.prove(
@@ -908,7 +905,7 @@ async def test_all_examples(model_file, input_file):
res = ezkl.gen_settings(model_file, settings_path, py_run_args=run_args)
assert res

res = await ezkl.calibrate_settings(
res = ezkl.calibrate_settings(
input_file, model_file, settings_path, "resources")
assert res

@@ -939,7 +936,7 @@ async def test_all_examples(model_file, input_file):
assert os.path.isfile(pk_path)

print("Generating witness for example: ", model_file)
res = await ezkl.gen_witness(input_file, compiled_model_path, witness_path)
res = ezkl.gen_witness(input_file, compiled_model_path, witness_path)
assert os.path.isfile(witness_path)

print("Proving example: ", model_file)

@@ -5,7 +5,8 @@ mod wasm32 {
bufferToVecOfFelt, compiledCircuitValidation, encodeVerifierCalldata, feltToBigEndian,
feltToFloat, feltToInt, feltToLittleEndian, genPk, genVk, genWitness, inputValidation,
kzgCommit, pkValidation, poseidonHash, proofValidation, prove, settingsValidation,
srsValidation, u8_array_to_u128_le, verify, verifyAggr, vkValidation, witnessValidation,
srsValidation, u8_array_to_u128_le, verify, verifyAggr, verifyEVM, vkValidation,
witnessValidation,
};
use ezkl::circuit::modules::polycommit::PolyCommitChip;
use ezkl::circuit::modules::poseidon::spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH};
@@ -14,10 +15,10 @@ mod wasm32 {
use ezkl::graph::GraphCircuit;
use ezkl::graph::{GraphSettings, GraphWitness};
use ezkl::pfsys;
use ezkl::pfsys::encode_calldata;
use halo2_proofs::plonk::VerifyingKey;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2_proofs::poly::kzg::commitment::ParamsKZG;
use halo2_solidity_verifier::encode_calldata;
use halo2curves::bn256::Bn256;
use halo2curves::bn256::{Fr, G1Affine};
use snark_verifier::util::arithmetic::PrimeField;
@@ -39,6 +40,7 @@ mod wasm32 {
pub const VK_AGGR: &[u8] = include_bytes!("assets/vk_aggr.key");
pub const SRS: &[u8] = include_bytes!("assets/kzg");
pub const SRS1: &[u8] = include_bytes!("assets/kzg1.srs");
pub const VERIFIER_BYTECODE: &[u8] = include_bytes!("assets/wasm.code");

#[wasm_bindgen_test]
async fn can_verify_aggr() {
@@ -74,27 +76,39 @@ mod wasm32 {
);
assert_eq!(calldata, reference_calldata);
// with vk address
let vk_address = hex::decode("0000000000000000000000000000000000000000").unwrap();
let dummy_32_byte_word = [0u8; 32];

let vk_address: [u8; 20] = {
let mut array = [0u8; 20];
array.copy_from_slice(&vk_address);
array
};
// the vka is passed as a slice of 32-byte words: &[[u8; 32]]
let vka: &[[u8; 32]] = &[dummy_32_byte_word.into()];

let serialized = serde_json::to_vec(&vk_address).unwrap();
let serialized = serde_json::to_vec(vka).unwrap();

let calldata = encodeVerifierCalldata(ser_proof, Some(serialized))
.map_err(|_| "failed")
.unwrap();
let reference_calldata = encode_calldata(
Some(vk_address),
Some(vka),
&snark.proof,
&flattened_instances.collect::<Vec<_>>(),
);
assert_eq!(calldata, reference_calldata);
}

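The substantive change in this test is the shape handed to encode_calldata: a 20-byte verifying-key contract address becomes the vka itself, a slice of 32-byte words. An isolated sketch of just the two shapes and the JSON serialization the wasm binding expects (editor's illustration; requires the serde_json crate):

    fn main() {
        // before: Option<[u8; 20]>, the address of a deployed vk contract
        let _old_vk_address: Option<[u8; 20]> = Some([0u8; 20]);

        // after: Option<&[[u8; 32]]>, the verifying-key artifact words themselves
        let vka: &[[u8; 32]] = &[[0u8; 32]];

        // the wasm binding takes the vka serialized as JSON bytes
        let serialized = serde_json::to_vec(vka).unwrap();
        assert!(!serialized.is_empty());
    }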
#[wasm_bindgen_test]
async fn can_verify_evm() {
// verify with single purpose evm verifier contract
let value = verifyEVM(
wasm_bindgen::Clamped(PROOF.to_vec()),
VERIFIER_BYTECODE.to_vec(),
None,
)
.map_err(|_| "failed")
.unwrap();

// should not fail
assert!(value);
}

#[wasm_bindgen_test]
fn verify_kzg_commit() {
// create a vector of field elements Vec<Fr> and assign it to the message variable

@@ -1,5 +1,8 @@
import localEVMVerify from '../../in-browser-evm-verifier/src/index'
import { serialize, deserialize } from '@ezkljs/engine/nodejs'
import {
serialize,
deserialize
} from './utils';
import * as wasmFunctions from './nodejs/ezkl'
import { compileContracts } from './utils'
import * as fs from 'fs'

@@ -9,9 +12,9 @@ exports.VK = require("minimist")(process.argv.slice(2))["vk"];

describe('localEVMVerify', () => {

let bytecode_verifier: string
let bytecode_verifier_buffer: Uint8Array

let bytecode_vk: string | undefined = undefined
let bytecode_vk_buffer: Uint8Array | undefined = undefined

let proof: any

@@ -22,16 +25,19 @@ describe('localEVMVerify', () => {
beforeEach(() => {
const solcOutput = compileContracts(path, example, 'kzg')

bytecode_verifier =
let bytecode_verifier =
solcOutput.contracts['artifacts/Verifier.sol']['Halo2Verifier'].evm.bytecode
.object
bytecode_verifier_buffer = new TextEncoder().encode(bytecode_verifier)


if (vk) {
const solcOutput_vk = compileContracts(path, example, 'vk')

bytecode_vk =
let bytecode_vk =
solcOutput_vk.contracts['artifacts/Verifier.sol']['Halo2VerifyingKey'].evm.bytecode
.object
bytecode_vk_buffer = new TextEncoder().encode(bytecode_vk)


console.log('size of verifier bytecode', bytecode_verifier.length)
@@ -41,10 +47,11 @@ describe('localEVMVerify', () => {

it('should return true when verification succeeds', async () => {
const proofFileBuffer = fs.readFileSync(`${path}/${example}/proof.pf`)
const proofSer = new Uint8ClampedArray(proofFileBuffer.buffer)

proof = deserialize(proofFileBuffer)
proof = deserialize(proofSer)

const result = await localEVMVerify(proofFileBuffer, bytecode_verifier, bytecode_vk)
const result = wasmFunctions.verifyEVM(proofSer, bytecode_verifier_buffer, bytecode_vk_buffer)

console.log('result', result)

@@ -64,18 +71,10 @@ describe('localEVMVerify', () => {
proof.proof[index] = number
console.log('index post', proof.proof[index])
const proofModified = serialize(proof)
result = await localEVMVerify(proofModified, bytecode_verifier, bytecode_vk)
result = wasmFunctions.verifyEVM(proofModified, bytecode_verifier_buffer, bytecode_vk_buffer)
} catch (error) {
// Check if the error thrown is the "out of gas" error.
expect(error).toEqual(
expect.objectContaining({
error: 'revert',
errorType: 'EvmError',
}),
)
result = false
}
// If localEVMVerify doesn't throw, check the results
expect(result).toBe(false)
})
})