Mirror of https://github.com/zkonduit/ezkl.git, synced 2026-01-13 00:08:12 -05:00

Compare commits: ac/release ... release-v1 (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | 0108e9eb22 | |

4  .cargo/config  Normal file
@@ -0,0 +1,4 @@
[target.wasm32-unknown-unknown]
runner = 'wasm-bindgen-test-runner'
rustflags = ["-C", "target-feature=+atomics,+bulk-memory,+mutable-globals","-C",
"link-arg=--max-memory=4294967296"]
@@ -1,17 +0,0 @@
[target.wasm32-unknown-unknown]
runner = 'wasm-bindgen-test-runner'
rustflags = ["-C", "target-feature=+atomics,+bulk-memory,+mutable-globals","-C",
"link-arg=--max-memory=4294967296"]


[target.x86_64-apple-darwin]
rustflags = [
    "-C", "link-arg=-undefined",
    "-C", "link-arg=dynamic_lookup",
]

[target.aarch64-apple-darwin]
rustflags = [
    "-C", "link-arg=-undefined",
    "-C", "link-arg=dynamic_lookup",
]
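A quick way to exercise this config locally — a minimal sketch, assuming rustup and wasm-bindgen-test-runner are installed (cargo picks the flags up from .cargo/config automatically):

# +atomics,+bulk-memory,+mutable-globals turn on shared-memory (threaded) wasm;
# link-arg=--max-memory=4294967296 raises the linker cap to 4 GiB
# (65536 pages x 64 KiB per wasm page).
rustup target add wasm32-unknown-unknown
cargo test --target wasm32-unknown-unknown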
101  .github/workflows/benchmarks.yml  vendored

@@ -6,16 +6,23 @@ on:
        description: "Test scenario tags"

jobs:

  bench_poseidon:
    permissions:
      contents: read
  bench_elgamal:
    runs-on: self-hosted
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
          toolchain: nightly-2023-06-27
          override: true
          components: rustfmt, clippy
      - name: Bench elgamal
        run: cargo bench --verbose --bench elgamal

  bench_poseidon:
    runs-on: self-hosted
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2023-06-27
          override: true
@@ -24,15 +31,11 @@ jobs:
        run: cargo bench --verbose --bench poseidon

  bench_einsum_accum_matmul:
    permissions:
      contents: read
    runs-on: self-hosted
    needs: [bench_poseidon]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2023-06-27
          override: true
@@ -41,15 +44,11 @@ jobs:
        run: cargo bench --verbose --bench accum_einsum_matmul

  bench_accum_matmul_relu:
    permissions:
      contents: read
    runs-on: self-hosted
    needs: [bench_poseidon]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2023-06-27
          override: true
@@ -58,15 +57,11 @@ jobs:
        run: cargo bench --verbose --bench accum_matmul_relu

  bench_accum_matmul_relu_overflow:
    permissions:
      contents: read
    runs-on: self-hosted
    needs: [bench_poseidon]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2023-06-27
          override: true
@@ -75,15 +70,11 @@ jobs:
        run: cargo bench --verbose --bench accum_matmul_relu_overflow

  bench_relu:
    permissions:
      contents: read
    runs-on: self-hosted
    needs: [bench_poseidon]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2023-06-27
          override: true
@@ -92,15 +83,11 @@ jobs:
        run: cargo bench --verbose --bench relu

  bench_accum_dot:
    permissions:
      contents: read
    runs-on: self-hosted
    needs: [bench_poseidon]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2023-06-27
          override: true
@@ -109,15 +96,11 @@ jobs:
        run: cargo bench --verbose --bench accum_dot

  bench_accum_conv:
    permissions:
      contents: read
    runs-on: self-hosted
    needs: [bench_poseidon]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2023-06-27
          override: true
@@ -126,15 +109,11 @@ jobs:
        run: cargo bench --verbose --bench accum_conv

  bench_accum_sumpool:
    permissions:
      contents: read
    runs-on: self-hosted
    needs: [bench_poseidon]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2023-06-27
          override: true
@@ -143,15 +122,11 @@ jobs:
        run: cargo bench --verbose --bench accum_sumpool

  bench_pairwise_add:
    permissions:
      contents: read
    runs-on: self-hosted
    needs: [bench_poseidon]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2023-06-27
          override: true
@@ -160,15 +135,11 @@ jobs:
        run: cargo bench --verbose --bench pairwise_add

  bench_accum_sum:
    permissions:
      contents: read
    runs-on: self-hosted
    needs: [bench_poseidon]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2023-06-27
          override: true
@@ -177,15 +148,11 @@ jobs:
        run: cargo bench --verbose --bench accum_sum

  bench_pairwise_pow:
    permissions:
      contents: read
    runs-on: self-hosted
    needs: [bench_poseidon]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2023-06-27
          override: true
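Each job above is a thin wrapper around a single Criterion benchmark target. Reproducing one locally is a minimal sketch, assuming the pinned nightly is installed:

rustup toolchain install nightly-2023-06-27
# --bench selects one [[bench]] target declared in Cargo.toml (poseidon here).
cargo +nightly-2023-06-27 bench --verbose --bench poseidon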
90  .github/workflows/engine.yml  vendored

@@ -15,34 +15,22 @@ defaults:
    working-directory: .
jobs:
  publish-wasm-bindings:
    permissions:
      contents: read
      packages: write
    name: publish-wasm-bindings
    env:
      RELEASE_TAG: ${{ github.ref_name }}
      RUSTFLAGS: "-C target-feature=+atomics,+bulk-memory"
    runs-on: ubuntu-latest
    if: startsWith(github.ref, 'refs/tags/')
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
        with:
          toolchain: nightly-2025-06-16
          toolchain: nightly-2024-02-06
          override: true
          components: rustfmt, clippy
          cache: false
      - uses: jetli/wasm-pack-action@0d096b08b4e5a7de8c28de67e11e945404e9eefa #v0.4.0
        with:
          # Pin to version 0.12.1
          version: 'v0.12.1'
      - uses: jetli/wasm-pack-action@v0.4.0
      - name: Add wasm32-unknown-unknown target
        run: rustup target add wasm32-unknown-unknown

      - name: Add rust-src
        run: rustup component add rust-src --toolchain nightly-2025-06-16-x86_64-unknown-linux-gnu
        run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
      - name: Install binaryen
        run: |
          set -e
@@ -51,45 +39,45 @@ jobs:
          wasm-opt --version
      - name: Build wasm files for both web and nodejs compilation targets
        run: |
          wasm-pack build --release --target nodejs --out-dir ./pkg/nodejs . -- -Z build-std="panic_abort,std"
          wasm-pack build --release --target web --out-dir ./pkg/web . -- -Z build-std="panic_abort,std" --features web
          wasm-pack build --release --target nodejs --out-dir ./pkg/nodejs . -- -Z build-std="panic_abort,std"
          wasm-pack build --release --target web --out-dir ./pkg/web . -- -Z build-std="panic_abort,std" --features web
      - name: Create package.json in pkg folder
        shell: bash
        env:
          RELEASE_TAG: ${{ github.ref_name }}
        run: |
          cat > pkg/package.json << EOF
          {
            "name": "@ezkljs/engine",
            "version": "$RELEASE_TAG",
            "dependencies": {
              "@types/json-bigint": "^1.0.1",
              "json-bigint": "^1.0.0"
            },
            "files": [
              "nodejs/ezkl_bg.wasm",
              "nodejs/ezkl.js",
              "nodejs/ezkl.d.ts",
              "nodejs/package.json",
              "nodejs/utils.js",
              "web/ezkl_bg.wasm",
              "web/ezkl.js",
              "web/ezkl.d.ts",
              "web/snippets/**/*",
              "web/package.json",
              "web/utils.js",
              "ezkl.d.ts"
            ],
            "main": "nodejs/ezkl.js",
            "module": "web/ezkl.js",
            "types": "nodejs/ezkl.d.ts",
            "sideEffects": [
              "web/snippets/*"
            ]
          }
          EOF
          echo '{
            "name": "@ezkljs/engine",
            "version": "${{ github.ref_name }}",
            "dependencies": {
              "@types/json-bigint": "^1.0.1",
              "json-bigint": "^1.0.0"
            },
            "files": [
              "nodejs/ezkl_bg.wasm",
              "nodejs/ezkl.js",
              "nodejs/ezkl.d.ts",
              "nodejs/package.json",
              "nodejs/utils.js",
              "web/ezkl_bg.wasm",
              "web/ezkl.js",
              "web/ezkl.d.ts",
              "web/snippets/**/*",
              "web/package.json",
              "web/utils.js",
              "ezkl.d.ts"
            ],
            "main": "nodejs/ezkl.js",
            "module": "web/ezkl.js",
            "types": "nodejs/ezkl.d.ts",
            "sideEffects": [
              "web/snippets/*"
            ]
          }' > pkg/package.json

      - name: Replace memory definition in nodejs
        run: |
          sed -i "3s|.*|imports['env'] = {memory: new WebAssembly.Memory({initial:21,maximum:65536,shared:true})}|" pkg/nodejs/ezkl.js
          sed -i "3s|.*|imports['env'] = {memory: new WebAssembly.Memory({initial:20,maximum:65536,shared:true})}|" pkg/nodejs/ezkl.js

      - name: Replace `import.meta.url` with `import.meta.resolve` definition in workerHelpers.js
        run: |
@@ -178,7 +166,7 @@ jobs:
          curl -s "https://raw.githubusercontent.com/zkonduit/ezkljs-engine/main/README.md" > ./pkg/README.md

      - name: Set up Node.js
        uses: actions/setup-node@1a4442cacd436585916779262731d5b162bc6ec7 #v3.8.2
        uses: actions/setup-node@v3
        with:
          node-version: "18.12.1"
          registry-url: "https://registry.npmjs.org"
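The package.json step above trades an inline ${{ github.ref_name }} interpolation for a heredoc that reads the tag from an env var, so an attacker-controlled ref name never lands inside the shell script text. The pattern, reduced to a sketch (tag value hypothetical):

RELEASE_TAG=v7.0.0   # hypothetical; the workflow sets this from github.ref_name
cat > pkg/package.json << EOF
{
  "name": "@ezkljs/engine",
  "version": "$RELEASE_TAG"
}
EOF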
10  .github/workflows/large-tests.yml  vendored

@@ -6,16 +6,12 @@ on:
        description: "Test scenario tags"
jobs:
  large-tests:
    permissions:
      contents: read
    runs-on: kaiju
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
        with:
          toolchain: nightly-2025-06-16
          toolchain: nightly-2024-02-06
          override: true
          components: rustfmt, clippy
      - name: nanoGPT Mock
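The checkouts here add persist-credentials: false, which stops actions/checkout from leaving the job's token in the clone's git config. A way to inspect a workspace after the fact — a sketch, and the config key is to the best of my knowledge the one checkout uses:

# Prints the stored auth header if credentials were persisted; otherwise nothing.
git config --local --get http.https://github.com/.extraheader || echo "no persisted credentials"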
43  .github/workflows/pypi-gpu.yml  vendored

@@ -18,47 +18,38 @@ defaults:
jobs:

  linux:
    permissions:
      contents: read
      packages: write
    runs-on: GPU
    strategy:
      matrix:
        target: [x86_64]
    env:
      RELEASE_TAG: ${{ github.ref_name }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
        with:
          python-version: 3.12
          architecture: x64

      - name: Set pyproject.toml version to match github tag and rename ezkl to ezkl-gpu
      - name: Set pyproject.toml version to match github tag
        shell: bash
        run: |
          mv pyproject.toml pyproject.toml.orig
          sed "s/ezkl/ezkl-gpu/" pyproject.toml.orig > pyproject.toml.tmp
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.tmp > pyproject.toml
          sed "s/ezkl/ezkl-gpu/" pyproject.toml.orig >pyproject.toml

      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2023-06-27
          override: true
          components: rustfmt, clippy
          cache: false

      - name: Set Cargo.toml version to match github tag and rename ezkl to ezkl-gpu
      - name: Set Cargo.toml version to match github tag
        shell: bash
        # the ezkl substitution here looks for the first instance of name = "ezkl" and changes it to "ezkl-gpu"
        env:
          RELEASE_TAG: ${{ github.ref_name }}
        run: |
          mv Cargo.toml Cargo.toml.orig
          sed "0,/name = \"ezkl\"/s/name = \"ezkl\"/name = \"ezkl-gpu\"/" Cargo.toml.orig > Cargo.toml.tmp
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.toml.tmp > Cargo.toml
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.toml.orig >Cargo.toml
          mv Cargo.lock Cargo.lock.orig
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig > Cargo.lock
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock

      - name: Install required libraries
        shell: bash
@@ -66,7 +57,7 @@ jobs:
          sudo apt-get update && sudo apt-get install -y openssl pkg-config libssl-dev

      - name: Build wheels
        uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
        uses: PyO3/maturin-action@v1
        with:
          target: ${{ matrix.target }}
          manylinux: auto
@@ -79,7 +70,7 @@ jobs:
          pip install ezkl-gpu --no-index --find-links dist --force-reinstall

      - name: Upload wheels
        uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0
        uses: actions/upload-artifact@v3
        with:
          name: wheels
          path: dist
@@ -95,7 +86,7 @@ jobs:
    # needs: [ macos, windows, linux, linux-cross, musllinux, musllinux-cross ]
    needs: [linux]
    steps:
      - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 #v4.1.8
      - uses: actions/download-artifact@v3
        with:
          name: wheels
      - name: List Files
@@ -107,14 +98,14 @@ jobs:
      # publishes to PyPI
      - name: Publish package distributions to PyPI
        continue-on-error: true
        uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc #v1.12.4
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          packages-dir: ./wheels
          packages-dir: ./

      # publishes to TestPyPI
      - name: Publish package distribution to TestPyPI
        continue-on-error: true
        uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc #v1.12.4
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          repository-url: https://test.pypi.org/legacy/
          packages-dir: ./wheels
          packages-dir: ./
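The version-stamping steps all follow one pattern: move the file aside, then rewrite the 0.0.0 placeholder using bash's ${RELEASE_TAG//v}, which deletes every "v" in the tag (for a plain SemVer tag like v1.2.3 that is just the leading one). As a standalone sketch with a hypothetical tag:

RELEASE_TAG=v1.2.3
mv Cargo.toml Cargo.toml.orig
sed "s/0\.0\.0/${RELEASE_TAG//v}/" Cargo.toml.orig > Cargo.toml   # 0.0.0 -> 1.2.3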
227  .github/workflows/pypi.yml  vendored

@@ -16,94 +16,63 @@ defaults:

jobs:
  macos:
    permissions:
      contents: read
    runs-on: macos-latest
    if: startsWith(github.ref, 'refs/tags/')
    strategy:
      matrix:
        target: [x86_64, universal2-apple-darwin]
    env:
      RELEASE_TAG: ${{ github.ref_name }}
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
        with:
          python-version: 3.12
          architecture: x64

      - name: Set pyproject.toml version to match github tag
        shell: bash
        run: |
          mv pyproject.toml pyproject.toml.orig
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.orig >pyproject.toml

      - name: Set Cargo.toml version to match github tag
        shell: bash
        env:
          RELEASE_TAG: ${{ github.ref_name }}
        run: |
          mv Cargo.toml Cargo.toml.orig
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.toml.orig >Cargo.toml
          mv Cargo.lock Cargo.lock.orig
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock

      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2025-06-16
          toolchain: nightly-2024-02-06
          override: true
          components: rustfmt, clippy
          cache: false

      - name: Build wheels
        if: matrix.target == 'universal2-apple-darwin'
        uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
        with:
          target: ${{ matrix.target }}
          args: --release --out dist --features python-bindings
      - name: Build wheels
        if: matrix.target == 'x86_64'
        uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
        uses: PyO3/maturin-action@v1
        with:
          target: ${{ matrix.target }}
          args: --release --out dist --features python-bindings
      - name: Install built wheel
        if: matrix.target == 'universal2-apple-darwin'
        run: |
          pip install ezkl --no-index --find-links dist --force-reinstall
          python -c "import ezkl"

      - name: Upload wheels
        uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0
        uses: actions/upload-artifact@v3
        with:
          name: dist-macos-${{ matrix.target }}
          name: wheels
          path: dist

  windows:
    permissions:
      contents: read
    runs-on: windows-latest
    if: startsWith(github.ref, 'refs/tags/')
    strategy:
      matrix:
        target: [x64, x86]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
        with:
          python-version: 3.12
          architecture: ${{ matrix.target }}

      - name: Set pyproject.toml version to match github tag
        shell: bash
        env:
          RELEASE_TAG: ${{ github.ref_name }}
        run: |
          mv pyproject.toml pyproject.toml.orig
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.orig >pyproject.toml

      - name: Set Cargo.toml version to match github tag
        shell: bash
        env:
@@ -114,15 +83,14 @@ jobs:
          mv Cargo.lock Cargo.lock.orig
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock

      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2025-06-16
          toolchain: nightly-2024-02-06
          override: true
          components: rustfmt, clippy
          cache: false

      - name: Build wheels
        uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
        uses: PyO3/maturin-action@v1
        with:
          target: ${{ matrix.target }}
          args: --release --out dist --features python-bindings
@@ -132,36 +100,24 @@ jobs:
          python -c "import ezkl"

      - name: Upload wheels
        uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0 #v4.6.0
        uses: actions/upload-artifact@v3
        with:
          name: dist-windows-${{ matrix.target }}
          name: wheels
          path: dist

  linux:
    permissions:
      contents: read
    runs-on: ubuntu-latest
    if: startsWith(github.ref, 'refs/tags/')
    strategy:
      matrix:
        target: [x86_64]
        target: [x86_64, i686]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
        with:
          python-version: 3.12
          architecture: x64

      - name: Set pyproject.toml version to match github tag
        shell: bash
        env:
          RELEASE_TAG: ${{ github.ref_name }}
        run: |
          mv pyproject.toml pyproject.toml.orig
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.orig >pyproject.toml

      - name: Set Cargo.toml version to match github tag
        shell: bash
        env:
@@ -172,13 +128,14 @@ jobs:
          mv Cargo.lock Cargo.lock.orig
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock


      - name: Install required libraries
        shell: bash
        run: |
          sudo apt-get update && sudo apt-get install -y openssl pkg-config libssl-dev

      - name: Build wheels
        uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
        uses: PyO3/maturin-action@v1
        with:
          target: ${{ matrix.target }}
          manylinux: auto
@@ -205,14 +162,63 @@ jobs:
          python -c "import ezkl"

      - name: Upload wheels
        uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0
        uses: actions/upload-artifact@v3
        with:
          name: dist-linux-${{ matrix.target }}
          name: wheels
          path: dist

  # TODO: There's a problem with the maturin-action toolchain for arm arch leading to failed builds
  # linux-cross:
  #   runs-on: ubuntu-latest
  #   strategy:
  #     matrix:
  #       target: [aarch64, armv7]
  #   steps:
  #     - uses: actions/checkout@v4
  #     - uses: actions/setup-python@v4
  #       with:
  #         python-version: 3.12

  #     - name: Install cross-compilation tools for aarch64
  #       if: matrix.target == 'aarch64'
  #       run: |
  #         sudo apt-get update
  #         sudo apt-get install -y gcc make gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu libc6-dev-arm64-cross libusb-1.0-0-dev libatomic1-arm64-cross

  #     - name: Install cross-compilation tools for armv7
  #       if: matrix.target == 'armv7'
  #       run: |
  #         sudo apt-get update
  #         sudo apt-get install -y gcc make gcc-arm-linux-gnueabihf binutils-arm-linux-gnueabihf libc6-dev-armhf-cross libusb-1.0-0-dev libatomic1-armhf-cross

  #     - name: Build wheels
  #       uses: PyO3/maturin-action@v1
  #       with:
  #         target: ${{ matrix.target }}
  #         manylinux: auto
  #         args: --release --out dist --features python-bindings

  #     - uses: uraimo/run-on-arch-action@v2.5.0
  #       name: Install built wheel
  #       with:
  #         arch: ${{ matrix.target }}
  #         distro: ubuntu20.04
  #         githubToken: ${{ github.token }}
  #         install: |
  #           apt-get update
  #           apt-get install -y --no-install-recommends python3 python3-pip
  #           pip3 install -U pip
  #         run: |
  #           pip3 install ezkl --no-index --find-links dist/ --force-reinstall
  #           python3 -c "import ezkl"

  #     - name: Upload wheels
  #       uses: actions/upload-artifact@v3
  #       with:
  #         name: wheels
  #         path: dist

  musllinux:
    permissions:
      contents: read
    runs-on: ubuntu-latest
    if: startsWith(github.ref, 'refs/tags/')
    strategy:
@@ -220,22 +226,12 @@ jobs:
        target:
          - x86_64-unknown-linux-musl
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
        with:
          python-version: 3.12
          architecture: x64

      - name: Set pyproject.toml version to match github tag
        shell: bash
        env:
          RELEASE_TAG: ${{ github.ref_name }}
        run: |
          mv pyproject.toml pyproject.toml.orig
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.orig >pyproject.toml

      - name: Set Cargo.toml version to match github tag
        shell: bash
        env:
@@ -252,7 +248,7 @@ jobs:
          sudo apt-get update && sudo apt-get install -y pkg-config libssl-dev

      - name: Build wheels
        uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
        uses: PyO3/maturin-action@v1
        with:
          target: ${{ matrix.target }}
          manylinux: musllinux_1_2
@@ -260,7 +256,7 @@ jobs:

      - name: Install built wheel
        if: matrix.target == 'x86_64-unknown-linux-musl'
        uses: addnab/docker-run-action@3e77f186b7a929ef010f183a9e24c0f9955ea609
        uses: addnab/docker-run-action@v3
        with:
          image: alpine:latest
          options: -v ${{ github.workspace }}:/io -w /io
@@ -273,14 +269,12 @@ jobs:
          python3 -c "import ezkl"

      - name: Upload wheels
        uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0
        uses: actions/upload-artifact@v3
        with:
          name: dist-musllinux-${{ matrix.target }}
          name: wheels
          path: dist

  musllinux-cross:
    permissions:
      contents: read
    runs-on: ubuntu-latest
    if: startsWith(github.ref, 'refs/tags/')
    strategy:
@@ -288,22 +282,14 @@ jobs:
        platform:
          - target: aarch64-unknown-linux-musl
            arch: aarch64
          - target: armv7-unknown-linux-musleabihf
            arch: armv7
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
        with:
          python-version: 3.12

      - name: Set pyproject.toml version to match github tag
        shell: bash
        env:
          RELEASE_TAG: ${{ github.ref_name }}
        run: |
          mv pyproject.toml pyproject.toml.orig
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.orig >pyproject.toml

      - name: Set Cargo.toml version to match github tag
        shell: bash
        env:
@@ -315,13 +301,13 @@ jobs:
          sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock

      - name: Build wheels
        uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
        uses: PyO3/maturin-action@v1
        with:
          target: ${{ matrix.platform.target }}
          manylinux: musllinux_1_2
          args: --release --out dist --features python-bindings

      - uses: uraimo/run-on-arch-action@5397f9e30a9b62422f302092631c99ae1effcd9e #v2.8.1
      - uses: uraimo/run-on-arch-action@v2.5.0
        name: Install built wheel
        with:
          arch: ${{ matrix.platform.arch }}
@@ -336,9 +322,9 @@ jobs:
          python3 -c "import ezkl"

      - name: Upload wheels
        uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0
        uses: actions/upload-artifact@v3
        with:
          name: dist-musllinux-${{ matrix.platform.target }}
          name: wheels
          path: dist

  pypi-publish:
@@ -347,43 +333,44 @@ jobs:
    permissions:
      id-token: write
    if: "startsWith(github.ref, 'refs/tags/')"
    # TODO: Uncomment if linux-cross is working
    # needs: [ macos, windows, linux, linux-cross, musllinux, musllinux-cross ]
    needs: [macos, windows, linux, musllinux, musllinux-cross]
    steps:
      - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 #v4.1.8
      - uses: actions/download-artifact@v3
        with:
          pattern: dist-*
          merge-multiple: true
          path: wheels
          name: wheels
      - name: List Files
        run: ls -R

      # # publishes to TestPyPI
      # - name: Publish package distribution to TestPyPI
      #   uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc #v1.12.4
      #   with:
      #     repository-url: https://test.pypi.org/legacy/
      #     packages-dir: ./
      # Both publish steps will fail if there is no trusted publisher setup
      # On failure the publish step will then simply continue to the next one

      # publishes to PyPI
      - name: Publish package distributions to PyPI
        uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc #v1.12.4
        continue-on-error: true
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          packages-dir: ./wheels
          packages-dir: ./

      # publishes to TestPyPI
      - name: Publish package distribution to TestPyPI
        continue-on-error: true
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          repository-url: https://test.pypi.org/legacy/
          packages-dir: ./

  doc-publish:
    permissions:
      contents: read
    name: Trigger ReadTheDocs Build
    runs-on: ubuntu-latest
    needs: pypi-publish
    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: Trigger RTDs build
        uses: dfm/rtds-action@618148c547f4b56cdf4fa4dcf3a94c91ce025f2d
        uses: dfm/rtds-action@v1
        with:
          webhook_url: ${{ secrets.RTDS_WEBHOOK_URL }}
          webhook_token: ${{ secrets.RTDS_WEBHOOK_TOKEN }}
          commit_ref: ${{ github.ref_name }}
          commit_ref: ${{ github.ref_name }}
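Each platform job reduces to a maturin build plus an import smoke test. A local equivalent is a sketch, assuming maturin is available and the repo's python-bindings feature builds on the host:

pip install maturin
maturin build --release --out dist --features python-bindings
pip install ezkl --no-index --find-links dist --force-reinstall
python -c "import ezkl"   # smoke test: the wheel imports cleanly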
66  .github/workflows/release.yml  vendored

@@ -10,9 +10,6 @@ on:
      - "*"
jobs:
  create-release:
    permissions:
      contents: read
      packages: write
    name: create-release
    runs-on: ubuntu-22.04
    if: startsWith(github.ref, 'refs/tags/')
@@ -26,18 +23,16 @@ jobs:
        shell: bash
        run: |
          echo "EZKL_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
          echo "version is: ${{ env.EZKL_VERSION }}"

      - name: Create Github Release
        id: create-release
        uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda #v2.2.1
        uses: softprops/action-gh-release@v1
        with:
          token: ${{ secrets.RELEASE_TOKEN }}
          tag_name: ${{ env.EZKL_VERSION }}

  build-release-gpu:
    permissions:
      contents: read
      packages: write
    name: build-release-gpu
    needs: ["create-release"]
    runs-on: GPU
@@ -48,22 +43,19 @@ jobs:
      RUST_BACKTRACE: 1
      PCRE2_SYS_STATIC: 1
    steps:
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2025-06-16
          toolchain: nightly-2024-02-06
          override: true
          components: rustfmt, clippy
          cache: false
      - name: Checkout repo
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false

        uses: actions/checkout@v4

      - name: Get release version from tag
        shell: bash
        run: |
          echo "EZKL_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
          echo "version is: ${{ env.EZKL_VERSION }}"

      - name: Set Cargo.toml version to match github tag
        shell: bash
@@ -89,7 +81,7 @@ jobs:
          echo "ASSET=build-artifacts/ezkl-linux-gpu.tar.gz" >> $GITHUB_ENV

      - name: Upload release archive
        uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2
        uses: actions/upload-release-asset@v1.0.2
        env:
          GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }}
        with:
@@ -99,10 +91,6 @@ jobs:
          asset_content_type: application/octet-stream

  build-release:
    permissions:
      contents: read
      packages: write
      issues: write
    name: build-release
    needs: ["create-release"]
    runs-on: ${{ matrix.os }}
@@ -118,39 +106,38 @@ jobs:
      include:
        - build: windows-msvc
          os: windows-latest
          rust: nightly-2025-06-16
          rust: nightly-2024-02-06
          target: x86_64-pc-windows-msvc
        - build: macos
          os: macos-13
          rust: nightly-2025-06-16
          rust: nightly-2024-02-06
          target: x86_64-apple-darwin
        - build: macos-aarch64
          os: macos-13
          rust: nightly-2025-06-16
          rust: nightly-2024-02-06
          target: aarch64-apple-darwin
        - build: linux-musl
          os: ubuntu-latest-64-cores
          rust: nightly-2025-06-16
          os: ubuntu-22.04
          rust: nightly-2024-02-06
          target: x86_64-unknown-linux-musl
        - build: linux-gnu
          os: ubuntu-latest-64-cores
          rust: nightly-2025-06-16
          os: ubuntu-22.04
          rust: nightly-2024-02-06
          target: x86_64-unknown-linux-gnu
        - build: linux-aarch64
          os: ubuntu-latest-64-cores
          rust: nightly-2025-06-16
          os: ubuntu-22.04
          rust: nightly-2024-02-06
          target: aarch64-unknown-linux-gnu

    steps:
      - name: Checkout repo
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
        uses: actions/checkout@v4

      - name: Get release version from tag
        shell: bash
        run: |
          echo "EZKL_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
          echo "version is: ${{ env.EZKL_VERSION }}"

      - name: Set Cargo.toml version to match github tag
        shell: bash
@@ -168,7 +155,7 @@ jobs:
          fi

      - name: Install Rust
        uses: dtolnay/rust-toolchain@4f94fbe7e03939b0e674bcc9ca609a16088f63ff #nightly branch, TODO: update when required
        uses: dtolnay/rust-toolchain@nightly
        with:
          target: ${{ matrix.target }}

@@ -194,17 +181,8 @@ jobs:
          echo "target flag is: ${{ env.TARGET_FLAGS }}"
          echo "target dir is: ${{ env.TARGET_DIR }}"

      - name: Build release binary (no asm or metal)
        if: matrix.build != 'linux-gnu' && matrix.build != 'macos-aarch64'
        run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features mimalloc

      - name: Build release binary (asm)
        if: matrix.build == 'linux-gnu'
        run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features asm,mimalloc

      - name: Build release binary (metal)
        if: matrix.build == 'macos-aarch64'
        run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features macos-metal,mimalloc
      - name: Build release binary
        run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry

      - name: Strip release binary
        if: matrix.build != 'windows-msvc' && matrix.build != 'linux-aarch64'
@@ -231,7 +209,7 @@ jobs:
          echo "ASSET=build-artifacts/ezkl-win.zip" >> $GITHUB_ENV

      - name: Upload release archive
        uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2
        uses: actions/upload-release-asset@v1.0.2
        env:
          GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }}
        with:
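Both release jobs derive EZKL_VERSION with ${GITHUB_REF#refs/*/}, which strips the shortest prefix matching refs/*/. A sketch of the expansion with a hypothetical ref:

GITHUB_REF=refs/tags/v1.2.3
echo "${GITHUB_REF#refs/*/}"   # -> v1.2.3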
958  .github/workflows/rust.yml  vendored

File diff suppressed because it is too large
32  .github/workflows/static-analysis.yml  vendored

@@ -1,32 +0,0 @@
name: Static Analysis

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  analyze:
    permissions:
      contents: read
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
        with:
          toolchain: nightly-2025-06-16
          override: true
          components: rustfmt, clippy

      # Run Zizmor static analysis

      - name: Install Zizmor
        run: cargo install --locked zizmor

      - name: Run Zizmor Analysis
        run: zizmor .
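The two analysis steps in this (now-deleted) workflow run unchanged from any shell; zizmor is a cargo-installable static analyzer for GitHub Actions workflows:

cargo install --locked zizmor
zizmor .   # point it at the repo root; it collects the workflow files itself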
134  .github/workflows/swift-pm.yml  vendored

@@ -1,134 +0,0 @@
name: Build and Publish EZKL iOS SPM package

on:
  push:
    tags:
      # Only support SemVer versioning tags
      - 'v[0-9]+.[0-9]+.[0-9]+'
      - '[0-9]+.[0-9]+.[0-9]+'

jobs:
  build-and-update:
    permissions:
      contents: read
      packages: write
    runs-on: macos-latest
    env:
      EZKL_SWIFT_PACKAGE_REPO: github.com/zkonduit/ezkl-swift-package.git
      RELEASE_TAG: ${{ github.ref_name }}

    steps:
      - name: Checkout EZKL
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false

      - name: Extract TAG from github.ref_name
        run: |
          # github.ref_name is provided by GitHub Actions and contains the tag name directly.
          TAG="${RELEASE_TAG}"
          echo "Original TAG: $TAG"
          # Remove leading 'v' if present to match the Swift Package Manager version format.
          NEW_TAG=${TAG#v}
          echo "Stripped TAG: $NEW_TAG"
          echo "TAG=$NEW_TAG" >> $GITHUB_ENV

      - name: Install Rust (nightly)
        uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
        with:
          toolchain: nightly
          override: true

      - name: Build EzklCoreBindings
        run: CONFIGURATION=release cargo run --bin ios_gen_bindings --features "ios-bindings uuid camino uniffi_bindgen" --no-default-features

      - name: Clone ezkl-swift-package repository
        run: |
          git clone https://${{ env.EZKL_SWIFT_PACKAGE_REPO }}

      - name: Copy EzklCoreBindings
        run: |
          rm -rf ezkl-swift-package/Sources/EzklCoreBindings
          cp -r build/EzklCoreBindings ezkl-swift-package/Sources/

      - name: Copy Test Files
        run: |
          rm -rf ezkl-swift-package/Tests/EzklAssets/
          mkdir -p ezkl-swift-package/Tests/EzklAssets/
          cp tests/assets/kzg ezkl-swift-package/Tests/EzklAssets/kzg.srs
          cp tests/assets/input.json ezkl-swift-package/Tests/EzklAssets/input.json
          cp tests/assets/model.compiled ezkl-swift-package/Tests/EzklAssets/network.ezkl
          cp tests/assets/settings.json ezkl-swift-package/Tests/EzklAssets/settings.json

      - name: Check for changes
        id: check_changes
        run: |
          cd ezkl-swift-package
          if git diff --quiet Sources/EzklCoreBindings Tests/EzklAssets; then
            echo "no_changes=true" >> $GITHUB_OUTPUT
          else
            echo "no_changes=false" >> $GITHUB_OUTPUT
          fi

      - name: Set up Xcode environment
        if: steps.check_changes.outputs.no_changes == 'false'
        run: |
          sudo xcode-select -s /Applications/Xcode.app/Contents/Developer
          sudo xcodebuild -license accept

      - name: Run Package Tests
        if: steps.check_changes.outputs.no_changes == 'false'
        run: |
          cd ezkl-swift-package
          xcodebuild test \
            -scheme EzklPackage \
            -destination 'platform=iOS Simulator,name=iPhone 15 Pro,OS=17.5' \
            -resultBundlePath ../testResults

      - name: Run Example App Tests
        if: steps.check_changes.outputs.no_changes == 'false'
        run: |
          cd ezkl-swift-package/Example
          xcodebuild test \
            -project Example.xcodeproj \
            -scheme EzklApp \
            -destination 'platform=iOS Simulator,name=iPhone 15 Pro,OS=17.5' \
            -parallel-testing-enabled NO \
            -resultBundlePath ../../exampleTestResults \
            -skip-testing:EzklAppUITests/EzklAppUITests/testButtonClicksInOrder

      - name: Setup Git
        run: |
          cd ezkl-swift-package
          git config user.name "GitHub Action"
          git config user.email "action@github.com"
          git remote set-url origin https://zkonduit:${EZKL_SWIFT_PACKAGE_REPO_TOKEN}@${{ env.EZKL_SWIFT_PACKAGE_REPO }}
        env:
          EZKL_SWIFT_PACKAGE_REPO_TOKEN: ${{ secrets.EZKL_PORTER_TOKEN }}

      - name: Commit and Push Changes
        if: steps.check_changes.outputs.no_changes == 'false'
        run: |
          cd ezkl-swift-package
          git add Sources/EzklCoreBindings Tests/EzklAssets
          git commit -m "Automatically updated EzklCoreBindings for EZKL"
          if ! git push origin; then
            echo "::error::Failed to push changes to ${{ env.EZKL_SWIFT_PACKAGE_REPO }}. Please ensure that EZKL_PORTER_TOKEN has the correct permissions."
            exit 1
          fi

      - name: Tag the latest commit
        run: |
          cd ezkl-swift-package
          source $GITHUB_ENV
          # Tag the latest commit on the current branch
          if git rev-parse "$TAG" >/dev/null 2>&1; then
            echo "Tag $TAG already exists locally. Skipping tag creation."
          else
            git tag "$TAG"
          fi

          if ! git push origin "$TAG"; then
            echo "::error::Failed to push tag '$TAG' to ${{ env.EZKL_SWIFT_PACKAGE_REPO }}. Please ensure EZKL_PORTER_TOKEN has correct permissions."
            exit 1
          fi
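The "Extract TAG" step normalizes SemVer tags with ${TAG#v}, which removes a single leading "v" if present and leaves bare versions untouched:

TAG=v5.1.0; echo "${TAG#v}"   # -> 5.1.0
TAG=5.1.0;  echo "${TAG#v}"   # -> 5.1.0 (already bare, unchanged)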
8  .github/workflows/tagging.yml  vendored

@@ -11,12 +11,10 @@ jobs:
      contents: write

    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
        with:
          persist-credentials: false
      - uses: actions/checkout@v4
      - name: Bump version and push tag
        id: tag_version
        uses: mathieudutour/github-tag-action@a22cf08638b34d5badda920f9daf6e72c477b07b #v6.2
        uses: mathieudutour/github-tag-action@v6.2
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}

@@ -46,7 +44,7 @@ jobs:
          git tag $RELEASE_TAG

      - name: Push changes
        uses: ad-m/github-push-action@77c5b412c50b723d2a4fbc6d71fb5723bcd439aa #master
        uses: ad-m/github-push-action@master
        env:
          RELEASE_TAG: ${{ steps.tag_version.outputs.new_tag }}
        with:
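The manual path this workflow automates is two git commands; as a sketch with a hypothetical tag:

RELEASE_TAG=v1.2.3
git tag $RELEASE_TAG
git push origin $RELEASE_TAG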
65  .github/workflows/verify.yml  vendored  Normal file

@@ -0,0 +1,65 @@
name: Build and Publish EZKL npm packages (wasm bindings and in-browser evm verifier)

on:
  workflow_dispatch:
    inputs:
      tag:
        description: "The tag to release"
        required: true
  push:
    tags:
      - "*"

defaults:
  run:
    working-directory: .
jobs:
  in-browser-evm-ver-publish:
    name: publish-in-browser-evm-verifier-package
    runs-on: ubuntu-latest
    if: startsWith(github.ref, 'refs/tags/')
    steps:
      - uses: actions/checkout@v4
      - name: Update version in package.json
        shell: bash
        env:
          RELEASE_TAG: ${{ github.ref_name }}
        run: |
          sed -i "s|\"version\": \".*\"|\"version\": \"${{ github.ref_name }}\"|" in-browser-evm-verifier/package.json
      - name: Update @ezkljs/engine version in package.json
        shell: bash
        env:
          RELEASE_TAG: ${{ github.ref_name }}
        run: |
          sed -i "s|\"@ezkljs/engine\": \".*\"|\"@ezkljs/engine\": \"${{ github.ref_name#v }}\"|" in-browser-evm-verifier/package.json
      - name: Update the engine import in in-browser-evm-verifier to use @ezkljs/engine package instead of the local one;
        run: |
          sed -i "s|import { encodeVerifierCalldata } from '../nodejs/ezkl';|import { encodeVerifierCalldata } from '@ezkljs/engine';|" in-browser-evm-verifier/src/index.ts
      - name: Fetch integrity
        run: |
          ENGINE_INTEGRITY=$(npm view @ezkljs/engine@${{ github.ref_name#v }} dist.integrity)
          echo "ENGINE_INTEGRITY=$ENGINE_INTEGRITY" >> $GITHUB_ENV
      - name: Update pnpm-lock.yaml versions and integrity
        run: |
          awk -v integrity="$ENGINE_INTEGRITY" -v tag="${{ github.ref_name#v }}" '
          NR==30{$0=" specifier: \"" tag "\""}
          NR==31{$0=" version: \"" tag "\""}
          NR==400{$0=" /@ezkljs/engine@" tag ":"}
          NR==401{$0=" resolution: {integrity: \"" integrity "\"}"} 1' in-browser-evm-verifier/pnpm-lock.yaml > temp.yaml && mv temp.yaml in-browser-evm-verifier/pnpm-lock.yaml
      - name: Use pnpm 8
        uses: pnpm/action-setup@v2
        with:
          version: 8
      - name: Set up Node.js
        uses: actions/setup-node@v3
        with:
          node-version: "18.12.1"
          registry-url: "https://registry.npmjs.org"
      - name: Publish to npm
        run: |
          cd in-browser-evm-verifier
          pnpm install --frozen-lockfile
          pnpm run build
          pnpm publish
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
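The "Fetch integrity" step reads the published engine package's integrity hash straight off the npm registry; the same query works from any shell (version hypothetical):

npm view @ezkljs/engine@7.0.0 dist.integrity   # prints e.g. sha512-...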
12  .gitignore  vendored

@@ -9,7 +9,6 @@ pkg
!AttestData.sol
!VerifierBase.sol
!LoadInstances.sol
!AttestData.t.sol
*.pf
*.vk
*.pk
@@ -28,6 +27,7 @@ __pycache__/
*.pyc
*.pyo
*.py[cod]
bin/
build/
develop-eggs/
dist/
@@ -46,11 +46,7 @@ var/
node_modules
/dist
timingData.json
!tests/assets/pk.key
!tests/assets/vk.key
!tests/wasm/pk.key
!tests/wasm/vk.key
docs/python/build
!tests/assets/vk_aggr.key
cache
out
!tests/assets/wasm.code
!tests/assets/wasm.sol
!tests/wasm/vk_aggr.key
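The ! entries re-include specific test assets that broader ignore patterns (*.vk, *.pk, and friends) would otherwise drop. To see which rule wins for a given path — a quick sketch:

git check-ignore -v tests/assets/pk.key
# with -v, git prints the .gitignore source, line number, and pattern that
# decided the match (including a negating ! pattern).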
3861  Cargo.lock  generated

File diff suppressed because it is too large
240
Cargo.toml
240
Cargo.toml
@@ -4,7 +4,6 @@ cargo-features = ["profile-rustflags"]
|
||||
name = "ezkl"
|
||||
version = "0.0.0"
|
||||
edition = "2021"
|
||||
default-run = "ezkl"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
@@ -12,109 +11,89 @@ default-run = "ezkl"
|
||||
# Name to be imported within python
|
||||
# Example: import ezkl
|
||||
name = "ezkl"
|
||||
crate-type = ["cdylib", "rlib", "staticlib"]
|
||||
crate-type = ["cdylib", "rlib"]
|
||||
|
||||
|
||||
[dependencies]
|
||||
halo2_gadgets = { git = "https://github.com/zkonduit/halo2" }
|
||||
halo2curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", rev = "b753a832e92d5c86c5c997327a9cf9de86a18851", features = [
|
||||
halo2_gadgets = { git = "https://github.com/zkonduit/halo2", branch = "ac/optional-selector-poly" }
|
||||
halo2_proofs = { git = "https://github.com/zkonduit/halo2", branch = "ac/optional-selector-poly" }
|
||||
halo2curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", rev = "9fff22c", features = [
|
||||
"derive_serde",
|
||||
] }
|
||||
halo2_proofs = { git = "https://github.com/zkonduit/halo2", package = "halo2_proofs", features = [
|
||||
"circuit-params",
|
||||
] }
|
||||
rand = { version = "0.8", default-features = false }
|
||||
itertools = { version = "0.10.3", default-features = false }
|
||||
clap = { version = "4.5.3", features = ["derive"], optional = true }
|
||||
serde = { version = "1.0.126", features = ["derive"] }
|
||||
clap_complete = { version = "4.5.2", optional = true }
|
||||
log = { version = "0.4.17", default-features = false }
|
||||
thiserror = { version = "1.0.38", default-features = false }
|
||||
hex = { version = "0.4.3", default-features = false }
|
||||
rand = { version = "0.8", default_features = false }
|
||||
itertools = { version = "0.10.3", default_features = false }
|
||||
clap = { version = "4.5.3", features = ["derive"] }
|
||||
serde = { version = "1.0.126", features = ["derive"], optional = true }
|
||||
serde_json = { version = "1.0.97", default_features = false, features = [
|
||||
"float_roundtrip",
|
||||
"raw_value",
|
||||
], optional = true }
|
||||
log = { version = "0.4.17", default_features = false, optional = true }
|
||||
thiserror = { version = "1.0.38", default_features = false }
|
||||
hex = { version = "0.4.3", default_features = false }
|
||||
halo2_wrong_ecc = { git = "https://github.com/zkonduit/halo2wrong", branch = "ac/chunked-mv-lookup", package = "ecc" }
|
||||
snark-verifier = { git = "https://github.com/zkonduit/snark-verifier", branch = "ac/chunked-mv-lookup", features = [
|
||||
"derive_serde",
|
||||
] }
|
||||
halo2_solidity_verifier = { git = "https://github.com/zkonduit/ezkl-verifier", branch = "main", optional = true, features = [
|
||||
"evm",
|
||||
] }
|
||||
maybe-rayon = { version = "0.1.1", default-features = false }
|
||||
bincode = { version = "1.3.3", default-features = false }
|
||||
halo2_solidity_verifier = { git = "https://github.com/alexander-camuto/halo2-solidity-verifier", branch = "main" }
|
||||
maybe-rayon = { version = "0.1.1", default_features = false }
|
||||
bincode = { version = "1.3.3", default_features = false }
|
||||
ark-std = { version = "^0.3.0", default-features = false }
|
||||
unzip-n = "0.1.2"
|
||||
num = "0.4.1"
|
||||
tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package = "tosubcommand", optional = true }
|
||||
semver = { version = "1.0.22", optional = true }
|
||||
|
||||
|
||||
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
|
||||
portable-atomic = "1.6.0"
|
||||
tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package = "tosubcommand" }
|
||||
metal = { git = "https://github.com/gfx-rs/metal-rs", optional = true }
|
||||
|
||||
# evm related deps
|
||||
serde_json = { version = "1.0.97", features = ["float_roundtrip", "raw_value"] }
|
||||
|
||||
alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev = "5fbf57bac99edef9d8475190109a7ea9fb7e5e83", features = [
|
||||
"provider-http",
|
||||
"signers",
|
||||
"contract",
|
||||
"rpc-types-eth",
|
||||
"signer-wallet",
|
||||
"node-bindings",
|
||||
|
||||
], optional = true }
|
||||
|
||||
foundry-compilers = { version = "0.4.1", features = [
|
||||
"svm-solc",
|
||||
], optional = true }
|
||||
ethabi = { version = "18", optional = true }
|
||||
indicatif = { version = "0.17.5", features = ["rayon"], optional = true }
|
||||
gag = { version = "1.0.0", default-features = false, optional = true }

[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", features = ["provider-http", "signers", "contract", "rpc-types-eth", "signer-wallet", "node-bindings"] }
foundry-compilers = { version = "0.4.1", features = ["svm-solc"] }
ethabi = "18"
indicatif = { version = "0.17.5", features = ["rayon"] }
gag = { version = "1.0.0", default_features = false }
instant = { version = "0.1" }
reqwest = { version = "0.12.4", default-features = false, features = [
    "default-tls",
    "multipart",
    "stream",
], optional = true }
openssl = { version = "0.10.55", features = ["vendored"], optional = true }
lazy_static = { version = "1.4.0", optional = true }
colored_json = { version = "3.0.1", default-features = false, optional = true }
tokio = { version = "1.35.0", default-features = false, features = [
] }
openssl = { version = "0.10.55", features = ["vendored"] }
tokio-postgres = "0.7.10"
pg_bigdecimal = "0.1.5"
futures-util = "0.3.30"
lazy_static = "1.4.0"
colored_json = { version = "3.0.1", default_features = false, optional = true }
plotters = { version = "0.3.0", default_features = false, optional = true }
regex = { version = "1", default_features = false }
tokio = { version = "1.35", default_features = false, features = [
    "macros",
    "rt-multi-thread",
], optional = true }
pyo3 = { version = "0.24.2", features = [
    "rt-multi-thread"
] }
tokio-util = { version = "0.7.9", features = ["codec"] }
pyo3 = { version = "0.21.2", features = [
    "extension-module",
    "abi3-py37",
    "macros",
], default-features = false, optional = true }
pyo3-async-runtimes = { git = "https://github.com/PyO3/pyo3-async-runtimes", version = "0.24.0", features = [
], default_features = false, optional = true }
pyo3-asyncio = { git = "https://github.com/jopemachine/pyo3-asyncio/", branch = "migration-pyo3-0.21", features = [
    "attributes",
    "tokio-runtime",
], default-features = false, optional = true }
pyo3-log = { version = "0.12.0", default-features = false, optional = true }
tract-onnx = { git = "https://github.com/sonos/tract/", rev = "37132e0397d0a73e5bd3a8615d932dabe44f6736", default-features = false, optional = true }
], default_features = false, optional = true }
pyo3-log = { version = "0.10.0", default_features = false, optional = true }
tract-onnx = { git = "https://github.com/sonos/tract/", rev = "05ebf550aa9922b221af4635c21a67a8d2af12a9", default_features = false, optional = true }
tabled = { version = "0.12.0", optional = true }

objc = { version = "0.2.4", optional = true }
pyo3-stub-gen = { version = "0.6.0", optional = true }
jemallocator = { version = "0.5", optional = true }
mimalloc = { version = "0.1", optional = true }
# universal bindings
uniffi = { version = "=0.28.0", optional = true }
getrandom = { version = "0.2.8", optional = true }
uniffi_bindgen = { version = "=0.28.0", optional = true }
camino = { version = "^1.1", optional = true }
uuid = { version = "1.10.0", features = ["v4"], optional = true }


[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies]
colored = { version = "2.0.0", default-features = false, optional = true }
env_logger = { version = "0.10.0", default-features = false, optional = true }
chrono = { version = "0.4.31", optional = true }
sha256 = { version = "1.4.0", optional = true }

colored = { version = "2.0.0", default_features = false, optional = true }
env_logger = { version = "0.10.0", default_features = false, optional = true }
chrono = "0.4.31"
sha256 = "1.4.0"

[target.'cfg(target_arch = "wasm32")'.dependencies]
serde_json = { version = "1.0.97", default-features = false, features = [
    "float_roundtrip",
    "raw_value",
] }
getrandom = { version = "0.2.8", features = ["js"] }
instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] }

@@ -127,14 +106,8 @@ console_error_panic_hook = "0.1.7"
wasm-bindgen-console-logger = "0.1.1"


[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dev-dependencies]
criterion = { version = "0.5.1", features = ["html_reports"] }


[build-dependencies]
uniffi = { version = "0.28", features = ["build"], optional = true }

[dev-dependencies]
criterion = { version = "0.3", features = ["html_reports"] }
tempfile = "3.3.0"
lazy_static = "1.4.0"
mnist = "0.5"

@@ -147,10 +120,6 @@ shellexpand = "3.1.0"
runner = 'wasm-bindgen-test-runner'


[[bench]]
name = "zero_finder"
harness = false

[[bench]]
name = "accum_dot"
harness = false

@@ -189,20 +158,16 @@ harness = false


[[bench]]
name = "sigmoid"
name = "relu"
harness = false

[[bench]]
name = "relu_lookupless"
harness = false

[[bench]]
name = "accum_matmul_sigmoid"
name = "accum_matmul_relu"
harness = false


[[bench]]
name = "accum_matmul_sigmoid_overflow"
name = "accum_matmul_relu_overflow"
harness = false

[[bin]]
@@ -211,93 +176,40 @@ test = false
bench = false
required-features = ["ezkl"]

[[bin]]
name = "ios_gen_bindings"
required-features = ["ios-bindings", "uuid", "camino", "uniffi_bindgen"]

[[bin]]
name = "py_stub_gen"
required-features = ["python-bindings"]

[features]
web = ["wasm-bindgen-rayon"]
default = [
    "eth-mv-lookup",
    "ezkl",
    "precompute-coset",
    "no-banner",
    "parallel-poly-read",
]
default = ["ezkl", "mv-lookup", "no-banner"]
onnx = ["dep:tract-onnx"]
python-bindings = ["pyo3", "pyo3-log", "pyo3-async-runtimes", "pyo3-stub-gen"]
universal-bindings = [
    "uniffi",
    "mv-lookup",
    "precompute-coset",
    "parallel-poly-read",
    "solidity-verifier-mv-lookup",
]
logging = ["dep:colored", "dep:env_logger", "dep:chrono"]
ios-bindings = ["universal-bindings"]
ios-bindings-test = ["ios-bindings", "uniffi/bindgen-tests"]
python-bindings = ["pyo3", "pyo3-log", "pyo3-asyncio"]
ezkl = [
    "onnx",
    "serde",
    "serde_json",
    "log",
    "colored",
    "env_logger",
    "tabled/color",
    "serde_json/std",
    "colored_json",
    "dep:ethabi",
    "dep:indicatif",
    "dep:gag",
    "dep:reqwest",
    "dep:lazy_static",
    "dep:tokio",
    "dep:openssl",
    "dep:chrono",
    "dep:sha256",
    "dep:clap_complete",
    "dep:semver",
    "dep:clap",
    "dep:tosubcommand",
    "logging",
]
eth = ["dep:alloy", "dep:foundry-compilers", "dep:ethabi"]
solidity-verifier = ["dep:halo2_solidity_verifier"]
solidity-verifier-mv-lookup = ["halo2_solidity_verifier/mv-lookup"]
eth-mv-lookup = ["solidity-verifier-mv-lookup", "mv-lookup", "eth"]
eth-original-lookup = ["eth", "solidity-verifier"]
parallel-poly-read = [
    "halo2_proofs/circuit-params",
    "halo2_proofs/parallel-poly-read",
]
mv-lookup = ["halo2_proofs/mv-lookup", "snark-verifier/mv-lookup"]
asm = ["halo2curves/asm", "halo2_proofs/asm"]
precompute-coset = ["halo2_proofs/precompute-coset"]
mv-lookup = [
    "halo2_proofs/mv-lookup",
    "snark-verifier/mv-lookup",
    "halo2_solidity_verifier/mv-lookup",
]
det-prove = []
icicle = ["halo2_proofs/icicle_gpu"]
empty-cmd = []
no-banner = []
no-update = []
macos-metal = ["halo2_proofs/macos"]
ios-metal = ["halo2_proofs/ios"]
jemalloc = ["dep:jemallocator"]
mimalloc = ["dep:mimalloc"]
reusable-verifier = []
metal = ["dep:metal", "dep:objc"]

# icicle patch to 0.1.0 if feature icicle is enabled
[patch.'https://github.com/ingonyama-zk/icicle']
icicle = { git = "https://github.com/ingonyama-zk/icicle?rev=45b00fb", package = "icicle", branch = "fix/vhnat/ezkl-build-fix" }

[patch.'https://github.com/zkonduit/halo2']
halo2_proofs = { git = "https://github.com/zkonduit/halo2?branch=ac/optional-selector-poly#54f54453cf186aa5d89579c4e7663f9a27cfb89a", package = "halo2_proofs", branch = "ac/optional-selector-poly" }

[patch.crates-io]
uniffi_testing = { git = "https://github.com/ElusAegis/uniffi-rs", branch = "feat/testing-feature-build-fix" }

[profile.release]
# debug = true
rustflags = ["-C", "relocation-model=pic"]
lto = "fat"
codegen-units = 1
# panic = "abort"


[profile.test-runs]
inherits = "dev"
opt-level = 3

[package.metadata.wasm-pack.profile.release]
wasm-opt = ["-O4", "--flexible-inline-max-function-size", "4294967295"]

31
README.md
@@ -43,7 +43,7 @@ The generated proofs can then be verified with much less computational resources

----------------------

### Getting Started ⚙️
### getting started ⚙️

The easiest way to get started is to try out a notebook.

@@ -76,7 +76,12 @@ For more details visit the [docs](https://docs.ezkl.xyz). The CLI is faster than

Build the auto-generated Rust documentation and open it in your browser locally with `cargo doc --open`.

### Building the Project 🔨
#### In-browser EVM verifier

As an alternative to running the native Halo2 verifier as a WASM binding in the browser, you can use the in-browser EVM verifier. You can find its source code, along with a README with instructions on how to use it, in the `in-browser-evm-verifier` directory.

### building the project 🔨

#### Rust CLI

@@ -91,7 +96,7 @@ cargo install --locked --path .

#### Building Python Bindings
#### building python bindings
Python bindings exist and can be built using `maturin`. You will need `rust` and `cargo` to be installed.

```bash
@@ -121,7 +126,7 @@ unset ENABLE_ICICLE_GPU

**NOTE:** Even with the above environment variable set, icicle is disabled for circuits where k <= 8. To change the value of `k` where icicle is enabled, you can set the environment variable `ICICLE_SMALL_K`.

### Contributing 🌎
### contributing 🌎

If you're interested in contributing and are unsure where to start, reach out to one of the maintainers:

@@ -139,21 +144,13 @@ More broadly:

Any contribution intentionally submitted for inclusion in the work by you shall be licensed to Zkonduit Inc. under the terms and conditions specified in the [CLA](https://github.com/zkonduit/ezkl/blob/main/cla.md), which you agree to by intentionally submitting a contribution. In particular, you have the right to submit the contribution and we can distribute it, among other terms and conditions.

### no security guarantees

### Audits & Security
Ezkl is unaudited, beta software undergoing rapid development. There may be bugs. No guarantees of security are made and it should not be relied on in production.

[v21.0.0](https://github.com/zkonduit/ezkl/releases/tag/v21.0.0) has been audited by Trail of Bits; the report can be found [here](https://github.com/trailofbits/publications/blob/master/reviews/2025-03-zkonduit-ezkl-securityreview.pdf).
> NOTE: Because operations are quantized when they are converted from an onnx file to a zk-circuit, outputs in python and ezkl may differ slightly.

> NOTE: Because operations are quantized when they are converted from an onnx file to a zk-circuit, outputs in python and ezkl may differ slightly.
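A minimal sketch of why such gaps appear (hypothetical scale and rounding, not ezkl's exact behavior): mapping a float onto a fixed-point grid and back loses precision.

```rust
// Quantize a float to a fixed-point integer at scale 2^7, then map it back.
fn quantize(x: f64, scale: u32) -> i64 {
    (x * (1u64 << scale) as f64).round() as i64
}

fn dequantize(q: i64, scale: u32) -> f64 {
    q as f64 / (1u64 << scale) as f64
}

fn main() {
    let x = 0.123456789;
    let q = quantize(x, 7); // 16
    // Round-trips to 0.125: a small, expected quantization gap.
    println!("{x} -> {}", dequantize(q, 7));
}
```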
### no warranty


Check out `docs/advanced_security` for more advanced information on potential threat vectors that are specific to zero-knowledge inference, quantization, and to machine learning models generally.


### No Warranty

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
Copyright (c) 2024 Zkonduit Inc. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Copyright (c) 2025 Zkonduit Inc.

167
abis/DataAttestation.json
Normal file
@@ -0,0 +1,167 @@
[
  {
    "inputs": [
      {
        "internalType": "address[]",
        "name": "_contractAddresses",
        "type": "address[]"
      },
      {
        "internalType": "bytes[][]",
        "name": "_callData",
        "type": "bytes[][]"
      },
      {
        "internalType": "uint256[][]",
        "name": "_decimals",
        "type": "uint256[][]"
      },
      {
        "internalType": "uint256[]",
        "name": "_scales",
        "type": "uint256[]"
      },
      {
        "internalType": "uint8",
        "name": "_instanceOffset",
        "type": "uint8"
      },
      {
        "internalType": "address",
        "name": "_admin",
        "type": "address"
      }
    ],
    "stateMutability": "nonpayable",
    "type": "constructor"
  },
  {
    "inputs": [
      {
        "internalType": "uint256",
        "name": "",
        "type": "uint256"
      }
    ],
    "name": "accountCalls",
    "outputs": [
      {
        "internalType": "address",
        "name": "contractAddress",
        "type": "address"
      },
      {
        "internalType": "uint256",
        "name": "callCount",
        "type": "uint256"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "admin",
    "outputs": [
      {
        "internalType": "address",
        "name": "",
        "type": "address"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "instanceOffset",
    "outputs": [
      {
        "internalType": "uint8",
        "name": "",
        "type": "uint8"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "uint256",
        "name": "",
        "type": "uint256"
      }
    ],
    "name": "scales",
    "outputs": [
      {
        "internalType": "uint256",
        "name": "",
        "type": "uint256"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "address[]",
        "name": "_contractAddresses",
        "type": "address[]"
      },
      {
        "internalType": "bytes[][]",
        "name": "_callData",
        "type": "bytes[][]"
      },
      {
        "internalType": "uint256[][]",
        "name": "_decimals",
        "type": "uint256[][]"
      }
    ],
    "name": "updateAccountCalls",
    "outputs": [],
    "stateMutability": "nonpayable",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "address",
        "name": "_admin",
        "type": "address"
      }
    ],
    "name": "updateAdmin",
    "outputs": [],
    "stateMutability": "nonpayable",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "address",
        "name": "verifier",
        "type": "address"
      },
      {
        "internalType": "bytes",
        "name": "encoded",
        "type": "bytes"
      }
    ],
    "name": "verifyWithDataAttestation",
    "outputs": [
      {
        "internalType": "bool",
        "name": "",
        "type": "bool"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  }
]
50
abis/QuantizeData.json
Normal file
@@ -0,0 +1,50 @@
[
  {
    "inputs": [
      {
        "internalType": "bytes[]",
        "name": "data",
        "type": "bytes[]"
      },
      {
        "internalType": "uint256[]",
        "name": "decimals",
        "type": "uint256[]"
      },
      {
        "internalType": "uint256[]",
        "name": "scales",
        "type": "uint256[]"
      }
    ],
    "name": "quantize_data",
    "outputs": [
      {
        "internalType": "int64[]",
        "name": "quantized_data",
        "type": "int64[]"
      }
    ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "int64[]",
        "name": "quantized_data",
        "type": "int64[]"
      }
    ],
    "name": "to_field_element",
    "outputs": [
      {
        "internalType": "uint256[]",
        "name": "output",
        "type": "uint256[]"
      }
    ],
    "stateMutability": "pure",
    "type": "function"
  }
]
32
abis/TestReads.json
Normal file
@@ -0,0 +1,32 @@
[
  {
    "inputs": [
      {
        "internalType": "int256[]",
        "name": "_numbers",
        "type": "int256[]"
      }
    ],
    "stateMutability": "nonpayable",
    "type": "constructor"
  },
  {
    "inputs": [
      {
        "internalType": "uint256",
        "name": "",
        "type": "uint256"
      }
    ],
    "name": "arr",
    "outputs": [
      {
        "internalType": "int256",
        "name": "",
        "type": "int256"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  }
]
@@ -64,17 +64,14 @@ impl Circuit<Fr> for MyCircuit {
        layouter.assign_region(
            || "",
            |region| {
                let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
                let mut region = region::RegionCtx::new(region, 0, 1);
                config
                    .layout(
                        &mut region,
                        &[&self.image, &self.kernel, &self.bias],
                        &[self.image.clone(), self.kernel.clone(), self.bias.clone()],
                        Box::new(PolyOp::Conv {
                            padding: vec![(0, 0)],
                            stride: vec![1; 2],
                            group: 1,
                            data_format: DataFormat::NCHW,
                            kernel_format: KernelFormat::OIHW,
                        }),
                    )
                    .unwrap();

@@ -15,7 +15,6 @@ use halo2_proofs::{
    plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use itertools::Itertools;
use rand::rngs::OsRng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::marker::PhantomData;
@@ -56,11 +55,11 @@ impl Circuit<Fr> for MyCircuit {
        layouter.assign_region(
            || "",
            |region| {
                let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
                let mut region = region::RegionCtx::new(region, 0, 1);
                config
                    .layout(
                        &mut region,
                        &self.inputs.iter().collect_vec(),
                        &self.inputs,
                        Box::new(PolyOp::Einsum {
                            equation: "i,i->".to_string(),
                        }),

@@ -15,7 +15,6 @@ use halo2_proofs::{
    plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use itertools::Itertools;
use rand::rngs::OsRng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::marker::PhantomData;
@@ -58,11 +57,11 @@ impl Circuit<Fr> for MyCircuit {
        layouter.assign_region(
            || "",
            |region| {
                let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
                let mut region = region::RegionCtx::new(region, 0, 1);
                config
                    .layout(
                        &mut region,
                        &self.inputs.iter().collect_vec(),
                        &self.inputs,
                        Box::new(PolyOp::Einsum {
                            equation: "ab,bc->ac".to_string(),
                        }),

@@ -17,7 +17,6 @@ use halo2_proofs::{
    plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use itertools::Itertools;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::marker::PhantomData;

@@ -58,15 +57,7 @@ impl Circuit<Fr> for MyCircuit {

        // sets up a new relu table
        base_config
            .configure_lookup(
                cs,
                &b,
                &output,
                &a,
                BITS,
                K,
                &LookupOp::Sigmoid { scale: 1.0.into() },
            )
            .configure_lookup(cs, &b, &output, &a, BITS, K, &LookupOp::ReLU)
            .unwrap();

        MyConfig { base_config }
@@ -84,18 +75,14 @@ impl Circuit<Fr> for MyCircuit {
                let op = PolyOp::Einsum {
                    equation: "ij,jk->ik".to_string(),
                };
                let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
                let mut region = region::RegionCtx::new(region, 0, 1);
                let output = config
                    .base_config
                    .layout(&mut region, &self.inputs.iter().collect_vec(), Box::new(op))
                    .layout(&mut region, &self.inputs, Box::new(op))
                    .unwrap();
                let _output = config
                    .base_config
                    .layout(
                        &mut region,
                        &[&output.unwrap()],
                        Box::new(LookupOp::Sigmoid { scale: 1.0.into() }),
                    )
                    .layout(&mut region, &[output.unwrap()], Box::new(LookupOp::ReLU))
                    .unwrap();
                Ok(())
            },

@@ -17,7 +17,6 @@ use halo2_proofs::{
    plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use itertools::Itertools;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::marker::PhantomData;

@@ -59,15 +58,7 @@ impl Circuit<Fr> for MyCircuit {

        // sets up a new relu table
        base_config
            .configure_lookup(
                cs,
                &b,
                &output,
                &a,
                BITS,
                k,
                &LookupOp::Sigmoid { scale: 1.0.into() },
            )
            .configure_lookup(cs, &b, &output, &a, BITS, k, &LookupOp::ReLU)
            .unwrap();

        MyConfig { base_config }
@@ -85,18 +76,14 @@ impl Circuit<Fr> for MyCircuit {
                let op = PolyOp::Einsum {
                    equation: "ij,jk->ik".to_string(),
                };
                let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
                let mut region = region::RegionCtx::new(region, 0, 1);
                let output = config
                    .base_config
                    .layout(&mut region, &self.inputs.iter().collect_vec(), Box::new(op))
                    .layout(&mut region, &self.inputs, Box::new(op))
                    .unwrap();
                let _output = config
                    .base_config
                    .layout(
                        &mut region,
                        &[&output.unwrap()],
                        Box::new(LookupOp::Sigmoid { scale: 1.0.into() }),
                    )
                    .layout(&mut region, &[output.unwrap()], Box::new(LookupOp::ReLU))
                    .unwrap();
                Ok(())
            },

@@ -15,7 +15,6 @@ use halo2_proofs::{
    plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use itertools::Itertools;
use rand::rngs::OsRng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::marker::PhantomData;
@@ -56,11 +55,11 @@ impl Circuit<Fr> for MyCircuit {
        layouter.assign_region(
            || "",
            |region| {
                let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
                let mut region = region::RegionCtx::new(region, 0, 1);
                config
                    .layout(
                        &mut region,
                        &self.inputs.iter().collect_vec(),
                        &self.inputs,
                        Box::new(PolyOp::Sum { axes: vec![0] }),
                    )
                    .unwrap();

@@ -59,17 +59,16 @@ impl Circuit<Fr> for MyCircuit {
        layouter.assign_region(
            || "",
            |region| {
                let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
                let mut region = region::RegionCtx::new(region, 0, 1);
                config
                    .layout(
                        &mut region,
                        &[&self.image],
                        &[self.image.clone()],
                        Box::new(HybridOp::SumPool {
                            padding: vec![(0, 0); 2],
                            stride: vec![1, 1],
                            kernel_shape: vec![2, 2],
                            normalized: false,
                            data_format: DataFormat::NCHW,
                        }),
                    )
                    .unwrap();

@@ -15,7 +15,6 @@ use halo2_proofs::{
    plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use itertools::Itertools;
use rand::rngs::OsRng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::marker::PhantomData;
@@ -56,13 +55,9 @@ impl Circuit<Fr> for MyCircuit {
        layouter.assign_region(
            || "",
            |region| {
                let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
                let mut region = region::RegionCtx::new(region, 0, 1);
                config
                    .layout(
                        &mut region,
                        &self.inputs.iter().collect_vec(),
                        Box::new(PolyOp::Add),
                    )
                    .layout(&mut region, &self.inputs, Box::new(PolyOp::Add))
                    .unwrap();
                Ok(())
            },

@@ -16,7 +16,6 @@ use halo2_proofs::{
    plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use itertools::Itertools;
use rand::rngs::OsRng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::marker::PhantomData;
@@ -57,13 +56,9 @@ impl Circuit<Fr> for MyCircuit {
        layouter.assign_region(
            || "",
            |region| {
                let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
                let mut region = RegionCtx::new(region, 0, 1);
                config
                    .layout(
                        &mut region,
                        &self.inputs.iter().collect_vec(),
                        Box::new(PolyOp::Pow(4)),
                    )
                    .layout(&mut region, &self.inputs, Box::new(PolyOp::Pow(4)))
                    .unwrap();
                Ok(())
            },

@@ -23,6 +23,8 @@ use halo2curves::bn256::{Bn256, Fr};
use rand::rngs::OsRng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;

const L: usize = 10;

#[derive(Clone, Debug)]
struct MyCircuit {
    image: ValTensor<Fr>,
@@ -38,7 +40,7 @@ impl Circuit<Fr> for MyCircuit {
    }

    fn configure(cs: &mut ConstraintSystem<Fr>) -> Self::Config {
        PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::configure(cs, ())
        PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, 10>::configure(cs, ())
    }

    fn synthesize(
@@ -46,7 +48,7 @@ impl Circuit<Fr> for MyCircuit {
        config: Self::Config,
        mut layouter: impl Layouter<Fr>,
    ) -> Result<(), Error> {
        let chip: PoseidonChip<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE> =
        let chip: PoseidonChip<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, L> =
            PoseidonChip::new(config);
        chip.layout(&mut layouter, &[self.image.clone()], 0, &mut HashMap::new())?;
        Ok(())
@@ -57,7 +59,7 @@ fn runposeidon(c: &mut Criterion) {
    let mut group = c.benchmark_group("poseidon");

    for size in [64, 784, 2352, 12288].iter() {
        let k = (PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::num_rows(*size)
        let k = (PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, L>::num_rows(*size)
            as f32)
            .log2()
            .ceil() as u32;
@@ -65,7 +67,7 @@ fn runposeidon(c: &mut Criterion) {

        let message = (0..*size).map(|_| Fr::random(OsRng)).collect::<Vec<_>>();
        let _output =
            PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::run(message.to_vec())
            PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, L>::run(message.to_vec())
                .unwrap();

        let mut image = Tensor::from(message.into_iter().map(Value::known));

@@ -2,7 +2,6 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Through
use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::table::Range;
use ezkl::circuit::{ops::lookup::LookupOp, BaseConfig as Config, CheckMode};
use ezkl::fieldutils::IntegerRep;
use ezkl::pfsys::create_proof_circuit;
use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
@@ -42,7 +41,7 @@ impl Circuit<Fr> for NLCircuit {
            .map(|_| VarTensor::new_advice(cs, K, 1, LEN))
            .collect::<Vec<_>>();

        let nl = LookupOp::Sigmoid { scale: 1.0.into() };
        let nl = LookupOp::ReLU;

        let mut config = Config::default();

@@ -63,13 +62,9 @@ impl Circuit<Fr> for NLCircuit {
        layouter.assign_region(
            || "",
            |region| {
                let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
                let mut region = RegionCtx::new(region, 0, 1);
                config
                    .layout(
                        &mut region,
                        &[&self.input],
                        Box::new(LookupOp::Sigmoid { scale: 1.0.into() }),
                    )
                    .layout(&mut region, &[self.input.clone()], Box::new(LookupOp::ReLU))
                    .unwrap();
                Ok(())
            },
@@ -89,7 +84,7 @@ fn runrelu(c: &mut Criterion) {
    };

    let input: Tensor<Value<Fr>> =
        Tensor::<IntegerRep>::from((0..len).map(|_| rng.gen_range(0..10))).into();
        Tensor::<i32>::from((0..len).map(|_| rng.gen_range(0..10))).into();

    let circuit = NLCircuit {
        input: ValTensor::from(input.clone()),
@@ -1,150 +0,0 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::{BaseConfig as Config, CheckMode};
use ezkl::fieldutils::IntegerRep;
use ezkl::pfsys::create_proof_circuit;
use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2_proofs::poly::kzg::multiopen::{ProverSHPLONK, VerifierSHPLONK};
use halo2_proofs::poly::kzg::strategy::SingleStrategy;
use halo2_proofs::{
    circuit::{Layouter, SimpleFloorPlanner, Value},
    plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use rand::Rng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;

static mut LEN: usize = 4;
const K: usize = 16;

#[derive(Clone)]
struct NLCircuit {
    pub input: ValTensor<Fr>,
}

impl Circuit<Fr> for NLCircuit {
    type Config = Config<Fr>;
    type FloorPlanner = SimpleFloorPlanner;
    type Params = ();

    fn without_witnesses(&self) -> Self {
        self.clone()
    }

    fn configure(cs: &mut ConstraintSystem<Fr>) -> Self::Config {
        unsafe {
            let advices = (0..3)
                .map(|_| VarTensor::new_advice(cs, K, 1, LEN))
                .collect::<Vec<_>>();

            let mut config = Config::default();

            config
                .configure_range_check(cs, &advices[0], &advices[1], (-1, 1), K)
                .unwrap();

            config
                .configure_range_check(cs, &advices[0], &advices[1], (0, 1023), K)
                .unwrap();

            let _constant = VarTensor::constant_cols(cs, K, LEN, false);

            config
        }
    }

    fn synthesize(
        &self,
        mut config: Self::Config,
        mut layouter: impl Layouter<Fr>, // layouter is our 'write buffer' for the circuit
    ) -> Result<(), Error> {
        config.layout_range_checks(&mut layouter).unwrap();
        layouter.assign_region(
            || "",
            |region| {
                let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
                config
                    .layout(
                        &mut region,
                        &[&self.input],
                        Box::new(PolyOp::LeakyReLU {
                            slope: 0.0.into(),
                            scale: 1,
                        }),
                    )
                    .unwrap();
                Ok(())
            },
        )?;
        Ok(())
    }
}

fn runrelu(c: &mut Criterion) {
    let mut group = c.benchmark_group("relu");

    let mut rng = rand::thread_rng();
    let params = gen_srs::<KZGCommitmentScheme<_>>(17);
    for &len in [4, 8].iter() {
        unsafe {
            LEN = len;
        };

        let input: Tensor<Value<Fr>> =
            Tensor::<IntegerRep>::from((0..len).map(|_| rng.gen_range(0..10))).into();

        let circuit = NLCircuit {
            input: ValTensor::from(input.clone()),
        };

        group.throughput(Throughput::Elements(len as u64));
        group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| {
            b.iter(|| {
                create_keys::<KZGCommitmentScheme<Bn256>, NLCircuit>(&circuit, &params, true)
                    .unwrap();
            });
        });

        let pk =
            create_keys::<KZGCommitmentScheme<Bn256>, NLCircuit>(&circuit, &params, true).unwrap();

        group.throughput(Throughput::Elements(len as u64));
        group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| {
            b.iter(|| {
                let prover = create_proof_circuit::<
                    KZGCommitmentScheme<_>,
                    NLCircuit,
                    ProverSHPLONK<_>,
                    VerifierSHPLONK<_>,
                    SingleStrategy<_>,
                    _,
                    EvmTranscript<_, _, _, _>,
                    EvmTranscript<_, _, _, _>,
                >(
                    circuit.clone(),
                    vec![],
                    &params,
                    &pk,
                    CheckMode::UNSAFE,
                    ezkl::Commitments::KZG,
                    TranscriptType::EVM,
                    None,
                    None,
                );
                prover.unwrap();
            });
        });
    }
    group.finish();
}

criterion_group! {
    name = benches;
    config = Criterion::default().with_plots();
    targets = runrelu
}
criterion_main!(benches);
@@ -1,117 +0,0 @@
use std::thread;

use criterion::{black_box, criterion_group, criterion_main, Criterion};
use halo2curves::{bn256::Fr as F, ff::Field};
use maybe_rayon::{
    iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator},
    slice::ParallelSlice,
};
use rand::Rng;

// Assuming these are your types
#[derive(Clone)]
#[allow(dead_code)]
enum ValType {
    Constant(F),
    AssignedConstant(usize, F),
    Other,
}

// Helper to generate test data
fn generate_test_data(size: usize, zero_probability: f64) -> Vec<ValType> {
    let mut rng = rand::thread_rng();
    (0..size)
        .map(|_i| {
            if rng.r#gen::<f64>() < zero_probability {
                ValType::Constant(F::ZERO)
            } else {
                ValType::Constant(F::ONE) // Or some other non-zero value
            }
        })
        .collect()
}

fn bench_zero_finding(c: &mut Criterion) {
    let sizes = [
        1_000,         // 1K
        10_000,        // 10K
        100_000,       // 100K
        256 * 256 * 2, // Our specific case
        1_000_000,     // 1M
        10_000_000,    // 10M
    ];

    let zero_probability = 0.1; // 10% zeros

    let mut group = c.benchmark_group("zero_finding");
    group.sample_size(10); // Adjust based on your needs

    for &size in &sizes {
        let data = generate_test_data(size, zero_probability);

        // Benchmark sequential version
        group.bench_function(format!("sequential_{}", size), |b| {
            b.iter(|| {
                let result = data
                    .iter()
                    .enumerate()
                    .filter_map(|(i, e)| match e {
                        ValType::Constant(r) | ValType::AssignedConstant(_, r) => {
                            (*r == F::ZERO).then_some(i)
                        }
                        _ => None,
                    })
                    .collect::<Vec<_>>();
                black_box(result)
            })
        });

        // Benchmark parallel version
        group.bench_function(format!("parallel_{}", size), |b| {
            b.iter(|| {
                let result = data
                    .par_iter()
                    .enumerate()
                    .filter_map(|(i, e)| match e {
                        ValType::Constant(r) | ValType::AssignedConstant(_, r) => {
                            (*r == F::ZERO).then_some(i)
                        }
                        _ => None,
                    })
                    .collect::<Vec<_>>();
                black_box(result)
            })
        });

        // Benchmark chunked parallel version
        group.bench_function(format!("chunked_parallel_{}", size), |b| {
            b.iter(|| {
                let num_cores = thread::available_parallelism()
                    .map(|n| n.get())
                    .unwrap_or(1);
                let chunk_size = (size / num_cores).max(100);

                let result = data
                    .par_chunks(chunk_size)
                    .enumerate()
                    .flat_map(|(chunk_idx, chunk)| {
                        chunk
                            .par_iter() // Make sure we use par_iter() here
                            .enumerate()
                            .filter_map(move |(i, e)| match e {
                                ValType::Constant(r) | ValType::AssignedConstant(_, r) => {
                                    (*r == F::ZERO).then_some(chunk_idx * chunk_size + i)
                                }
                                _ => None,
                            })
                    })
                    .collect::<Vec<_>>();
                black_box(result)
            })
        });
    }
    group.finish();
}

criterion_group!(benches, bench_zero_finding);
criterion_main!(benches);
7
build.rs
@@ -1,7 +0,0 @@
fn main() {
    if cfg!(feature = "ios-bindings-test") {
        println!("cargo::rustc-env=UNIFFI_CARGO_BUILD_EXTRA_ARGS=--features=ios-bindings --no-default-features");
    }

    println!("cargo::rerun-if-changed=build.rs");
}
368
contracts/AttestData.sol
Normal file
@@ -0,0 +1,368 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.20;
contract LoadInstances {
    /**
     * @dev Parse the instances array from the Halo2Verifier encoded calldata.
     * @notice must pass encoded bytes from memory
     * @param encoded - verifier calldata
     */
    function getInstancesMemory(
        bytes memory encoded
    ) internal pure returns (uint256[] memory instances) {
        bytes4 funcSig;
        uint256 instances_offset;
        uint256 instances_length;
        assembly {
            // fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
            funcSig := mload(add(encoded, 0x20))

            // Fetch instances offset which is 4 + 32 + 32 bytes away from
            // start of encoded for `verifyProof(bytes,uint256[])`,
            // and 4 + 32 + 32 + 32 away for `verifyProof(address,bytes,uint256[])`

            instances_offset := mload(
                add(encoded, add(0x44, mul(0x20, eq(funcSig, 0xaf83a18d))))
            )

            instances_length := mload(add(add(encoded, 0x24), instances_offset))
        }
        instances = new uint256[](instances_length); // Allocate memory for the instances array.
        assembly {
            // Now instances points to the start of the array data
            // (right after the length field).
            for {
                let i := 0x20
            } lt(i, add(mul(instances_length, 0x20), 0x20)) {
                i := add(i, 0x20)
            } {
                mstore(
                    add(instances, i),
                    mload(add(add(encoded, add(i, 0x24)), instances_offset))
                )
            }
        }
    }
    /**
     * @dev Parse the instances array from the Halo2Verifier encoded calldata.
     * @notice must pass encoded bytes from calldata
     * @param encoded - verifier calldata
     */
    function getInstancesCalldata(
        bytes calldata encoded
    ) internal pure returns (uint256[] memory instances) {
        bytes4 funcSig;
        uint256 instances_offset;
        uint256 instances_length;
        assembly {
            // fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
            funcSig := calldataload(encoded.offset)

            // Fetch instances offset which is 4 + 32 + 32 bytes away from
            // start of encoded for `verifyProof(bytes,uint256[])`,
            // and 4 + 32 + 32 + 32 away for `verifyProof(address,bytes,uint256[])`

            instances_offset := calldataload(
                add(
                    encoded.offset,
                    add(0x24, mul(0x20, eq(funcSig, 0xaf83a18d)))
                )
            )

            instances_length := calldataload(
                add(add(encoded.offset, 0x04), instances_offset)
            )
        }
        instances = new uint256[](instances_length); // Allocate memory for the instances array.
        assembly {
            // Now instances points to the start of the array data
            // (right after the length field).

            for {
                let i := 0x20
            } lt(i, add(mul(instances_length, 0x20), 0x20)) {
                i := add(i, 0x20)
            } {
                mstore(
                    add(instances, i),
                    calldataload(
                        add(add(encoded.offset, add(i, 0x04)), instances_offset)
                    )
                )
            }
        }
    }
}

// This contract serves as a Data Attestation Verifier for the EZKL model.
// It is designed to read and attest to instances of proofs generated from a specified circuit.
// It is particularly constructed to read only int256 data from specified on-chain contracts' view functions.

// Overview of the contract functionality:
// 1. Initialization: Through the constructor, it sets up the contract calls that the EZKL model will read from.
// 2. Data Quantization: Quantizes the returned data into a scaled fixed-point representation. See the `quantizeData` method for details.
// 3. Static Calls: Makes static calls to fetch data from other contracts. See the `staticCall` method.
// 4. Field Element Conversion: The fixed-point representation is then converted into a field element modulo P using the `toFieldElement` method.
// 5. Data Attestation: The `attestData` method validates that the public instances match the data fetched and processed by the contract.
// 6. Proof Verification: The `verifyWithDataAttestation` method parses the instances out of the encoded calldata and calls the `attestData` method to validate the public instances,
// then calls the `verifyProof` method to verify the proof on the verifier.

contract DataAttestation is LoadInstances {
    /**
     * @notice Struct used to make view-only calls to accounts to fetch the data that EZKL reads from.
     * @param contractAddress - the address of the account to make calls to
     * @param callData - the abi encoded function calls to make to the `contractAddress`
     */
    struct AccountCall {
        address contractAddress;
        mapping(uint256 => bytes) callData;
        mapping(uint256 => uint256) decimals;
        uint callCount;
    }
    AccountCall[] public accountCalls;

    uint[] public scales;

    address public admin;

    /**
     * @notice EZKL P value
     * @dev In order to prevent the verifier from accepting two versions of the same pubInput, n and the quantity (n + P), where n + P <= 2^256, we require that all instances are strictly less than P.
     * @dev The reason for this is that the assembly code of the verifier performs all arithmetic operations modulo P and as a consequence can't distinguish between n and n + P.
     */
    uint256 constant ORDER =
        uint256(
            0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
        );

    uint256 constant INPUT_CALLS = 0;

    uint256 constant OUTPUT_CALLS = 0;

    uint8 public instanceOffset;

    /**
     * @dev Initialize the contract with account calls the EZKL model will read from.
     * @param _contractAddresses - The addresses of all the contracts EZKL reads storage from.
     * @param _callData - The abi encoded function calls to make to the `contractAddress` that EZKL reads storage from.
     */
    constructor(
        address[] memory _contractAddresses,
        bytes[][] memory _callData,
        uint256[][] memory _decimals,
        uint[] memory _scales,
        uint8 _instanceOffset,
        address _admin
    ) {
        admin = _admin;
        for (uint i; i < _scales.length; i++) {
            scales.push(1 << _scales[i]);
        }
        populateAccountCalls(_contractAddresses, _callData, _decimals);
        instanceOffset = _instanceOffset;
    }

    function updateAdmin(address _admin) external {
        require(msg.sender == admin, "Only admin can update admin");
        if (_admin == address(0)) {
            revert();
        }
        admin = _admin;
    }

    function updateAccountCalls(
        address[] memory _contractAddresses,
        bytes[][] memory _callData,
        uint256[][] memory _decimals
    ) external {
        require(msg.sender == admin, "Only admin can update account calls");
        populateAccountCalls(_contractAddresses, _callData, _decimals);
    }

    function populateAccountCalls(
        address[] memory _contractAddresses,
        bytes[][] memory _callData,
        uint256[][] memory _decimals
    ) internal {
        require(
            _contractAddresses.length == _callData.length &&
                accountCalls.length == _contractAddresses.length,
            "Invalid input length"
        );
        require(
            _decimals.length == _contractAddresses.length,
            "Invalid number of decimals"
        );
        // fill in the accountCalls storage array
        uint counter = 0;
        for (uint256 i = 0; i < _contractAddresses.length; i++) {
            AccountCall storage accountCall = accountCalls[i];
            accountCall.contractAddress = _contractAddresses[i];
            accountCall.callCount = _callData[i].length;
            for (uint256 j = 0; j < _callData[i].length; j++) {
                accountCall.callData[j] = _callData[i][j];
                accountCall.decimals[j] = 10 ** _decimals[i][j];
            }
            // count the total number of storage reads across all of the accounts
            counter += _callData[i].length;
        }
        require(
            counter == INPUT_CALLS + OUTPUT_CALLS,
            "Invalid number of calls"
        );
    }

    function mulDiv(
        uint256 x,
        uint256 y,
        uint256 denominator
    ) internal pure returns (uint256 result) {
        unchecked {
            uint256 prod0;
            uint256 prod1;
            assembly {
                let mm := mulmod(x, y, not(0))
                prod0 := mul(x, y)
                prod1 := sub(sub(mm, prod0), lt(mm, prod0))
            }

            if (prod1 == 0) {
                return prod0 / denominator;
            }

            require(denominator > prod1, "Math: mulDiv overflow");

            uint256 remainder;
            assembly {
                remainder := mulmod(x, y, denominator)
                prod1 := sub(prod1, gt(remainder, prod0))
                prod0 := sub(prod0, remainder)
            }

            uint256 twos = denominator & (~denominator + 1);
            assembly {
                denominator := div(denominator, twos)
                prod0 := div(prod0, twos)
                twos := add(div(sub(0, twos), twos), 1)
            }

            prod0 |= prod1 * twos;

            uint256 inverse = (3 * denominator) ^ 2;

            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;

            result = prod0 * inverse;
            return result;
        }
    }
    /**
     * @dev Quantize the data returned from the account calls to the scale used by the EZKL model.
     * @param data - The data returned from the account calls.
     * @param decimals - The number of decimals the data returned from the account calls has (for floating point representation).
     * @param scale - The scale used to convert the floating point value into a fixed point value.
     */
    function quantizeData(
        bytes memory data,
        uint256 decimals,
        uint256 scale
    ) internal pure returns (int256 quantized_data) {
        int x = abi.decode(data, (int256));
        bool neg = x < 0;
        if (neg) x = -x;
        uint output = mulDiv(uint256(x), scale, decimals);
        if (mulmod(uint256(x), scale, decimals) * 2 >= decimals) {
            output += 1;
        }
        quantized_data = neg ? -int256(output) : int256(output);
    }
    /**
     * @dev Make a static call to the account to fetch the data that EZKL reads from.
     * @param target - The address of the account to make calls to.
     * @param data - The abi encoded function calls to make to the `contractAddress` that EZKL reads storage from.
     * @return The data returned from the account calls. (Must come from either a view or pure function. Will throw an error otherwise)
     */
    function staticCall(
        address target,
        bytes memory data
    ) internal view returns (bytes memory) {
        (bool success, bytes memory returndata) = target.staticcall(data);
        if (success) {
            if (returndata.length == 0) {
                require(
                    target.code.length > 0,
                    "Address: call to non-contract"
                );
            }
            return returndata;
        } else {
            revert("Address: low-level call failed");
        }
    }
    /**
     * @dev Convert the fixed point quantized data into a field element.
     * @param x - The quantized data.
     * @return field_element - The field element.
     */
    function toFieldElement(
        int256 x
    ) internal pure returns (uint256 field_element) {
        // The casting down to uint256 is safe because the order is about 2^254, and the value
        // of x ranges from -2^127 to 2^127, so x + int(ORDER) is always positive.
        return uint256(x + int(ORDER)) % ORDER;
    }

    /**
     * @dev Make the account calls to fetch the data that EZKL reads from and attest to the data.
     * @param instances - The public instances to the proof (the data in the proof that is publicly accessible to the verifier).
     */
    function attestData(uint256[] memory instances) internal view {
        require(
            instances.length >= INPUT_CALLS + OUTPUT_CALLS,
            "Invalid public inputs length"
        );
        uint256 _accountCount = accountCalls.length;
        uint counter = 0;
        for (uint8 i = 0; i < _accountCount; ++i) {
            address account = accountCalls[i].contractAddress;
            for (uint8 j = 0; j < accountCalls[i].callCount; j++) {
                bytes memory returnData = staticCall(
                    account,
                    accountCalls[i].callData[j]
                );
                uint256 scale = scales[counter];
                int256 quantized_data = quantizeData(
                    returnData,
                    accountCalls[i].decimals[j],
                    scale
                );
                uint256 field_element = toFieldElement(quantized_data);
                require(
                    field_element == instances[counter + instanceOffset],
                    "Public input does not match"
                );
                counter++;
            }
        }
    }

    function verifyWithDataAttestation(
        address verifier,
        bytes calldata encoded
    ) public view returns (bool) {
        require(verifier.code.length > 0, "Address: call to non-contract");
        attestData(getInstancesCalldata(encoded));
        // static call the verifier contract to verify the proof
        (bool success, bytes memory returndata) = verifier.staticcall(encoded);

        if (success) {
            return abi.decode(returndata, (bool));
        } else {
            revert("low-level call to verifier failed");
        }
    }
}
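The quantization step above is straightforward to mirror off-chain. Below is a minimal Rust sketch (an illustration, not ezkl's actual implementation) of the same round-half-away-from-zero rescaling that `quantizeData` performs; the oracle value and scales are made-up examples.

```rust
/// Rescale a decimal fixed-point integer to a power-of-two scale,
/// rounding half away from zero: q = round(x * scale / 10^decimals).
fn quantize(x: i128, decimals_pow10: i128, scale: i128) -> i128 {
    let neg = x < 0;
    let ax = x.abs();
    // Integer division truncates, so bump by one when the remainder rounds up.
    let mut out = ax * scale / decimals_pow10;
    if (ax * scale % decimals_pow10) * 2 >= decimals_pow10 {
        out += 1;
    }
    if neg { -out } else { out }
}

fn main() {
    // A hypothetical oracle returns 1.5 with 18 decimals; the circuit scale is 2^7.
    let raw: i128 = 1_500_000_000_000_000_000;
    assert_eq!(quantize(raw, 10_i128.pow(18), 1 << 7), 192);
}
```

The contract routes the multiply-divide through `mulDiv` to survive 256-bit intermediate overflow; `i128` sidesteps that concern here at the cost of a narrower input range.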
@@ -1,41 +0,0 @@
## EZKL Security Note: Public Commitments and Low-Entropy Data

> **Disclaimer:** this is a more technical post that requires some prior knowledge of how ZK proving systems like Halo2 operate, and in particular of how these APIs are constructed. For background reading we highly recommend the [Halo2 book](https://zcash.github.io/halo2/) and [Halo2 Club](https://halo2.club/).

## Overview of commitments in EZKL

A common design pattern in a zero knowledge (zk) application is thus:
- A prover has some data which is used within a circuit.
- This data, as it may be high-dimensional or somewhat private, is pre-committed to using some hash function.
- The zk-circuit which forms the core of the application then proves (paraphrasing) a statement of the form:
>"I know some data D which when hashed corresponds to the pre-committed value H + whatever else the circuit is proving over D".
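Stated a little more formally (our paraphrase, not a formal specification), with $H$ the public pre-commitment and $\Phi$ the predicate the circuit enforces over the data $D$, the proved relation is:

$$\exists\, D \;:\; \mathsf{Hash}(D) = H \;\wedge\; \Phi(D) = 1.$$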
From our own experience, we've implemented such patterns using snark-friendly hash functions like [Poseidon](https://www.poseidon-hash.info/), for which there is a relatively well vetted [implementation](https://docs.rs/halo2_gadgets/latest/halo2_gadgets/poseidon/index.html) in Halo2. Even then these hash functions can introduce lots of overhead and can be very expensive to generate proofs for if the dimensionality of the data D is large.

You can also implement such a pattern using Halo2's `Fixed` columns _if the privacy preservation of the pre-image is not necessary_. These are Halo2 columns (i.e. in reality just polynomials) that are left unblinded (unlike the blinded `Advice` columns), and whose commitments are shared with the verifier by way of the verifying key for the application's zk-circuit. These commitments are much lower cost to generate than implementing a hashing function, such as Poseidon, within a circuit.

> **Note:** Blinding is the process whereby a certain set of the final elements (i.e. rows) of a Halo2 column are set to random field elements. This is the mechanism by which Halo2 achieves its zero knowledge properties for `Advice` columns. By contrast `Fixed` columns aren't zero-knowledge in that they are vulnerable to dictionary attacks in the same manner a hash function is. Given some set of known or popular data D, an attacker can attempt to recover the pre-image of a hash by running D through the hash function to see if the outputs match a public commitment. These attacks aren't "possible" on blinded `Advice` columns.

> **Further Note:** Without blinding, an attacker with access to M proofs, each of which contains an evaluation of the polynomial at a different point, can more easily recover a non-blinded column's pre-image. This is because each proof generates a new query and evaluation of the polynomial represented by the column, and so with repetition a clearer picture emerges of the column's pre-image. Thus unblinded columns should only be used for privacy preservation, in the manner of a hash, if the number of proofs generated against a fixed set of values is limited. More formally, if M independent and _unique_ queries are generated and M equals the degree + 1 of the polynomial represented by the column (i.e. the unique Lagrange interpolation of the values in the column), then the column's pre-image can be recovered. As the logrows K increase, more queries are required to recover the pre-image (2^K unique queries). This assumes that the entries in the column are not structured; if they are, the number of queries required to recover the pre-image is reduced (e.g. if all rows above a certain point are known to be nil).
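To make the recovery bound above concrete: a column over logrows $K$ encodes a polynomial of degree $d = 2^K - 1$, and any $d + 1 = 2^K$ distinct openings $(x_i, p(x_i))$ determine it uniquely via Lagrange interpolation:

$$p(x) = \sum_{i=1}^{2^K} p(x_i) \prod_{j \neq i} \frac{x - x_j}{x_i - x_j}.$$

Structured columns (e.g. ones whose upper rows are known to be nil) need correspondingly fewer openings.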
The annoyance in using `Fixed` columns comes from the fact that they require generating a new verifying key every time a new set of commitments is generated.

> **Example:** Say for instance an application leverages a zero-knowledge circuit to prove the correct execution of a neural network. Every week the neural network is finetuned or retrained on new data. If the architecture remains the same, then committing to the new network parameters, along with a new proof of performance on a test set, would be an ideal setup. If we leverage `Fixed` columns to commit to the model parameters, each new commitment will require re-generating a verifying key and sharing the new key with the verifier(s). This is not ideal UX and can become expensive if the verifier is deployed on-chain.

An ideal commitment would thus have the low cost of a `Fixed` column but wouldn't require regenerating a new verifying key for each new commitment.

### Unblinded Advice Columns

A first step in designing such a commitment is to allow for optionally unblinded `Advice` columns within the Halo2 API. These won't be included in the verifying key, AND are blinded with a constant factor `1` -- such that if someone knows the pre-image to the commitment, they can recover it by running it through the corresponding polynomial commitment scheme (in ezkl's case [KZG commitments](https://dankradfeist.de/ethereum/2020/06/16/kate-polynomial-commitments.html)).

This is implemented using the `polycommit` visibility parameter in the ezkl API.

## The Vulnerability of Public Commitments

Public commitments in EZKL (both Poseidon-hashed inputs and KZG commitments) can be vulnerable to brute-force attacks when input data has low entropy. A malicious actor could reveal committed data by searching through possible input values, compromising privacy in applications like anonymous credentials. This is particularly relevant when input data comes from known finite sets (e.g., names, dates).

Example Risk: In an anonymous credential system using EZKL for ID verification, an attacker could match hashed outputs against a database of common identifying information to deanonymize users.
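The brute-force search itself is trivial to write. The sketch below uses a stand-in hash from the Rust standard library purely for illustration; a real attack would evaluate Poseidon (or open a KZG commitment) over the same candidate set, and the date encoding is an assumed example format.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in for the real commitment function (e.g. Poseidon).
fn commit(data: &str) -> u64 {
    let mut h = DefaultHasher::new();
    data.hash(&mut h);
    h.finish()
}

fn main() {
    // A published commitment whose pre-image is a birthdate:
    // roughly 45k candidates over 125 years, enumerable in milliseconds.
    let target = commit("1990-017");
    for year in 1900..2025 {
        for day in 1..=366 {
            let candidate = format!("{year}-{day:03}");
            if commit(&candidate) == target {
                println!("pre-image recovered: {candidate}");
                return;
            }
        }
    }
}
```

A standard mitigation is to mix a high-entropy salt into low-entropy inputs before committing, so the candidate space is no longer enumerable.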
@@ -1,54 +0,0 @@
# EZKL Security Note: Quantization-Activated Model Backdoors

## Model backdoors and provenance

Machine learning models inherently suffer from robustness issues, which can lead to various kinds of attacks, from backdoors to evasion attacks. These vulnerabilities are a direct byproduct of how machine learning models learn and cannot be fully remediated.

We say a model has a backdoor whenever a specific attacker-chosen trigger in the input leads to the model misbehaving. For instance, if we have an image classifier discriminating cats from dogs, the ability to turn any image of a cat into an image classified as a dog by changing a specific pixel pattern constitutes a backdoor.

Backdoors can be introduced through many different vectors. An attacker can introduce a backdoor using traditional security vulnerabilities: for instance, they could directly alter the file containing the model weights or dynamically hack the Python code of the model. Backdoors can also be introduced through the training data by a process known as poisoning, in which an attacker adds malicious data points to the dataset before the model is trained so that the model learns to associate the backdoor trigger with the intended misbehavior.

All these vectors pose a whole range of provenance challenges, as virtually any component of an AI system can be an entry point for a backdoor. Although provenance is already a concern with traditional code, the issue is exacerbated with AI, as retraining a model is cost-prohibitive. It is thus impractical to translate the "recompile it yourself" thinking to AI.

## Quantization-activated backdoors

Backdoors are a generic concern in AI and are outside the scope of EZKL. However, EZKL may activate a specific subset of backdoors. Several academic papers have demonstrated the possibility, both in theory and in practice, of implanting undetectable, inactive backdoors in a full-precision model that are reactivated by quantization.

An external attacker may trick the user of an application running EZKL into loading a model containing a quantization backdoor. This backdoor is active in the resulting quantized model and circuit but not in the full-precision model supplied to EZKL, compromising the integrity of the target application and the resulting proof.

### When is this a concern for me as a user?

Any untrusted component in your AI stack may be a backdoor vector. In practice, the most sensitive parts include:

- Datasets downloaded from the web or containing crowdsourced data
- Models downloaded from the web, even after finetuning
- Untrusted software dependencies (well-known frameworks such as PyTorch can typically be considered trusted)
- Any component loaded through an unsafe serialization format, such as Pickle.

Because backdoors are inherent to ML and cannot be eliminated, reviewing the provenance of these sensitive components is especially important.

### Responsibilities of the user and EZKL

As EZKL cannot prevent backdoored models from being used, it is the responsibility of the user to review the provenance of all the components in their AI stack and ensure that no backdoor could have been implanted. EZKL shall not be held responsible for misleading prediction proofs resulting from the use of a backdoored model, or for any harm caused to a system or its users by a misbehaving model.

### Limitations

- Attack effectiveness depends on calibration settings and internal rescaling operations.
- Further research is needed on backdoor persistence through the witness/proof stages.
- The attack can be mitigated by evaluating the quantized model directly (using `ezkl gen-witness`) rather than relying on the evaluation of the original model in PyTorch or onnx-runtime, since a difference between the two evaluations could reveal a backdoor; see the sketch below.
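A minimal sketch of that mitigation, assuming the async Python bindings used in the notebooks later in this document, and assuming the witness file exposes rescaled floating-point outputs under a `pretty_elements` key (treat the exact field names and the toy input shape as illustrative rather than a stable API):

```python
import json
import numpy as np
import onnxruntime as ort
import ezkl

# Evaluate the quantized model through the EZKL pipeline (awaited, as in the notebooks).
await ezkl.gen_witness("input.json", "network.compiled", "witness.json")
witness = json.load(open("witness.json"))
# Assumption: rescaled outputs live here; adjust to your witness layout.
quantized = np.array(witness["pretty_elements"]["rescaled_outputs"][0], dtype=float)

# Evaluate the original full-precision ONNX model on the same input.
inp = np.array(json.load(open("input.json"))["input_data"], dtype=np.float32)
session = ort.InferenceSession("network.onnx")
full_precision = session.run(None, {"input": inp.reshape(1, 3, 2, 2)})[0].flatten()

# A large divergence between the two evaluations is a red flag:
# a quantization-activated backdoor fires only in the quantized model.
if not np.allclose(quantized, full_precision, atol=0.1):
    print("WARNING: quantized and full-precision outputs diverge")
```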
References:

1. [Quantization Backdoors to Deep Learning Commercial Frameworks (Ma et al., 2021)](https://arxiv.org/abs/2108.09187)
2. [Planting Undetectable Backdoors in Machine Learning Models (Goldwasser et al., 2022)](https://arxiv.org/abs/2204.06974)
@@ -1,4 +1,4 @@
-ezkl
+ezkl==11.0.7
 sphinx
 sphinx-rtd-theme
 sphinxcontrib-napoleon

@@ -1,7 +1,7 @@
 import ezkl

 project = 'ezkl'
-release = '0.0.0'
+release = '11.0.7'
 version = release

@@ -2,7 +2,8 @@ use ezkl::circuit::region::RegionCtx;
 use ezkl::circuit::{
     ops::lookup::LookupOp, ops::poly::PolyOp, BaseConfig as PolyConfig, CheckMode,
 };
-use ezkl::fieldutils::{self, integer_rep_to_felt, IntegerRep};
+use ezkl::fieldutils;
+use ezkl::fieldutils::i32_to_felt;
 use ezkl::tensor::*;
 use halo2_proofs::dev::MockProver;
 use halo2_proofs::poly::commitment::Params;
@@ -41,8 +42,8 @@ const NUM_INNER_COLS: usize = 1;
 struct Config<
     const LEN: usize, //LEN = CHOUT x OH x OW flattened //not supported yet in rust stable
     const CLASSES: usize,
-    const LOOKUP_MIN: IntegerRep,
-    const LOOKUP_MAX: IntegerRep,
+    const LOOKUP_MIN: i64,
+    const LOOKUP_MAX: i64,
     // Convolution
     const KERNEL_HEIGHT: usize,
     const KERNEL_WIDTH: usize,
@@ -65,8 +66,8 @@ struct Config<
 struct MyCircuit<
     const LEN: usize, //LEN = CHOUT x OH x OW flattened
     const CLASSES: usize,
-    const LOOKUP_MIN: IntegerRep,
-    const LOOKUP_MAX: IntegerRep,
+    const LOOKUP_MIN: i64,
+    const LOOKUP_MAX: i64,
     // Convolution
     const KERNEL_HEIGHT: usize,
     const KERNEL_WIDTH: usize,
@@ -89,8 +90,8 @@ struct MyCircuit<
 impl<
     const LEN: usize,
     const CLASSES: usize,
-    const LOOKUP_MIN: IntegerRep,
-    const LOOKUP_MAX: IntegerRep,
+    const LOOKUP_MIN: i64,
+    const LOOKUP_MAX: i64,
     // Convolution
     const KERNEL_HEIGHT: usize,
     const KERNEL_WIDTH: usize,
@@ -146,8 +147,6 @@ where
         let params = VarTensor::new_advice(cs, K, NUM_INNER_COLS, LEN);
         let output = VarTensor::new_advice(cs, K, NUM_INNER_COLS, LEN);

-        let _constant = VarTensor::constant_cols(cs, K, LEN, false);
-
         println!("INPUT COL {:#?}", input);

         let mut layer_config = PolyConfig::configure(
@@ -158,11 +157,15 @@ where
         );

         layer_config
-            .configure_range_check(cs, &input, &params, (-1, 1), K)
-            .unwrap();
-
-        layer_config
-            .configure_range_check(cs, &input, &params, (0, 1023), K)
+            .configure_lookup(
+                cs,
+                &input,
+                &output,
+                &params,
+                (LOOKUP_MIN, LOOKUP_MAX),
+                K,
+                &LookupOp::ReLU,
+            )
             .unwrap();

         layer_config
@@ -193,50 +196,39 @@ where
     ) -> Result<(), Error> {
         config.layer_config.layout_tables(&mut layouter).unwrap();

-        config
-            .layer_config
-            .layout_range_checks(&mut layouter)
-            .unwrap();
-
         let x = layouter
             .assign_region(
                 || "mlp_4d",
                 |region| {
-                    let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS, 1024, 2);
+                    let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS);

                     let op = PolyOp::Conv {
                         padding: vec![(PADDING, PADDING); 2],
                         stride: vec![STRIDE; 2],
-                        group: 1,
-                        data_format: DataFormat::NCHW,
-                        kernel_format: KernelFormat::OIHW,
                     };
                     let x = config
                         .layer_config
                         .layout(
                             &mut region,
-                            &[&self.input, &self.l0_params[0], &self.l0_params[1]],
+                            &[
+                                self.input.clone(),
+                                self.l0_params[0].clone(),
+                                self.l0_params[1].clone(),
+                            ],
                             Box::new(op),
                         )
                         .unwrap();

                     let x = config
                         .layer_config
-                        .layout(
-                            &mut region,
-                            &[&x.unwrap()],
-                            Box::new(PolyOp::LeakyReLU {
-                                slope: 0.0.into(),
-                                scale: 1,
-                            }),
-                        )
+                        .layout(&mut region, &[x.unwrap()], Box::new(LookupOp::ReLU))
                         .unwrap();

                     let mut x = config
                         .layer_config
                         .layout(
                             &mut region,
-                            &[&x.unwrap()],
+                            &[x.unwrap()],
                             Box::new(LookupOp::Div { denom: 32.0.into() }),
                         )
                         .unwrap()
@@ -248,7 +240,7 @@ where
                         .layer_config
                         .layout(
                             &mut region,
-                            &[&self.l2_params[0], &x],
+                            &[self.l2_params[0].clone(), x],
                             Box::new(PolyOp::Einsum {
                                 equation: "ij,j->ik".to_string(),
                             }),
@@ -260,7 +252,7 @@ where
                         .layer_config
                         .layout(
                             &mut region,
-                            &[&x, &self.l2_params[1]],
+                            &[x, self.l2_params[1].clone()],
                             Box::new(PolyOp::Add),
                         )
                         .unwrap()
@@ -289,7 +281,7 @@ where
 }

 pub fn runconv() {
-    #[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
+    #[cfg(not(target_arch = "wasm32"))]
     env_logger::init();

     const KERNEL_HEIGHT: usize = 5;
@@ -323,11 +315,7 @@ pub fn runconv() {
         .test_set_length(10_000)
         .finalize();

-    let mut train_data = Tensor::from(
-        trn_img
-            .iter()
-            .map(|x| integer_rep_to_felt::<F>(*x as IntegerRep / 16)),
-    );
+    let mut train_data = Tensor::from(trn_img.iter().map(|x| i32_to_felt::<F>(*x as i32 / 16)));
     train_data.reshape(&[50_000, 28, 28]).unwrap();

     let mut train_labels = Tensor::from(trn_lbl.iter().map(|x| *x as f32));
@@ -355,8 +343,8 @@ pub fn runconv() {
             .map(|fl| {
                 let dx = fl * 32_f32;
                 let rounded = dx.round();
-                let integral: IntegerRep = unsafe { rounded.to_int_unchecked() };
-                fieldutils::integer_rep_to_felt(integral)
+                let integral: i32 = unsafe { rounded.to_int_unchecked() };
+                fieldutils::i32_to_felt(integral)
             }),
     );

@@ -367,8 +355,7 @@ pub fn runconv() {

     let l0_kernels = l0_kernels.try_into().unwrap();

-    let mut l0_bias =
-        Tensor::<F>::from((0..OUT_CHANNELS).map(|_| fieldutils::integer_rep_to_felt(0)));
+    let mut l0_bias = Tensor::<F>::from((0..OUT_CHANNELS).map(|_| fieldutils::i32_to_felt(0)));
     l0_bias.set_visibility(&ezkl::graph::Visibility::Private);

     let l0_bias = l0_bias.try_into().unwrap();
@@ -376,8 +363,8 @@ pub fn runconv() {
     let mut l2_biases = Tensor::<F>::from(myparams.biases.into_iter().map(|fl| {
         let dx = fl * 32_f32;
         let rounded = dx.round();
-        let integral: IntegerRep = unsafe { rounded.to_int_unchecked() };
-        fieldutils::integer_rep_to_felt(integral)
+        let integral: i32 = unsafe { rounded.to_int_unchecked() };
+        fieldutils::i32_to_felt(integral)
     }));
     l2_biases.set_visibility(&ezkl::graph::Visibility::Private);
     l2_biases.reshape(&[l2_biases.len(), 1]).unwrap();
@@ -387,8 +374,8 @@ pub fn runconv() {
     let mut l2_weights = Tensor::<F>::from(myparams.weights.into_iter().flatten().map(|fl| {
         let dx = fl * 32_f32;
         let rounded = dx.round();
-        let integral: IntegerRep = unsafe { rounded.to_int_unchecked() };
-        fieldutils::integer_rep_to_felt(integral)
+        let integral: i32 = unsafe { rounded.to_int_unchecked() };
+        fieldutils::i32_to_felt(integral)
     }));
     l2_weights.set_visibility(&ezkl::graph::Visibility::Private);
     l2_weights.reshape(&[CLASSES, LEN]).unwrap();
@@ -414,13 +401,13 @@ pub fn runconv() {
         l2_params: [l2_weights, l2_biases],
     };

-    let public_input: Tensor<IntegerRep> = vec![
-        -25124, -19304, -16668, -4399, -6209, -4548, -2317, -8349, -6117, -23461,
+    let public_input: Tensor<i32> = vec![
+        -25124i32, -19304, -16668, -4399, -6209, -4548, -2317, -8349, -6117, -23461,
     ]
     .into_iter()
     .into();

-    let pi_inner: Tensor<F> = public_input.map(integer_rep_to_felt::<F>);
+    let pi_inner: Tensor<F> = public_input.map(i32_to_felt::<F>);

     println!("MOCK PROVING");
     let now = Instant::now();
@@ -2,7 +2,7 @@ use ezkl::circuit::region::RegionCtx;
 use ezkl::circuit::{
     ops::lookup::LookupOp, ops::poly::PolyOp, BaseConfig as PolyConfig, CheckMode,
 };
-use ezkl::fieldutils::{integer_rep_to_felt, IntegerRep};
+use ezkl::fieldutils::i32_to_felt;
 use ezkl::tensor::*;
 use halo2_proofs::dev::MockProver;
 use halo2_proofs::{
@@ -23,8 +23,8 @@ struct MyConfig {
 #[derive(Clone)]
 struct MyCircuit<
     const LEN: usize, //LEN = CHOUT x OH x OW flattened
-    const LOOKUP_MIN: IntegerRep,
-    const LOOKUP_MAX: IntegerRep,
+    const LOOKUP_MIN: i64,
+    const LOOKUP_MAX: i64,
 > {
     // Given the stateless MyConfig type information, a DNN trace is determined by its input and the parameters of its layers.
     // Computing the trace still requires a forward pass. The intermediate activations are stored only by the layouter.
@@ -34,7 +34,7 @@ struct MyCircuit<
     _marker: PhantomData<F>,
 }

-impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRep> Circuit<F>
+impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
     for MyCircuit<LEN, LOOKUP_MIN, LOOKUP_MAX>
 {
     type Config = MyConfig;
@@ -53,10 +53,6 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
         let output = VarTensor::new_advice(cs, K, 1, LEN);
         // tells the config layer to add an affine op to the circuit gate

-        let _constant = VarTensor::constant_cols(cs, K, LEN, false);
-
-        println!("INPUT COL {:#?}", input);
-
         let mut layer_config = PolyConfig::<F>::configure(
             cs,
             &[input.clone(), params.clone()],
@@ -64,12 +60,17 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
             CheckMode::SAFE,
         );

         // sets up a new ReLU table and resuses it for l1 and l3 non linearities
         layer_config
-            .configure_range_check(cs, &input, &params, (-1, 1), K)
-            .unwrap();
-
-        layer_config
-            .configure_range_check(cs, &input, &params, (0, 1023), K)
+            .configure_lookup(
+                cs,
+                &input,
+                &output,
+                &params,
+                (LOOKUP_MIN, LOOKUP_MAX),
+                K,
+                &LookupOp::ReLU,
+            )
             .unwrap();

         // sets up a new ReLU table and resuses it for l1 and l3 non linearities
@@ -103,21 +104,19 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
     ) -> Result<(), Error> {
         config.layer_config.layout_tables(&mut layouter).unwrap();

-        config
-            .layer_config
-            .layout_range_checks(&mut layouter)
-            .unwrap();
-
         let x = layouter
             .assign_region(
                 || "mlp_4d",
                 |region| {
-                    let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
+                    let mut region = RegionCtx::new(region, 0, 1);
                     let x = config
                         .layer_config
                         .layout(
                             &mut region,
-                            &[&self.l0_params[0].clone().try_into().unwrap(), &self.input],
+                            &[
+                                self.l0_params[0].clone().try_into().unwrap(),
+                                self.input.clone(),
+                            ],
                             Box::new(PolyOp::Einsum {
                                 equation: "ab,bc->ac".to_string(),
                             }),
@@ -132,7 +131,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
                         .layer_config
                         .layout(
                             &mut region,
-                            &[&x, &self.l0_params[1].clone().try_into().unwrap()],
+                            &[x, self.l0_params[1].clone().try_into().unwrap()],
                             Box::new(PolyOp::Add),
                         )
                         .unwrap()
@@ -142,14 +141,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
                     println!("x shape: {:?}", x.dims());
                     let mut x = config
                         .layer_config
-                        .layout(
-                            &mut region,
-                            &[&x],
-                            Box::new(PolyOp::LeakyReLU {
-                                scale: 1,
-                                slope: 0.0.into(),
-                            }),
-                        )
+                        .layout(&mut region, &[x], Box::new(LookupOp::ReLU))
                         .unwrap()
                         .unwrap();
                     println!("3");
@@ -160,7 +152,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
                         .layer_config
                         .layout(
                             &mut region,
-                            &[&self.l2_params[0].clone().try_into().unwrap(), &x],
+                            &[self.l2_params[0].clone().try_into().unwrap(), x],
                             Box::new(PolyOp::Einsum {
                                 equation: "ab,bc->ac".to_string(),
                             }),
@@ -175,7 +167,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
                         .layer_config
                         .layout(
                             &mut region,
-                            &[&x, &self.l2_params[1].clone().try_into().unwrap()],
+                            &[x, self.l2_params[1].clone().try_into().unwrap()],
                             Box::new(PolyOp::Add),
                         )
                         .unwrap()
@@ -185,14 +177,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
                     println!("x shape: {:?}", x.dims());
                     let x = config
                         .layer_config
-                        .layout(
-                            &mut region,
-                            &[&x],
-                            Box::new(PolyOp::LeakyReLU {
-                                scale: 1,
-                                slope: 0.0.into(),
-                            }),
-                        )
+                        .layout(&mut region, &[x], Box::new(LookupOp::ReLU))
                         .unwrap();
                     println!("6");
                     println!("offset: {}", region.row());
@@ -200,7 +185,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
                         .layer_config
                         .layout(
                             &mut region,
-                            &[&x.unwrap()],
+                            &[x.unwrap()],
                             Box::new(LookupOp::Div {
                                 denom: ezkl::circuit::utils::F32::from(128.),
                             }),
@@ -227,36 +212,36 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
 }

 pub fn runmlp() {
-    #[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
+    #[cfg(not(target_arch = "wasm32"))]
     env_logger::init();
     // parameters
-    let mut l0_kernel: Tensor<F> = Tensor::<IntegerRep>::new(
+    let mut l0_kernel: Tensor<F> = Tensor::<i32>::new(
         Some(&[10, 0, 0, -1, 0, 10, 1, 0, 0, 1, 10, 0, 1, 0, 0, 10]),
         &[4, 4],
     )
     .unwrap()
-    .map(integer_rep_to_felt);
+    .map(i32_to_felt);
     l0_kernel.set_visibility(&ezkl::graph::Visibility::Private);

-    let mut l0_bias: Tensor<F> = Tensor::<IntegerRep>::new(Some(&[0, 0, 0, 1]), &[4, 1])
+    let mut l0_bias: Tensor<F> = Tensor::<i32>::new(Some(&[0, 0, 0, 1]), &[4, 1])
        .unwrap()
-        .map(integer_rep_to_felt);
+        .map(i32_to_felt);
     l0_bias.set_visibility(&ezkl::graph::Visibility::Private);

-    let mut l2_kernel: Tensor<F> = Tensor::<IntegerRep>::new(
+    let mut l2_kernel: Tensor<F> = Tensor::<i32>::new(
         Some(&[0, 3, 10, -1, 0, 10, 1, 0, 0, 1, 0, 12, 1, -2, 32, 0]),
         &[4, 4],
     )
     .unwrap()
-    .map(integer_rep_to_felt);
+    .map(i32_to_felt);
     l2_kernel.set_visibility(&ezkl::graph::Visibility::Private);
     // input data, with 1 padding to allow for bias
-    let input: Tensor<Value<F>> = Tensor::<IntegerRep>::new(Some(&[-30, -21, 11, 40]), &[4, 1])
+    let input: Tensor<Value<F>> = Tensor::<i32>::new(Some(&[-30, -21, 11, 40]), &[4, 1])
         .unwrap()
         .into();
-    let mut l2_bias: Tensor<F> = Tensor::<IntegerRep>::new(Some(&[0, 0, 0, 1]), &[4, 1])
+    let mut l2_bias: Tensor<F> = Tensor::<i32>::new(Some(&[0, 0, 0, 1]), &[4, 1])
         .unwrap()
-        .map(integer_rep_to_felt);
+        .map(i32_to_felt);
     l2_bias.set_visibility(&ezkl::graph::Visibility::Private);

     let circuit = MyCircuit::<4, -8192, 8192> {
@@ -266,12 +251,12 @@ pub fn runmlp() {
         _marker: PhantomData,
     };

-    let public_input: Vec<IntegerRep> = unsafe {
+    let public_input: Vec<i32> = unsafe {
         vec![
-            (531f32 / 128f32).round().to_int_unchecked::<IntegerRep>(),
-            (103f32 / 128f32).round().to_int_unchecked::<IntegerRep>(),
-            (4469f32 / 128f32).round().to_int_unchecked::<IntegerRep>(),
-            (2849f32 / 128f32).to_int_unchecked::<IntegerRep>(),
+            (531f32 / 128f32).round().to_int_unchecked::<i32>(),
+            (103f32 / 128f32).round().to_int_unchecked::<i32>(),
+            (4469f32 / 128f32).round().to_int_unchecked::<i32>(),
+            (2849f32 / 128f32).to_int_unchecked::<i32>(),
         ]
     };

@@ -280,10 +265,7 @@ pub fn runmlp() {
     let prover = MockProver::run(
         K as u32,
         &circuit,
-        vec![public_input
-            .iter()
-            .map(|x| integer_rep_to_felt::<F>(*x))
-            .collect()],
+        vec![public_input.iter().map(|x| i32_to_felt::<F>(*x)).collect()],
     )
     .unwrap();
     prover.assert_satisfied();
File diff suppressed because it is too large
@@ -1,13 +0,0 @@
|
||||
# download tess data
|
||||
# check if first argument has been set
|
||||
if [ ! -z "$1" ]; then
|
||||
DATA_DIR=$1
|
||||
else
|
||||
DATA_DIR=data
|
||||
fi
|
||||
|
||||
echo "Downloading data to $DATA_DIR"
|
||||
|
||||
if [ ! -d "$DATA_DIR/CATDOG" ]; then
|
||||
kaggle datasets download tongpython/cat-and-dog -p $DATA_DIR/CATDOG --unzip
|
||||
fi
|
||||
601 examples/notebooks/data_attest.ipynb Normal file
@@ -0,0 +1,601 @@
{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# data-attest-ezkl\n",
    "\n",
    "Here's an example leveraging EZKL whereby the inputs to the model are read and attested to from an on-chain source.\n",
    "\n",
    "In this setup:\n",
    "- the inputs and outputs are publicly known to the prover and verifier\n",
    "- the on-chain inputs will be fetched and then fed directly into the circuit\n",
    "- the quantization of the on-chain inputs happens within the evm and is replicated at proving time\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "First we import the necessary dependencies and set up logging to be as informative as possible. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# check if notebook is in colab\n",
    "try:\n",
    "    # install ezkl\n",
    "    import google.colab\n",
    "    import subprocess\n",
    "    import sys\n",
    "    subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
    "    subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
    "\n",
    "# rely on local installation of ezkl if the notebook is not in colab\n",
    "except:\n",
    "    pass\n",
    "\n",
    "\n",
    "from torch import nn\n",
    "import ezkl\n",
    "import os\n",
    "import json\n",
    "import logging\n",
    "\n",
    "# uncomment for more descriptive logging\n",
    "FORMAT = '%(levelname)s %(name)s %(asctime)-15s %(filename)s:%(lineno)d %(message)s'\n",
    "logging.basicConfig(format=FORMAT)\n",
    "logging.getLogger().setLevel(logging.DEBUG)\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we define our model. It is a very simple PyTorch model that has just one layer, an average pooling 2D layer. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "# Defines the model\n",
    "\n",
    "class MyModel(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(MyModel, self).__init__()\n",
    "        self.layer = nn.AvgPool2d(2, 1, (1, 1))\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.layer(x)[0]\n",
    "\n",
    "\n",
    "circuit = MyModel()\n",
    "\n",
    "# this is where you'd train your model"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We omit training for purposes of this demonstration. We've marked where training would happen in the cell above. \n",
    "Now we export the model to onnx and create a corresponding (randomly generated) input. This input data will eventually be stored on chain and read from according to the call_data field in the graph input.\n",
    "\n",
    "You can replace the random `x` with real data if you so wish. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "x = 0.1*torch.rand(1,*[3, 2, 2], requires_grad=True)\n",
    "\n",
    "# Flips the neural net into inference mode\n",
    "circuit.eval()\n",
    "\n",
    "# Export the model\n",
    "torch.onnx.export(circuit,               # model being run\n",
    "                  x,                     # model input (or a tuple for multiple inputs)\n",
    "                  \"network.onnx\",        # where to save the model (can be a file or file-like object)\n",
    "                  export_params=True,    # store the trained parameter weights inside the model file\n",
    "                  opset_version=10,      # the ONNX version to export the model to\n",
    "                  do_constant_folding=True,  # whether to execute constant folding for optimization\n",
    "                  input_names=['input'],     # the model's input names\n",
    "                  output_names=['output'],   # the model's output names\n",
    "                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes\n",
    "                                'output': {0: 'batch_size'}})\n",
    "\n",
    "data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
    "\n",
    "data = dict(input_data = [data_array])\n",
    "\n",
    "# Serialize data into file:\n",
    "json.dump(data, open(\"input.json\", 'w'))\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We now define a function that will create a new anvil instance to which we will deploy our test contract. This contract will contain in its storage the data that we will read from and attest to. In production you would not need to set up a local anvil instance. Instead, you would replace RPC_URL with the actual RPC endpoint of the chain you are deploying your verifiers to, reading from the data on said chain."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import subprocess\n",
    "import time\n",
    "import threading\n",
    "\n",
    "# make sure anvil is running locally\n",
    "# $ anvil -p 3030\n",
    "\n",
    "RPC_URL = \"http://localhost:3030\"\n",
    "\n",
    "# Save process globally\n",
    "anvil_process = None\n",
    "\n",
    "def start_anvil():\n",
    "    global anvil_process\n",
    "    if anvil_process is None:\n",
    "        anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
    "        if anvil_process.returncode is not None:\n",
    "            raise Exception(\"failed to start anvil process\")\n",
    "        time.sleep(3)\n",
    "\n",
    "def stop_anvil():\n",
    "    global anvil_process\n",
    "    if anvil_process is not None:\n",
    "        anvil_process.terminate()\n",
    "        anvil_process = None\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We define our `PyRunArgs` object, which contains the visibility parameters for our model. \n",
    "- `input_visibility` defines the visibility of the model inputs\n",
    "- `param_visibility` defines the visibility of the model weights, constants and parameters \n",
    "- `output_visibility` defines the visibility of the model outputs\n",
    "\n",
    "Here we create the following setup:\n",
    "- `input_visibility`: \"public\"\n",
    "- `param_visibility`: \"private\"\n",
    "- `output_visibility`: \"public\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import ezkl\n",
    "\n",
    "model_path = os.path.join('network.onnx')\n",
    "compiled_model_path = os.path.join('network.compiled')\n",
    "pk_path = os.path.join('test.pk')\n",
    "vk_path = os.path.join('test.vk')\n",
    "settings_path = os.path.join('settings.json')\n",
    "srs_path = os.path.join('kzg.srs')\n",
    "data_path = os.path.join('input.json')\n",
    "\n",
    "run_args = ezkl.PyRunArgs()\n",
    "run_args.input_visibility = \"public\"\n",
    "run_args.param_visibility = \"private\"\n",
    "run_args.output_visibility = \"public\"\n",
    "run_args.num_inner_cols = 1\n",
    "run_args.variables = [(\"batch_size\", 1)]\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we generate a settings file. This file basically instantiates a bunch of parameters that determine the circuit's shape, size, etc. Because of the way we represent nonlinearities in the circuit (using Halo2's [lookup tables](https://zcash.github.io/halo2/design/proving-system/lookup.html)), it is often best to _calibrate_ this settings file as some data can fall out of range of these lookups.\n",
    "\n",
    "You can pass a dataset for calibration that will be representative of real inputs you might find if and when you deploy the prover. Here we create a dummy calibration dataset for demonstration purposes. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!RUST_LOG=trace\n",
    "# TODO: Dictionary outputs\n",
    "res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
    "assert res == True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# generate a bunch of dummy calibration data\n",
    "cal_data = {\n",
    "    \"input_data\": [(0.1*torch.rand(2, *[3, 2, 2])).flatten().tolist()],\n",
    "}\n",
    "\n",
    "cal_path = os.path.join('val_data.json')\n",
    "# save as json file\n",
    "with open(cal_path, \"w\") as f:\n",
    "    json.dump(cal_data, f)\n",
    "\n",
    "res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
    "assert res == True"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The graph input for on-chain data sources is formatted completely differently compared to file-based data sources.\n",
    "\n",
    "- For file data sources, the raw floating point values that eventually get quantized, converted into field elements and stored in `witness.json` to be consumed by the circuit are stored. The output data contains the expected floating point values returned as outputs from running your vanilla pytorch model on the given inputs.\n",
    "- For on-chain data sources, the input_data field contains all the data necessary to read and format the on-chain data into something digestible by EZKL (aka field elements :-D).\n",
    "\n",
    "Here is what the schema for an on-chain data source graph input file should look like:\n",
    "\n",
    "```json\n",
    "{\n",
    "    \"input_data\": {\n",
    "        \"rpc\": \"http://localhost:3030\", // The rpc endpoint of the chain you are deploying your verifier to\n",
    "        \"calls\": [\n",
    "            {\n",
    "                \"call_data\": [\n",
    "                    [\n",
    "                        \"71e5ee5f0000000000000000000000000000000000000000000000000000000000000000\", // The abi encoded call data to a view function that returns a single on-chain data point (we only support uint256 returns for now)\n",
    "                        7 // The number of decimal places of the large uint256 value. This is our way of representing large wei values as floating points on chain, since the evm only natively supports integer values.\n",
    "                    ],\n",
    "                    [\n",
    "                        \"71e5ee5f0000000000000000000000000000000000000000000000000000000000000001\",\n",
    "                        5\n",
    "                    ],\n",
    "                    [\n",
    "                        \"71e5ee5f0000000000000000000000000000000000000000000000000000000000000002\",\n",
    "                        5\n",
    "                    ]\n",
    "                ],\n",
    "                \"address\": \"5fbdb2315678afecb367f032d93f642f64180aa3\" // The address of the contract that we are calling to get the data.\n",
    "            }\n",
    "        ]\n",
    "    }\n",
    "}\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "await ezkl.setup_test_evm_witness(\n",
    "    data_path,\n",
    "    compiled_model_path,\n",
    "    # we write the call data to the same file as the input data\n",
    "    data_path,\n",
    "    input_source=ezkl.PyTestDataSource.OnChain,\n",
    "    output_source=ezkl.PyTestDataSource.File,\n",
    "    rpc_url=RPC_URL)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As we use Halo2 with KZG-commitments we need an SRS from (preferably) a multi-party trusted setup ceremony. For an overview of the procedures for such a ceremony check out [this page](https://blog.ethereum.org/2023/01/16/announcing-kzg-ceremony). The `get_srs` command retrieves a correctly sized SRS given the calibrated settings file from [here](https://github.com/han0110/halo2-kzg-srs).\n",
    "\n",
    "These SRS were generated with [this](https://github.com/privacy-scaling-explorations/perpetualpowersoftau) ceremony. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "res = await ezkl.get_srs(settings_path)\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We now need to generate the circuit witness. These are the model outputs (and any hashes) that are generated when feeding the previously generated `input.json` through the circuit / model. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!export RUST_BACKTRACE=1\n",
    "\n",
    "witness_path = \"witness.json\"\n",
    "\n",
    "res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Here we setup verifying and proving keys for the circuit. As the name suggests the proving key is needed for ... proving and the verifying key is needed for ... verifying. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# HERE WE SETUP THE CIRCUIT PARAMS\n",
    "# WE GOT KEYS\n",
    "# WE GOT CIRCUIT PARAMETERS\n",
    "# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
    "res = ezkl.setup(\n",
    "    compiled_model_path,\n",
    "    vk_path,\n",
    "    pk_path,\n",
    ")\n",
    "\n",
    "assert res == True\n",
    "assert os.path.isfile(vk_path)\n",
    "assert os.path.isfile(pk_path)\n",
    "assert os.path.isfile(settings_path)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we generate a full proof. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# GENERATE A PROOF\n",
    "\n",
    "proof_path = os.path.join('test.pf')\n",
    "\n",
    "res = ezkl.prove(\n",
    "    witness_path,\n",
    "    compiled_model_path,\n",
    "    pk_path,\n",
    "    proof_path,\n",
    "    \"single\",\n",
    ")\n",
    "\n",
    "print(res)\n",
    "assert os.path.isfile(proof_path)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "And verify it as a sanity check. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# VERIFY IT\n",
    "\n",
    "res = ezkl.verify(\n",
    "    proof_path,\n",
    "    settings_path,\n",
    "    vk_path,\n",
    ")\n",
    "\n",
    "assert res == True\n",
    "print(\"verified\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We can now create and then deploy a vanilla evm verifier."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "abi_path = 'test.abi'\n",
    "sol_code_path = 'test.sol'\n",
    "\n",
    "res = await ezkl.create_evm_verifier(\n",
    "    vk_path,\n",
    "    settings_path,\n",
    "    sol_code_path,\n",
    "    abi_path,\n",
    ")\n",
    "assert res == True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "addr_path_verifier = \"addr_verifier.txt\"\n",
    "\n",
    "res = await ezkl.deploy_evm(\n",
    "    addr_path_verifier,\n",
    "    sol_code_path,\n",
    "    'http://127.0.0.1:3030'\n",
    ")\n",
    "\n",
    "assert res == True"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "With the vanilla verifier deployed, we can now create the data attestation contract, which will read in the instances from the calldata to the verifier, attest to them, call the verifier and then return the result. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "abi_path = 'test.abi'\n",
    "sol_code_path = 'test.sol'\n",
    "input_path = 'input.json'\n",
    "\n",
    "res = await ezkl.create_evm_data_attestation(\n",
    "    input_path,\n",
    "    settings_path,\n",
    "    sol_code_path,\n",
    "    abi_path,\n",
    ")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we can deploy the data attestation verifier contract. For security reasons, this binding will only deploy to a local anvil instance, using accounts generated by anvil, so it should only be used for testing purposes."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "addr_path_da = \"addr_da.txt\"\n",
    "\n",
    "res = await ezkl.deploy_da_evm(\n",
    "    addr_path_da,\n",
    "    input_path,\n",
    "    settings_path,\n",
    "    sol_code_path,\n",
    "    RPC_URL,\n",
    ")\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Call the view-only verify method on the contract to verify the proof. Since it is a view function this is safe to use in production, as you don't have to pass your private key."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# read the verifier address\n",
    "addr = None\n",
    "with open(addr_path_verifier, 'r') as f:\n",
    "    addr = f.read()\n",
    "# read the data attestation address\n",
    "addr_da = None\n",
    "with open(addr_path_da, 'r') as f:\n",
    "    addr_da = f.read()\n",
    "\n",
    "res = await ezkl.verify_evm(\n",
    "    addr,\n",
    "    proof_path,\n",
    "    RPC_URL,\n",
    "    addr_da,\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ezkl",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
657 examples/notebooks/data_attest_hashed.ipynb Normal file
@@ -0,0 +1,657 @@
{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# data-attest-ezkl hashed\n",
    "\n",
    "Here's an example leveraging EZKL whereby the hashes of the outputs of the model are read and attested to from an on-chain source.\n",
    "\n",
    "In this setup:\n",
    "- the hashes of outputs are publicly known to the prover and verifier\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "First we import the necessary dependencies and set up logging to be as informative as possible. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# check if notebook is in colab\n",
    "try:\n",
    "    # install ezkl\n",
    "    import google.colab\n",
    "    import subprocess\n",
    "    import sys\n",
    "    subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
    "    subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
    "\n",
    "# rely on local installation of ezkl if the notebook is not in colab\n",
    "except:\n",
    "    pass\n",
    "\n",
    "\n",
    "from torch import nn\n",
    "import ezkl\n",
    "import os\n",
    "import json\n",
    "import logging\n",
    "\n",
    "# uncomment for more descriptive logging\n",
    "# FORMAT = '%(levelname)s %(name)s %(asctime)-15s %(filename)s:%(lineno)d %(message)s'\n",
    "# logging.basicConfig(format=FORMAT)\n",
    "# logging.getLogger().setLevel(logging.DEBUG)\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we define our model. It is a very simple PyTorch model that has just one layer, an average pooling 2D layer. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "# Defines the model\n",
    "\n",
    "class MyModel(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(MyModel, self).__init__()\n",
    "        self.layer = nn.AvgPool2d(2, 1, (1, 1))\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.layer(x)[0]\n",
    "\n",
    "\n",
    "circuit = MyModel()\n",
    "\n",
    "# this is where you'd train your model\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We omit training for purposes of this demonstration. We've marked where training would happen in the cell above. \n",
    "Now we export the model to onnx and create a corresponding (randomly generated) input. This input data will eventually be stored on chain and read from according to the call_data field in the graph input.\n",
    "\n",
    "You can replace the random `x` with real data if you so wish. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "x = 0.1*torch.rand(1,*[3, 2, 2], requires_grad=True)\n",
    "\n",
    "# Flips the neural net into inference mode\n",
    "circuit.eval()\n",
    "\n",
    "# Export the model\n",
    "torch.onnx.export(circuit,               # model being run\n",
    "                  x,                     # model input (or a tuple for multiple inputs)\n",
    "                  \"network.onnx\",        # where to save the model (can be a file or file-like object)\n",
    "                  export_params=True,    # store the trained parameter weights inside the model file\n",
    "                  opset_version=10,      # the ONNX version to export the model to\n",
    "                  do_constant_folding=True,  # whether to execute constant folding for optimization\n",
    "                  input_names=['input'],     # the model's input names\n",
    "                  output_names=['output'],   # the model's output names\n",
    "                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes\n",
    "                                'output': {0: 'batch_size'}})\n",
    "\n",
    "data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
    "\n",
    "data = dict(input_data = [data_array])\n",
    "\n",
    "# Serialize data into file:\n",
    "json.dump(data, open(\"input.json\", 'w'))\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We now define a function that will create a new anvil instance to which we will deploy our test contract. This contract will contain in its storage the data that we will read from and attest to. In production you would not need to set up a local anvil instance. Instead, you would replace RPC_URL with the actual RPC endpoint of the chain you are deploying your verifiers to, reading from the data on said chain."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import subprocess\n",
    "import time\n",
    "import threading\n",
    "\n",
    "# make sure anvil is running locally\n",
    "# $ anvil -p 3030\n",
    "\n",
    "RPC_URL = \"http://localhost:3030\"\n",
    "\n",
    "# Save process globally\n",
    "anvil_process = None\n",
    "\n",
    "def start_anvil():\n",
    "    global anvil_process\n",
    "    if anvil_process is None:\n",
    "        anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
    "        if anvil_process.returncode is not None:\n",
    "            raise Exception(\"failed to start anvil process\")\n",
    "        time.sleep(3)\n",
    "\n",
    "def stop_anvil():\n",
    "    global anvil_process\n",
    "    if anvil_process is not None:\n",
    "        anvil_process.terminate()\n",
    "        anvil_process = None\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We define our `PyRunArgs` object, which contains the visibility parameters for our model. \n",
    "- `input_visibility` defines the visibility of the model inputs\n",
    "- `param_visibility` defines the visibility of the model weights, constants and parameters \n",
    "- `output_visibility` defines the visibility of the model outputs\n",
    "\n",
    "Here we create the following setup:\n",
    "- `input_visibility`: \"private\"\n",
    "- `param_visibility`: \"private\"\n",
    "- `output_visibility`: \"hashed\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import ezkl\n",
    "\n",
    "model_path = os.path.join('network.onnx')\n",
    "compiled_model_path = os.path.join('network.compiled')\n",
    "pk_path = os.path.join('test.pk')\n",
    "vk_path = os.path.join('test.vk')\n",
    "settings_path = os.path.join('settings.json')\n",
    "srs_path = os.path.join('kzg.srs')\n",
    "data_path = os.path.join('input.json')\n",
    "\n",
    "run_args = ezkl.PyRunArgs()\n",
    "run_args.input_visibility = \"private\"\n",
    "run_args.param_visibility = \"private\"\n",
    "run_args.output_visibility = \"hashed\"\n",
    "run_args.variables = [(\"batch_size\", 1)]\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we generate a settings file. This file basically instantiates a bunch of parameters that determine the circuit's shape, size, etc. Because of the way we represent nonlinearities in the circuit (using Halo2's [lookup tables](https://zcash.github.io/halo2/design/proving-system/lookup.html)), it is often best to _calibrate_ this settings file as some data can fall out of range of these lookups.\n",
    "\n",
    "You can pass a dataset for calibration that will be representative of real inputs you might find if and when you deploy the prover. Here we create a dummy calibration dataset for demonstration purposes. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!RUST_LOG=trace\n",
    "# TODO: Dictionary outputs\n",
    "res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
    "assert res == True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# generate a bunch of dummy calibration data\n",
    "cal_data = {\n",
    "    \"input_data\": [(0.1*torch.rand(2, *[3, 2, 2])).flatten().tolist()],\n",
    "}\n",
    "\n",
    "cal_path = os.path.join('val_data.json')\n",
    "# save as json file\n",
    "with open(cal_path, \"w\") as f:\n",
    "    json.dump(cal_data, f)\n",
    "\n",
    "res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
    "assert res == True"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As we use Halo2 with KZG-commitments we need an SRS from (preferably) a multi-party trusted setup ceremony. For an overview of the procedures for such a ceremony check out [this page](https://blog.ethereum.org/2023/01/16/announcing-kzg-ceremony). The `get_srs` command retrieves a correctly sized SRS given the calibrated settings file from [here](https://github.com/han0110/halo2-kzg-srs).\n",
    "\n",
    "These SRS were generated with [this](https://github.com/privacy-scaling-explorations/perpetualpowersoftau) ceremony. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "res = await ezkl.get_srs(settings_path)\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We now need to generate the circuit witness. These are the model outputs (and any hashes) that are generated when feeding the previously generated `input.json` through the circuit / model. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!export RUST_BACKTRACE=1\n",
    "\n",
    "witness_path = \"witness.json\"\n",
    "\n",
    "res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(ezkl.felt_to_big_endian(res['processed_outputs']['poseidon_hash'][0]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We now post the hashes of the outputs to the chain. This is the data that will be read from and attested to."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from web3 import Web3, HTTPProvider\n",
    "from solcx import compile_standard\n",
    "from decimal import Decimal\n",
    "import json\n",
    "import os\n",
    "import torch\n",
    "\n",
    "\n",
    "# setup web3 instance\n",
    "w3 = Web3(HTTPProvider(RPC_URL))\n",
    "\n",
    "def test_on_chain_data(res):\n",
    "    # Step 0: Convert the tensor to a flat list\n",
    "    data = [int(ezkl.felt_to_big_endian(res['processed_outputs']['poseidon_hash'][0]), 0)]\n",
    "\n",
    "    # Step 1: Prepare the data\n",
    "    # Step 2: Prepare and compile the contract.\n",
    "    # We are using a test contract here but in production you would\n",
    "    # use whatever contract you are fetching data from.\n",
    "    contract_source_code = '''\n",
    "    // SPDX-License-Identifier: UNLICENSED\n",
    "    pragma solidity ^0.8.17;\n",
    "\n",
    "    contract TestReads {\n",
    "\n",
    "        uint[] public arr;\n",
    "        constructor(uint256[] memory _numbers) {\n",
    "            for(uint256 i = 0; i < _numbers.length; i++) {\n",
    "                arr.push(_numbers[i]);\n",
    "            }\n",
    "        }\n",
    "    }\n",
    "    '''\n",
    "\n",
    "    compiled_sol = compile_standard({\n",
    "        \"language\": \"Solidity\",\n",
    "        \"sources\": {\"testreads.sol\": {\"content\": contract_source_code}},\n",
    "        \"settings\": {\"outputSelection\": {\"*\": {\"*\": [\"metadata\", \"evm.bytecode\", \"abi\"]}}}\n",
    "    })\n",
    "\n",
    "    # Get bytecode\n",
    "    bytecode = compiled_sol['contracts']['testreads.sol']['TestReads']['evm']['bytecode']['object']\n",
    "\n",
    "    # Get ABI\n",
    "    # In production if you are reading from really large contracts you can just use\n",
    "    # a stripped down version of the ABI of the contract you are calling, containing only the view functions you will fetch data from.\n",
    "    abi = json.loads(compiled_sol['contracts']['testreads.sol']['TestReads']['metadata'])['output']['abi']\n",
    "\n",
    "    # Step 3: Deploy the contract\n",
    "    TestReads = w3.eth.contract(abi=abi, bytecode=bytecode)\n",
    "    tx_hash = TestReads.constructor(data).transact()\n",
    "    tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)\n",
    "    # If you are deploying to production you can skip the 3 lines of code above and just instantiate the contract like this,\n",
    "    # passing the address and abi of the contract you are fetching data from.\n",
    "    contract = w3.eth.contract(address=tx_receipt['contractAddress'], abi=abi)\n",
    "\n",
    "    # Step 4: Interact with the contract\n",
    "    calldata = []\n",
    "    for i, _ in enumerate(data):\n",
    "        call = contract.functions.arr(i).build_transaction()\n",
    "        calldata.append((call['data'][2:], 0))\n",
    "\n",
    "    # Prepare the calls_to_account object\n",
    "    # If you were calling view functions across multiple contracts,\n",
    "    # you would have multiple entries in the calls_to_account array,\n",
    "    # one for each contract.\n",
    "    calls_to_account = [{\n",
    "        'call_data': calldata,\n",
    "        'address': contract.address[2:], # remove the '0x' prefix\n",
    "    }]\n",
    "\n",
    "    print(f'calls_to_account: {calls_to_account}')\n",
    "\n",
    "    return calls_to_account\n",
    "\n",
    "# Now let's start the Anvil process. You don't need to do this if you are deploying to a non-local chain.\n",
    "start_anvil()\n",
    "\n",
    "# Now let's call our function, passing in the same input tensor we used to export the model 2 cells above.\n",
    "calls_to_account = test_on_chain_data(res)\n",
    "\n",
    "data = dict(input_data = [data_array], output_data = {'rpc': RPC_URL, 'calls': calls_to_account })\n",
    "\n",
    "# Serialize on-chain data into file:\n",
    "json.dump(data, open(\"input.json\", 'w'))\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Here we setup verifying and proving keys for the circuit. As the name suggests the proving key is needed for ... proving and the verifying key is needed for ... verifying. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# HERE WE SETUP THE CIRCUIT PARAMS\n",
    "# WE GOT KEYS\n",
    "# WE GOT CIRCUIT PARAMETERS\n",
    "# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
    "res = ezkl.setup(\n",
    "    compiled_model_path,\n",
    "    vk_path,\n",
    "    pk_path,\n",
    ")\n",
    "\n",
    "assert res == True\n",
    "assert os.path.isfile(vk_path)\n",
    "assert os.path.isfile(pk_path)\n",
    "assert os.path.isfile(settings_path)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we generate a full proof. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# GENERATE A PROOF\n",
    "\n",
    "proof_path = os.path.join('test.pf')\n",
    "\n",
    "res = ezkl.prove(\n",
    "    witness_path,\n",
    "    compiled_model_path,\n",
    "    pk_path,\n",
    "    proof_path,\n",
    "    \"single\",\n",
    ")\n",
    "\n",
    "print(res)\n",
    "assert os.path.isfile(proof_path)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "And verify it as a sanity check. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# VERIFY IT\n",
    "\n",
    "res = ezkl.verify(\n",
    "    proof_path,\n",
    "    settings_path,\n",
    "    vk_path,\n",
    ")\n",
    "\n",
    "assert res == True\n",
    "print(\"verified\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We can now create and then deploy a vanilla evm verifier."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "abi_path = 'test.abi'\n",
    "sol_code_path = 'test.sol'\n",
    "\n",
    "res = await ezkl.create_evm_verifier(\n",
    "    vk_path,\n",
    "    settings_path,\n",
    "    sol_code_path,\n",
    "    abi_path,\n",
    ")\n",
    "assert res == True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "addr_path_verifier = \"addr_verifier.txt\"\n",
    "\n",
    "res = await ezkl.deploy_evm(\n",
    "    addr_path_verifier,\n",
    "    sol_code_path,\n",
    "    'http://127.0.0.1:3030'\n",
    ")\n",
    "\n",
    "assert res == True"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "With the vanilla verifier deployed, we can now create the data attestation contract, which will read in the instances from the calldata to the verifier, attest to them, call the verifier and then return the result. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "abi_path = 'test.abi'\n",
    "sol_code_path = 'test.sol'\n",
    "input_path = 'input.json'\n",
    "\n",
    "res = await ezkl.create_evm_data_attestation(\n",
    "    input_path,\n",
    "    settings_path,\n",
    "    sol_code_path,\n",
    "    abi_path,\n",
    ")"
   ]
  },
  {
   "attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can deploy the data attest verifier contract. For security reasons, this binding will only deploy to a local anvil instance, using accounts generated by anvil. \n",
|
||||
"So should only be used for testing purposes."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"addr_path_da = \"addr_da.txt\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.deploy_da_evm(\n",
|
||||
" addr_path_da,\n",
|
||||
" input_path,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" )\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Call the view only verify method on the contract to verify the proof. Since it is a view function this is safe to use in production since you don't have to pass your private key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# read the verifier address\n",
|
||||
"addr_verifier = None\n",
|
||||
"with open(addr_path_verifier, 'r') as f:\n",
|
||||
" addr = f.read()\n",
|
||||
"#read the data attestation address\n",
|
||||
"addr_da = None\n",
|
||||
"with open(addr_path_da, 'r') as f:\n",
|
||||
" addr_da = f.read()\n",
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" proof_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" addr_da,\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "ezkl",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.15"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -150,7 +150,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
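{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note: as the hunk above shows, `calibrate_settings` is now a coroutine in the Python bindings and must be awaited (the same applies to `gen_witness` further down). A minimal sketch, assuming you are calling it from a plain script with no running event loop (names taken from the surrounding notebook):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import asyncio\n",
"import ezkl\n",
"\n",
"async def calibrate():\n",
"    # the call is awaited now, mirroring the change in this diff\n",
"    return await ezkl.calibrate_settings(data_path, model_path, settings_path, 'resources')\n",
"\n",
"# in a plain script (Jupyter already runs an event loop and can `await` directly)\n",
"res = asyncio.run(calibrate())\n",
"assert res == True"
]
},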
|
||||
@@ -170,7 +170,7 @@
|
||||
"with open(cal_path, \"w\") as f:\n",
|
||||
" json.dump(cal_data, f)\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -204,7 +204,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -437,7 +437,7 @@
|
||||
"\n",
|
||||
"# Optimize for resources, we cap logrows at 12 to reduce setup and proving time, at the expense of accuracy\n",
|
||||
"# You may want to increase the max logrows if accuracy is a concern\n",
|
||||
"res = ezkl.calibrate_settings(target = \"resources\", max_logrows = 12, scales = [2])"
|
||||
"res = await ezkl.calibrate_settings(target = \"resources\", max_logrows = 12, scales = [2])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -526,7 +526,7 @@
|
||||
"# now generate the witness file\n",
|
||||
"witness_path = os.path.join('witness.json')\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness()\n",
|
||||
"res = await ezkl.gen_witness()\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -736,4 +736,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -467,7 +467,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
@@ -508,7 +508,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -77,7 +77,6 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"gip_run_args = ezkl.PyRunArgs()\n",
|
||||
"gip_run_args.ignore_range_check_inputs_outputs = True\n",
|
||||
"gip_run_args.input_visibility = \"polycommit\" # matrix and generalized inverse commitments\n",
|
||||
"gip_run_args.output_visibility = \"fixed\" # no parameters used\n",
|
||||
"gip_run_args.param_visibility = \"fixed\" # should be Tensor(True)"
|
||||
@@ -196,7 +195,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -237,7 +236,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -336,7 +335,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.13"
|
||||
"version": "3.9.15"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -179,7 +179,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -214,7 +214,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -241,7 +241,7 @@
|
||||
"with open(cal_path, \"w\") as f:\n",
|
||||
" json.dump(cal_data, f)\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -291,7 +291,7 @@
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -453,8 +453,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" address_path,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
@@ -474,8 +474,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path,\n",
|
||||
" \"http://127.0.0.1:3030\"\n",
|
||||
")\n",
|
||||
"assert res == True"
|
||||
]
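{
"cell_type": "markdown",
"metadata": {},
"source": [
"The two hunks above reorder positional arguments: the RPC URL now comes last in both `deploy_evm` and `verify_evm`. A minimal sketch of the updated call shapes, using the names from the surrounding notebook:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# updated argument order per this diff: the RPC URL moves to the end\n",
"res = await ezkl.deploy_evm(address_path, sol_code_path, 'http://127.0.0.1:3030')\n",
"assert res == True\n",
"\n",
"res = await ezkl.verify_evm(addr, proof_path, 'http://127.0.0.1:3030')\n",
"assert res == True"
]
},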
|
||||
|
||||
@@ -152,7 +152,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -188,7 +188,7 @@
|
||||
"# now generate the witness file \n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -155,7 +155,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -190,7 +190,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -233,7 +233,7 @@
|
||||
"with open(cal_path, \"w\") as f:\n",
|
||||
" json.dump(cal_data, f)\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -315,7 +315,7 @@
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n"
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -462,8 +462,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" address_path,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
@@ -483,8 +483,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path,\n",
|
||||
" \"http://127.0.0.1:3030\"\n",
|
||||
")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
|
||||
@@ -193,7 +193,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -228,7 +228,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -111,12 +111,7 @@
|
||||
" output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])\n",
|
||||
"\n",
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(\"input.json\", 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# note that you can also call the following function to generate random data for the model\n",
|
||||
"# it is functionally equivalent to the code above\n",
|
||||
"ezkl.gen_random_data()\n"
|
||||
"json.dump(data, open(\"input.json\", 'w'))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -147,7 +142,7 @@
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -182,7 +177,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -347,7 +347,7 @@
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -383,7 +383,7 @@
|
||||
"# now generate the witness file \n",
|
||||
"witness_path = \"gan_witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -142,7 +142,7 @@
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -177,7 +177,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -271,9 +271,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.7"
|
||||
"version": "3.12.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
|
||||
@@ -139,7 +139,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -193,7 +193,7 @@
|
||||
"# now generate the witness file \n",
|
||||
"witness_path = \"lstmwitness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
456
examples/notebooks/mean_postgres.ipynb
Normal file
@@ -0,0 +1,456 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Mean of ERC20 transfer amounts\n",
|
||||
"\n",
|
||||
"This notebook shows how to calculate the mean of ERC20 transfer amounts, pulling data in from a Postgres database. First we install and get the necessary libraries running. \n",
|
||||
"The first of which is [shovel](https://indexsupply.com/shovel/docs/#getting-started), which is a library that allows us to pull data from the Ethereum blockchain into a Postgres database.\n",
|
||||
"\n",
|
||||
"Make sure you install postgres if needed https://indexsupply.com/shovel/docs/#getting-started. \n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"import json\n",
|
||||
"import time\n",
|
||||
"import subprocess\n",
|
||||
"\n",
|
||||
"# swap out for the relevant linux/amd64, darwin/arm64, darwin/amd64, windows/amd64\n",
|
||||
"os.system(\"curl -LO https://indexsupply.net/bin/1.0/linux/amd64/shovel\")\n",
|
||||
"os.system(\"chmod +x shovel\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"os.environ[\"PG_URL\"] = \"postgres://\" + getpass.getuser() + \":@localhost:5432/shovel\"\n",
|
||||
"\n",
|
||||
"# create a config.json file with the following contents\n",
|
||||
"config = {\n",
|
||||
" \"pg_url\": \"$PG_URL\",\n",
|
||||
" \"eth_sources\": [\n",
|
||||
" {\"name\": \"mainnet\", \"chain_id\": 1, \"url\": \"https://ethereum-rpc.publicnode.com\"},\n",
|
||||
" {\"name\": \"base\", \"chain_id\": 8453, \"url\": \"https://base-rpc.publicnode.com\"}\n",
|
||||
" ],\n",
|
||||
" \"integrations\": [{\n",
|
||||
" \"name\": \"usdc_transfer\",\n",
|
||||
" \"enabled\": True,\n",
|
||||
" \"sources\": [{\"name\": \"mainnet\"}, {\"name\": \"base\"}],\n",
|
||||
" \"table\": {\n",
|
||||
" \"name\": \"usdc\",\n",
|
||||
" \"columns\": [\n",
|
||||
" {\"name\": \"log_addr\", \"type\": \"bytea\"},\n",
|
||||
" {\"name\": \"block_num\", \"type\": \"numeric\"},\n",
|
||||
" {\"name\": \"f\", \"type\": \"bytea\"},\n",
|
||||
" {\"name\": \"t\", \"type\": \"bytea\"},\n",
|
||||
" {\"name\": \"v\", \"type\": \"numeric\"}\n",
|
||||
" ]\n",
|
||||
" },\n",
|
||||
" \"block\": [\n",
|
||||
" {\"name\": \"block_num\", \"column\": \"block_num\"},\n",
|
||||
" {\n",
|
||||
" \"name\": \"log_addr\",\n",
|
||||
" \"column\": \"log_addr\",\n",
|
||||
" \"filter_op\": \"contains\",\n",
|
||||
" \"filter_arg\": [\n",
|
||||
" \"a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48\",\n",
|
||||
" \"833589fCD6eDb6E08f4c7C32D4f71b54bdA02913\"\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
" ],\n",
|
||||
" \"event\": {\n",
|
||||
" \"name\": \"Transfer\",\n",
|
||||
" \"type\": \"event\",\n",
|
||||
" \"anonymous\": False,\n",
|
||||
" \"inputs\": [\n",
|
||||
" {\"indexed\": True, \"name\": \"from\", \"type\": \"address\", \"column\": \"f\"},\n",
|
||||
" {\"indexed\": True, \"name\": \"to\", \"type\": \"address\", \"column\": \"t\"},\n",
|
||||
" {\"indexed\": False, \"name\": \"value\", \"type\": \"uint256\", \"column\": \"v\"}\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
" }]\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# write the config to a file\n",
|
||||
"with open(\"config.json\", \"w\") as f:\n",
|
||||
" f.write(json.dumps(config))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# print the two env variables\n",
|
||||
"os.system(\"echo $PG_URL\")\n",
|
||||
"\n",
|
||||
"os.system(\"createdb -h localhost -p 5432 shovel\")\n",
|
||||
"\n",
|
||||
"os.system(\"echo shovel is now installed. starting:\")\n",
|
||||
"\n",
|
||||
"command = [\"./shovel\", \"-config\", \"config.json\"]\n",
|
||||
"proc = subprocess.Popen(command)\n",
|
||||
"\n",
|
||||
"os.system(\"echo shovel started.\")\n",
|
||||
"\n",
|
||||
"time.sleep(10)\n",
|
||||
"\n",
|
||||
"# after we've fetched some data -- kill the process\n",
|
||||
"proc.terminate()\n",
|
||||
"\n"
|
||||
]
|
||||
},
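{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before moving on, it is worth sanity-checking that shovel actually wrote rows into the `usdc` table defined in the config above. A minimal sketch using `psql` (assuming it is on your PATH):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# count the indexed transfer rows; the table name comes from the config above\n",
"os.system(\"psql -h localhost -p 5432 -d shovel -c 'SELECT count(*) FROM usdc;'\")"
]
},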
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "2wIAHwqH2_mo"
|
||||
},
|
||||
"source": [
|
||||
"**Import Dependencies**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "9Byiv2Nc2MsK"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# check if notebook is in colab\n",
|
||||
"try:\n",
|
||||
" # install ezkl\n",
|
||||
" import google.colab\n",
|
||||
" import subprocess\n",
|
||||
" import sys\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
|
||||
"\n",
|
||||
"# rely on local installation of ezkl if the notebook is not in colab\n",
|
||||
"except:\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"import ezkl\n",
|
||||
"import torch\n",
|
||||
"import datetime\n",
|
||||
"import pandas as pd\n",
|
||||
"import requests\n",
|
||||
"import json\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"import logging\n",
|
||||
"# # uncomment for more descriptive logging \n",
|
||||
"FORMAT = '%(levelname)s %(name)s %(asctime)-15s %(filename)s:%(lineno)d %(message)s'\n",
|
||||
"logging.basicConfig(format=FORMAT)\n",
|
||||
"logging.getLogger().setLevel(logging.DEBUG)\n",
|
||||
"\n",
|
||||
"print(\"ezkl version: \", ezkl.__version__)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "osjj-0Ta3E8O"
|
||||
},
|
||||
"source": [
|
||||
"**Create Computational Graph**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "x1vl9ZXF3EEW",
|
||||
"outputId": "bda21d02-fe5f-4fb2-8106-f51a8e2e67aa"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from torch import nn\n",
|
||||
"import torch\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class Model(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(Model, self).__init__()\n",
|
||||
"\n",
|
||||
" # x is a time series \n",
|
||||
" def forward(self, x):\n",
|
||||
" return [torch.mean(x)]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"circuit = Model()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"x = 0.1*torch.rand(1,*[1,5], requires_grad=True)\n",
|
||||
"\n",
|
||||
"# # print(torch.__version__)\n",
|
||||
"device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
|
||||
"\n",
|
||||
"print(device)\n",
|
||||
"\n",
|
||||
"circuit.to(device)\n",
|
||||
"\n",
|
||||
"# Flips the neural net into inference mode\n",
|
||||
"circuit.eval()\n",
|
||||
"\n",
|
||||
"# Export the model\n",
|
||||
"torch.onnx.export(circuit, # model being run\n",
|
||||
" x, # model input (or a tuple for multiple inputs)\n",
|
||||
" \"lol.onnx\", # where to save the model (can be a file or file-like object)\n",
|
||||
" export_params=True, # store the trained parameter weights inside the model file\n",
|
||||
" opset_version=11, # the ONNX version to export the model to\n",
|
||||
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
|
||||
" input_names = ['input'], # the model's input names\n",
|
||||
" output_names = ['output'], # the model's output names\n",
|
||||
" dynamic_axes={'input' : {0 : 'batch_size'}, # variable length axes\n",
|
||||
" 'output' : {0 : 'batch_size'}})\n",
|
||||
"\n",
|
||||
"# export(circuit, input_shape=[1, 20])\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "E3qCeX-X5xqd"
|
||||
},
|
||||
"source": [
|
||||
"**Set Data Source and Get Data**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "6RAMplxk5xPk",
|
||||
"outputId": "bd2158fe-0c00-44fd-e632-6a3f70cdb7c9"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"# make an input.json file from the df above\n",
|
||||
"input_filename = os.path.join('input.json')\n",
|
||||
"\n",
|
||||
"pg_input_file = dict(input_data = {\n",
|
||||
" \"host\": \"localhost\",\n",
|
||||
" # make sure you replace this with your own username\n",
|
||||
" \"user\": getpass.getuser(),\n",
|
||||
" \"dbname\": \"shovel\",\n",
|
||||
" \"password\": \"\",\n",
|
||||
" \"query\": \"SELECT v FROM usdc ORDER BY block_num DESC LIMIT 5\",\n",
|
||||
" \"port\": \"5432\",\n",
|
||||
"})\n",
|
||||
"\n",
|
||||
"json_formatted_str = json.dumps(pg_input_file, indent=2)\n",
|
||||
"print(json_formatted_str)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # Serialize data into file:\n",
|
||||
"json.dump(pg_input_file, open(input_filename, 'w' ))\n"
|
||||
]
|
||||
},
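{
"cell_type": "markdown",
"metadata": {},
"source": [
"To preview exactly the rows ezkl will read, you can run the same query directly. A minimal sketch, assuming the `psycopg2` package is installed (it is not used elsewhere in this notebook):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import psycopg2\n",
"\n",
"# same connection parameters and query as in input.json above\n",
"conn = psycopg2.connect(host='localhost', port=5432, dbname='shovel',\n",
"                        user=getpass.getuser(), password='')\n",
"with conn.cursor() as cur:\n",
"    cur.execute('SELECT v FROM usdc ORDER BY block_num DESC LIMIT 5')\n",
"    print(cur.fetchall())\n",
"conn.close()"
]
},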
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# this corresponds to 4 batches\n",
|
||||
"calibration_filename = os.path.join('calibration.json')\n",
|
||||
"\n",
|
||||
"pg_cal_file = dict(input_data = {\n",
|
||||
" \"host\": \"localhost\",\n",
|
||||
" # make sure you replace this with your own username\n",
|
||||
" \"user\": getpass.getuser(),\n",
|
||||
" \"dbname\": \"shovel\",\n",
|
||||
" \"password\": \"\",\n",
|
||||
" \"query\": \"SELECT v FROM usdc ORDER BY block_num DESC LIMIT 20\",\n",
|
||||
" \"port\": \"5432\",\n",
|
||||
"})\n",
|
||||
"\n",
|
||||
" # Serialize data into file:\n",
|
||||
"json.dump( pg_cal_file, open(calibration_filename, 'w' ))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "eLJ7oirQ_HQR"
|
||||
},
|
||||
"source": [
|
||||
"**EZKL Workflow**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "rNw0C9QL6W88"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import subprocess\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"onnx_filename = os.path.join('lol.onnx')\n",
|
||||
"compiled_filename = os.path.join('lol.compiled')\n",
|
||||
"settings_filename = os.path.join('settings.json')\n",
|
||||
"\n",
|
||||
"# Generate settings using ezkl\n",
|
||||
"res = ezkl.gen_settings(onnx_filename, settings_filename)\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = await ezkl.calibrate_settings(input_filename, onnx_filename, settings_filename, \"resources\")\n",
|
||||
"\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"ezkl.compile_circuit(onnx_filename, compiled_filename, settings_filename)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "4MmE9SX66_Il",
|
||||
"outputId": "16403639-66a4-4280-ac7f-6966b75de5a3"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# generate settings\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# show the settings.json\n",
|
||||
"with open(\"settings.json\") as f:\n",
|
||||
" data = json.load(f)\n",
|
||||
" json_formatted_str = json.dumps(data, indent=2)\n",
|
||||
"\n",
|
||||
" print(json_formatted_str)\n",
|
||||
"\n",
|
||||
"assert os.path.exists(\"settings.json\")\n",
|
||||
"assert os.path.exists(\"input.json\")\n",
|
||||
"assert os.path.exists(\"lol.onnx\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "fULvvnK7_CMb"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pk_path = os.path.join('test.pk')\n",
|
||||
"vk_path = os.path.join('test.vk')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# setup the proof\n",
|
||||
"res = ezkl.setup(\n",
|
||||
" compiled_filename,\n",
|
||||
" vk_path,\n",
|
||||
" pk_path\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"assert os.path.isfile(vk_path)\n",
|
||||
"assert os.path.isfile(pk_path)\n",
|
||||
"assert os.path.isfile(settings_filename)\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"# generate the witness\n",
|
||||
"res = await ezkl.gen_witness(\n",
|
||||
" input_filename,\n",
|
||||
" compiled_filename,\n",
|
||||
" witness_path\n",
|
||||
" )\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "Oog3j6Kd-Wed",
|
||||
"outputId": "5839d0c1-5b43-476e-c2f8-6707de562260"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# prove the zk circuit\n",
|
||||
"# GENERATE A PROOF\n",
|
||||
"proof_path = os.path.join('test.pf')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"proof = ezkl.prove(\n",
|
||||
" witness_path,\n",
|
||||
" compiled_filename,\n",
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \"single\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print(\"proved\")\n",
|
||||
"\n",
|
||||
"assert os.path.isfile(proof_path)\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
@@ -323,7 +323,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[2,7])\n",
|
||||
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[2,7])\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
@@ -362,7 +362,7 @@
|
||||
"# now generate the witness file\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -504,8 +504,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" address_path,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
@@ -527,8 +527,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path\n",
|
||||
" proof_path,\n",
|
||||
" \"http://127.0.0.1:3030\"\n",
|
||||
")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
|
||||
@@ -289,7 +289,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[0,6])"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[0,6])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -321,7 +321,7 @@
|
||||
"# now generate the witness file \n",
|
||||
"witness_path = \"gan_witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -425,4 +425,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -215,7 +215,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -247,7 +247,7 @@
|
||||
"# now generate the witness file\n",
|
||||
"witness_path = \"ae_witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -451,7 +451,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n",
|
||||
"print(\"verified\")"
|
||||
]
|
||||
@@ -485,7 +485,7 @@
|
||||
"# now generate the witness file \n",
|
||||
"witness_path = \"vae_witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -590,4 +590,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -845,7 +845,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", max_logrows = 20, scales = [3])\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", max_logrows = 20, scales = [3])\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
@@ -881,7 +881,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -1,766 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"This is a zk version of the tutorial found [here](https://github.com/bentrevett/pytorch-sentiment-analysis/blob/main/1%20-%20Neural%20Bag%20of%20Words.ipynb). The original tutorial is part of the PyTorch Sentiment Analysis series by Ben Trevett.\n",
|
||||
"\n",
|
||||
"1 - NBoW\n",
|
||||
"\n",
|
||||
"In this series we'll be building a machine learning model to perform sentiment analysis -- a subset of text classification where the task is to detect if a given sentence is positive or negative -- using PyTorch and torchtext. The dataset used will be movie reviews from the IMDb dataset, which we'll obtain using the datasets library.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"Preparing Data\n",
|
||||
"\n",
|
||||
"Before we can implement our NBoW model, we first have to perform quite a few steps to get our data ready to use. NLP usually requires quite a lot of data wrangling beforehand, though libraries such as datasets and torchtext handle most of this for us.\n",
|
||||
"\n",
|
||||
"The steps to take are:\n",
|
||||
"\n",
|
||||
" 1. importing modules\n",
|
||||
" 2. loading data\n",
|
||||
" 3. tokenizing data\n",
|
||||
" 4. creating data splits\n",
|
||||
" 5. creating a vocabulary\n",
|
||||
" 6. numericalizing data\n",
|
||||
" 7. creating the data loaders\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install torchtex"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import collections\n",
|
||||
"\n",
|
||||
"import datasets\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import numpy as np\n",
|
||||
"import torch\n",
|
||||
"import torch.nn as nn\n",
|
||||
"import torch.optim as optim\n",
|
||||
"import torchtext\n",
|
||||
"import tqdm\n",
|
||||
"\n",
|
||||
"# It is usually good practice to run your experiments multiple times with different random seeds -- both to measure the variance of your model and also to avoid having results only calculated with either \"good\" or \"bad\" seeds, i.e. being very lucky or unlucky with the randomness in the training process.\n",
|
||||
"\n",
|
||||
"seed = 1234\n",
|
||||
"\n",
|
||||
"np.random.seed(seed)\n",
|
||||
"torch.manual_seed(seed)\n",
|
||||
"torch.cuda.manual_seed(seed)\n",
|
||||
"torch.backends.cudnn.deterministic = True\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"train_data, test_data = datasets.load_dataset(\"imdb\", split=[\"train\", \"test\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can check the features attribute of a split to get more information about the features. We can see that text is a Value of dtype=string -- in other words, it's a string -- and that label is a ClassLabel. A ClassLabel means the feature is an integer representation of which class the example belongs to. num_classes=2 means that our labels are one of two values, 0 or 1, and names=['neg', 'pos'] gives us the human-readable versions of those values. Thus, a label of 0 means the example is a negative review and a label of 1 means the example is a positive review."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"train_data.features\n"
|
||||
]
|
||||
},
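{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick check, the `ClassLabel` feature exposes the mapping between the integer labels and their names directly:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# expected: ['neg', 'pos'], so 0 -> 'neg' and 1 -> 'pos'\n",
"label_feature = train_data.features[\"label\"]\n",
"print(label_feature.names)\n",
"print(label_feature.int2str(0), label_feature.int2str(1))"
]
},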
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"train_data[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"One of the first things we need to do to our data is tokenize it. Machine learning models aren't designed to handle strings, they're design to handle numbers. So what we need to do is break down our string into individual tokens, and then convert these tokens to numbers. We'll get to the conversion later, but first we'll look at tokenization.\n",
|
||||
"\n",
|
||||
"Tokenization involves using a tokenizer to process the strings in our dataset. A tokenizer is a function that goes from a string to a list of strings. There are many types of tokenizers available, but we're going to use a relatively simple one provided by torchtext called the basic_english tokenizer. We load our tokenizer as such:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tokenizer = torchtext.data.utils.get_tokenizer(\"basic_english\")"
|
||||
]
|
||||
},
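{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check: `basic_english` lowercases its input and splits punctuation into separate tokens, so we can see the string-to-list-of-strings behaviour directly:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# expected output: ['hello', 'world', '!', 'this', 'film', 'is', 'great', '.']\n",
"print(tokenizer(\"Hello World! This film is great.\"))"
]
},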
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def tokenize_example(example, tokenizer, max_length):\n",
|
||||
" tokens = tokenizer(example[\"text\"])[:max_length]\n",
|
||||
" return {\"tokens\": tokens}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"max_length = 256\n",
|
||||
"\n",
|
||||
"train_data = train_data.map(\n",
|
||||
" tokenize_example, fn_kwargs={\"tokenizer\": tokenizer, \"max_length\": max_length}\n",
|
||||
")\n",
|
||||
"test_data = test_data.map(\n",
|
||||
" tokenize_example, fn_kwargs={\"tokenizer\": tokenizer, \"max_length\": max_length}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# create validation data \n",
|
||||
"# Why have both a validation set and a test set? Your test set respresents the real world data that you'd see if you actually deployed this model. You won't be able to see what data your model will be fed once deployed, and your test set is supposed to reflect that. Every time we tune our model hyperparameters or training set-up to make it do a bit better on the test set, we are leak information from the test set into the training process. If we do this too often then we begin to overfit on the test set. Hence, we need some data which can act as a \"proxy\" test set which we can look at more frequently in order to evaluate how well our model actually does on unseen data -- this is the validation set.\n",
|
||||
"\n",
|
||||
"test_size = 0.25\n",
|
||||
"\n",
|
||||
"train_valid_data = train_data.train_test_split(test_size=test_size)\n",
|
||||
"train_data = train_valid_data[\"train\"]\n",
|
||||
"valid_data = train_valid_data[\"test\"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we have to build a vocabulary. This is look-up table where every unique token in your dataset has a corresponding index (an integer).\n",
|
||||
"\n",
|
||||
"We do this as machine learning models cannot operate on strings, only numerical vaslues. Each index is used to construct a one-hot vector for each token. A one-hot vector is a vector where all the elements are 0, except one, which is 1, and the dimensionality is the total number of unique tokens in your vocabulary, commonly denoted by V."
|
||||
]
|
||||
},
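{
"cell_type": "markdown",
"metadata": {},
"source": [
"To make the one-hot picture concrete: a token with vocabulary index i maps to a length-V vector with a single 1 at position i. A minimal illustrative sketch (the model below uses an nn.Embedding lookup instead of materializing these vectors):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch.nn.functional as F\n",
"\n",
"V = 10     # a pretend vocabulary size\n",
"index = 3  # a pretend token index\n",
"one_hot = F.one_hot(torch.tensor(index), num_classes=V)\n",
"print(one_hot)  # tensor([0, 0, 0, 1, 0, 0, 0, 0, 0, 0])"
]
},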
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"min_freq = 5\n",
|
||||
"special_tokens = [\"<unk>\", \"<pad>\"]\n",
|
||||
"\n",
|
||||
"vocab = torchtext.vocab.build_vocab_from_iterator(\n",
|
||||
" train_data[\"tokens\"],\n",
|
||||
" min_freq=min_freq,\n",
|
||||
" specials=special_tokens,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# We store the indices of the unknown and padding tokens (zero and one, respectively) in variables, as we'll use these further on in this notebook.\n",
|
||||
"\n",
|
||||
"unk_index = vocab[\"<unk>\"]\n",
|
||||
"pad_index = vocab[\"<pad>\"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"vocab.set_default_index(unk_index)\n",
|
||||
"\n",
|
||||
"# To look-up a list of tokens, we can use the vocabulary's lookup_indices method.\n",
|
||||
"vocab.lookup_indices([\"hello\", \"world\", \"some_token\", \"<pad>\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we have our vocabulary, we can numericalize our data. This involves converting the tokens within our dataset into indices. Similar to how we tokenized our data using the Dataset.map method, we'll define a function that takes an example and our vocabulary, gets the index for each token in each example and then creates an ids field which containes the numericalized tokens."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def numericalize_example(example, vocab):\n",
|
||||
" ids = vocab.lookup_indices(example[\"tokens\"])\n",
|
||||
" return {\"ids\": ids}\n",
|
||||
"\n",
|
||||
"train_data = train_data.map(numericalize_example, fn_kwargs={\"vocab\": vocab})\n",
|
||||
"valid_data = valid_data.map(numericalize_example, fn_kwargs={\"vocab\": vocab})\n",
|
||||
"test_data = test_data.map(numericalize_example, fn_kwargs={\"vocab\": vocab})\n",
|
||||
"\n",
|
||||
"train_data = train_data.with_format(type=\"torch\", columns=[\"ids\", \"label\"])\n",
|
||||
"valid_data = valid_data.with_format(type=\"torch\", columns=[\"ids\", \"label\"])\n",
|
||||
"test_data = test_data.with_format(type=\"torch\", columns=[\"ids\", \"label\"])\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The final step of preparing the data is creating the data loaders. We can iterate over a data loader to retrieve batches of examples. This is also where we will perform any padding that is necessary.\n",
|
||||
"\n",
|
||||
"We first need to define a function to collate a batch, consisting of a list of examples, into what we want our data loader to output.\n",
|
||||
"\n",
|
||||
"Here, our desired output from the data loader is a dictionary with keys of \"ids\" and \"label\".\n",
|
||||
"\n",
|
||||
"The value of batch[\"ids\"] should be a tensor of shape [batch size, length], where length is the length of the longest sentence (in terms of tokens) within the batch, and all sentences shorter than this should be padded to that length.\n",
|
||||
"\n",
|
||||
"The value of batch[\"label\"] should be a tensor of shape [batch size] consisting of the label for each sentence in the batch.\n",
|
||||
"\n",
|
||||
"We define a function, get_collate_fn, which is passed the pad token index and returns the actual collate function. Within the actual collate function, collate_fn, we get a list of \"ids\" tensors for each example in the batch, and then use the pad_sequence function, which converts the list of tensors into the desired [batch size, length] shaped tensor and performs padding using the specified pad_index. By default, pad_sequence will return a [length, batch size] shaped tensor, but by setting batch_first=True, these two dimensions are switched. We get a list of \"label\" tensors and convert the list of tensors into a single [batch size] shaped tensor."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_collate_fn(pad_index):\n",
|
||||
" def collate_fn(batch):\n",
|
||||
" batch_ids = [i[\"ids\"] for i in batch]\n",
|
||||
" batch_ids = nn.utils.rnn.pad_sequence(\n",
|
||||
" batch_ids, padding_value=pad_index, batch_first=True\n",
|
||||
" )\n",
|
||||
" batch_label = [i[\"label\"] for i in batch]\n",
|
||||
" batch_label = torch.stack(batch_label)\n",
|
||||
" batch = {\"ids\": batch_ids, \"label\": batch_label}\n",
|
||||
" return batch\n",
|
||||
"\n",
|
||||
" return collate_fn\n",
|
||||
"\n",
|
||||
"def get_data_loader(dataset, batch_size, pad_index, shuffle=False):\n",
|
||||
" collate_fn = get_collate_fn(pad_index)\n",
|
||||
" data_loader = torch.utils.data.DataLoader(\n",
|
||||
" dataset=dataset,\n",
|
||||
" batch_size=batch_size,\n",
|
||||
" collate_fn=collate_fn,\n",
|
||||
" shuffle=shuffle,\n",
|
||||
" )\n",
|
||||
" return data_loader\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"batch_size = 512\n",
|
||||
"\n",
|
||||
"train_data_loader = get_data_loader(train_data, batch_size, pad_index, shuffle=True)\n",
|
||||
"valid_data_loader = get_data_loader(valid_data, batch_size, pad_index)\n",
|
||||
"test_data_loader = get_data_loader(test_data, batch_size, pad_index)"
|
||||
]
|
||||
},
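{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can confirm the collation behaviour described above by pulling a single batch and checking shapes: `ids` should be [batch size, length] after padding and `label` should be [batch size]."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# grab one batch from the training loader and inspect the padded shapes\n",
"batch = next(iter(train_data_loader))\n",
"print(batch[\"ids\"].shape)    # torch.Size([512, length of longest sentence in batch])\n",
"print(batch[\"label\"].shape)  # torch.Size([512])"
]
},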
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"class NBoW(nn.Module):\n",
|
||||
" def __init__(self, vocab_size, embedding_dim, output_dim, pad_index):\n",
|
||||
" super().__init__()\n",
|
||||
" self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_index)\n",
|
||||
" self.fc = nn.Linear(embedding_dim, output_dim)\n",
|
||||
"\n",
|
||||
" def forward(self, ids):\n",
|
||||
" # ids = [batch size, seq len]\n",
|
||||
" embedded = self.embedding(ids)\n",
|
||||
" # embedded = [batch size, seq len, embedding dim]\n",
|
||||
" pooled = embedded.mean(dim=1)\n",
|
||||
" # pooled = [batch size, embedding dim]\n",
|
||||
" prediction = self.fc(pooled)\n",
|
||||
" # prediction = [batch size, output dim]\n",
|
||||
" return prediction\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"vocab_size = len(vocab)\n",
|
||||
"embedding_dim = 300\n",
|
||||
"output_dim = len(train_data.unique(\"label\"))\n",
|
||||
"\n",
|
||||
"model = NBoW(vocab_size, embedding_dim, output_dim, pad_index)\n",
|
||||
"\n",
|
||||
"def count_parameters(model):\n",
|
||||
" return sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print(f\"The model has {count_parameters(model):,} trainable parameters\")\n",
|
||||
"\n",
|
||||
"vectors = torchtext.vocab.GloVe()\n",
|
||||
"\n",
|
||||
"pretrained_embedding = vectors.get_vecs_by_tokens(vocab.get_itos())\n",
|
||||
"\n",
|
||||
"optimizer = optim.Adam(model.parameters())\n",
|
||||
"\n",
|
||||
"criterion = nn.CrossEntropyLoss()\n",
|
||||
"\n",
|
||||
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
|
||||
"\n",
|
||||
"model = model.to(device)\n",
|
||||
"criterion = criterion.to(device)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def train(data_loader, model, criterion, optimizer, device):\n",
|
||||
" model.train()\n",
|
||||
" epoch_losses = []\n",
|
||||
" epoch_accs = []\n",
|
||||
" for batch in tqdm.tqdm(data_loader, desc=\"training...\"):\n",
|
||||
" ids = batch[\"ids\"].to(device)\n",
|
||||
" label = batch[\"label\"].to(device)\n",
|
||||
" prediction = model(ids)\n",
|
||||
" loss = criterion(prediction, label)\n",
|
||||
" accuracy = get_accuracy(prediction, label)\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
" loss.backward()\n",
|
||||
" optimizer.step()\n",
|
||||
" epoch_losses.append(loss.item())\n",
|
||||
" epoch_accs.append(accuracy.item())\n",
|
||||
" return np.mean(epoch_losses), np.mean(epoch_accs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def evaluate(data_loader, model, criterion, device):\n",
|
||||
" model.eval()\n",
|
||||
" epoch_losses = []\n",
|
||||
" epoch_accs = []\n",
|
||||
" with torch.no_grad():\n",
|
||||
" for batch in tqdm.tqdm(data_loader, desc=\"evaluating...\"):\n",
|
||||
" ids = batch[\"ids\"].to(device)\n",
|
||||
" label = batch[\"label\"].to(device)\n",
|
||||
" prediction = model(ids)\n",
|
||||
" loss = criterion(prediction, label)\n",
|
||||
" accuracy = get_accuracy(prediction, label)\n",
|
||||
" epoch_losses.append(loss.item())\n",
|
||||
" epoch_accs.append(accuracy.item())\n",
|
||||
" return np.mean(epoch_losses), np.mean(epoch_accs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_accuracy(prediction, label):\n",
|
||||
" batch_size, _ = prediction.shape\n",
|
||||
" predicted_classes = prediction.argmax(dim=-1)\n",
|
||||
" correct_predictions = predicted_classes.eq(label).sum()\n",
|
||||
" accuracy = correct_predictions / batch_size\n",
|
||||
" return accuracy"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"n_epochs = 10\n",
|
||||
"best_valid_loss = float(\"inf\")\n",
|
||||
"\n",
|
||||
"metrics = collections.defaultdict(list)\n",
|
||||
"\n",
|
||||
"for epoch in range(n_epochs):\n",
|
||||
" train_loss, train_acc = train(\n",
|
||||
" train_data_loader, model, criterion, optimizer, device\n",
|
||||
" )\n",
|
||||
" valid_loss, valid_acc = evaluate(valid_data_loader, model, criterion, device)\n",
|
||||
" metrics[\"train_losses\"].append(train_loss)\n",
|
||||
" metrics[\"train_accs\"].append(train_acc)\n",
|
||||
" metrics[\"valid_losses\"].append(valid_loss)\n",
|
||||
" metrics[\"valid_accs\"].append(valid_acc)\n",
|
||||
" if valid_loss < best_valid_loss:\n",
|
||||
" best_valid_loss = valid_loss\n",
|
||||
" torch.save(model.state_dict(), \"nbow.pt\")\n",
|
||||
" print(f\"epoch: {epoch}\")\n",
|
||||
" print(f\"train_loss: {train_loss:.3f}, train_acc: {train_acc:.3f}\")\n",
|
||||
" print(f\"valid_loss: {valid_loss:.3f}, valid_acc: {valid_acc:.3f}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fig = plt.figure(figsize=(10, 6))\n",
|
||||
"ax = fig.add_subplot(1, 1, 1)\n",
|
||||
"ax.plot(metrics[\"train_losses\"], label=\"train loss\")\n",
|
||||
"ax.plot(metrics[\"valid_losses\"], label=\"valid loss\")\n",
|
||||
"ax.set_xlabel(\"epoch\")\n",
|
||||
"ax.set_ylabel(\"loss\")\n",
|
||||
"ax.set_xticks(range(n_epochs))\n",
|
||||
"ax.legend()\n",
|
||||
"ax.grid()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fig = plt.figure(figsize=(10, 6))\n",
|
||||
"ax = fig.add_subplot(1, 1, 1)\n",
|
||||
"ax.plot(metrics[\"train_accs\"], label=\"train accuracy\")\n",
|
||||
"ax.plot(metrics[\"valid_accs\"], label=\"valid accuracy\")\n",
|
||||
"ax.set_xlabel(\"epoch\")\n",
|
||||
"ax.set_ylabel(\"loss\")\n",
|
||||
"ax.set_xticks(range(n_epochs))\n",
|
||||
"ax.legend()\n",
|
||||
"ax.grid()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model.load_state_dict(torch.load(\"nbow.pt\"))\n",
|
||||
"\n",
|
||||
"test_loss, test_acc = evaluate(test_data_loader, model, criterion, device)\n",
|
||||
"\n",
|
||||
"print(f\"test_loss: {test_loss:.3f}, test_acc: {test_acc:.3f}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def predict_sentiment(text, model, tokenizer, vocab, device):\n",
|
||||
" tokens = tokenizer(text)\n",
|
||||
" ids = vocab.lookup_indices(tokens)\n",
|
||||
" tensor = torch.LongTensor(ids).unsqueeze(dim=0).to(device)\n",
|
||||
" prediction = model(tensor).squeeze(dim=0)\n",
|
||||
" probability = torch.softmax(prediction, dim=-1)\n",
|
||||
" predicted_class = prediction.argmax(dim=-1).item()\n",
|
||||
" predicted_probability = probability[predicted_class].item()\n",
|
||||
" return predicted_class, predicted_probability"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This film is terrible!\"\n",
|
||||
"\n",
|
||||
"predict_sentiment(text, model, tokenizer, vocab, device)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This film is great!\"\n",
|
||||
"\n",
|
||||
"predict_sentiment(text, model, tokenizer, vocab, device)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This film is not terrible, it's great!\"\n",
|
||||
"\n",
|
||||
"predict_sentiment(text, model, tokenizer, vocab, device)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This film is not great, it's terrible!\"\n",
|
||||
"\n",
|
||||
"predict_sentiment(text, model, tokenizer, vocab, device)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def text_to_tensor(text, tokenizer, vocab, device):\n",
|
||||
" tokens = tokenizer(text)\n",
|
||||
" ids = vocab.lookup_indices(tokens)\n",
|
||||
" tensor = torch.LongTensor(ids).unsqueeze(dim=0).to(device)\n",
|
||||
" return tensor\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we do onnx stuff to get the data ready for the zk-circuit."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"import json\n",
|
||||
"\n",
|
||||
"text = \"This film is terrible!\"\n",
|
||||
"x = text_to_tensor(text, tokenizer, vocab, device)\n",
|
||||
"\n",
|
||||
"# Flips the neural net into inference mode\n",
|
||||
"model.eval()\n",
|
||||
"model.to('cpu')\n",
|
||||
"\n",
|
||||
"model_path = \"network.onnx\"\n",
|
||||
"data_path = \"input.json\"\n",
|
||||
"\n",
|
||||
" # Export the model\n",
|
||||
"torch.onnx.export(model, # model being run\n",
|
||||
" x, # model input (or a tuple for multiple inputs)\n",
|
||||
" model_path, # where to save the model (can be a file or file-like object)\n",
|
||||
" export_params=True, # store the trained parameter weights inside the model file\n",
|
||||
" opset_version=10, # the ONNX version to export the model to\n",
|
||||
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
|
||||
" input_names = ['input'], # the model's input names\n",
|
||||
" output_names = ['output'], # the model's output names\n",
|
||||
" dynamic_axes={'input' : {0 : 'batch_size'}, # variable length axes\n",
|
||||
" 'output' : {0 : 'batch_size'}})\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data_json = dict(input_data = [data_array])\n",
|
||||
"\n",
|
||||
"print(data_json)\n",
|
||||
"\n",
|
||||
" # Serialize data into file:\n",
|
||||
"json.dump(data_json, open(data_path, 'w'))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import ezkl\n",
|
||||
"\n",
|
||||
"run_args = ezkl.PyRunArgs()\n",
|
||||
"run_args.logrows = 23\n",
|
||||
"run_args.scale_rebase_multiplier = 10\n",
|
||||
"# inputs should be auditable by all\n",
|
||||
"run_args.input_visibility = \"public\"\n",
|
||||
"# same with outputs\n",
|
||||
"run_args.output_visibility = \"public\"\n",
|
||||
"# for simplicity, we'll just use the fixed model visibility: i.e it is public and can't be changed by the prover\n",
|
||||
"run_args.param_visibility = \"fixed\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# TODO: Dictionary outputs\n",
|
||||
"res = ezkl.gen_settings(py_run_args=run_args)\n",
|
||||
"assert res == True\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = ezkl.compile_circuit()\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# srs path\n",
|
||||
"res = await ezkl.get_srs()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# now generate the witness file\n",
|
||||
"res = ezkl.gen_witness()\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = ezkl.mock()\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"# HERE WE SETUP THE CIRCUIT PARAMS\n",
|
||||
"# WE GOT KEYS\n",
|
||||
"# WE GOT CIRCUIT PARAMETERS\n",
|
||||
"# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
|
||||
"\n",
|
||||
"res = ezkl.setup()\n",
|
||||
"\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# GENERATE A PROOF\n",
|
||||
"res = ezkl.prove(proof_path=\"proof.json\")\n",
|
||||
"\n",
|
||||
"print(res)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# VERIFY IT\n",
|
||||
"res = ezkl.verify()\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"print(\"verified\")\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also verify it on chain by creating an onchain verifier"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# check if notebook is in colab\n",
|
||||
"try:\n",
|
||||
" import google.colab\n",
|
||||
" import subprocess\n",
|
||||
" import sys\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"solc-select\"])\n",
|
||||
" !solc-select install 0.8.20\n",
|
||||
" !solc-select use 0.8.20\n",
|
||||
" !solc --version\n",
|
||||
" import os\n",
|
||||
"\n",
|
||||
"# rely on local installation if the notebook is not in colab\n",
|
||||
"except:\n",
|
||||
" import os\n",
|
||||
" pass"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = await ezkl.create_evm_verifier()\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
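{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you prefer not to use Remix, you can also deploy the verifier programmatically. The cell below is a minimal sketch, assuming a local dev node (e.g. anvil) is running at `http://127.0.0.1:3030` and that the verifier was written to `Verifier.sol`; note that the exact `deploy_evm` argument order has varied across ezkl versions, so check the one you have installed. The manual Remix route follows below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch only: deploy the generated verifier to a local dev node instead of Remix.\n",
"# Assumes anvil (or similar) at http://127.0.0.1:3030 and a Verifier.sol in the cwd.\n",
"addr_path = \"addr_verifier.txt\"\n",
"res = await ezkl.deploy_evm(addr_path, \"Verifier.sol\", \"http://127.0.0.1:3030\")\n",
"assert res == True"
]
},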
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You should see a `Verifier.sol`. Right-click and save it locally.\n",
|
||||
"\n",
|
||||
"Now go to [https://remix.ethereum.org](https://remix.ethereum.org).\n",
|
||||
"\n",
|
||||
"Create a new file within remix and copy the verifier code over.\n",
|
||||
"\n",
|
||||
"Finally, compile the code and deploy. For the demo you can deploy to the test environment within remix.\n",
|
||||
"\n",
|
||||
"If everything works, you would have deployed your verifer onchain! Copy the values in the cell above to the respective fields to test if the verifier is working."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -232,7 +232,7 @@
|
||||
"run_args.param_visibility = \"fixed\"\n",
|
||||
"run_args.output_visibility = \"public\"\n",
|
||||
"run_args.input_scale = 2\n",
|
||||
"run_args.logrows = 15\n",
|
||||
"run_args.logrows = 8\n",
|
||||
"\n",
|
||||
"ezkl.get_srs(logrows=run_args.logrows, commitment=ezkl.PyCommitments.KZG)"
|
||||
]
|
||||
@@ -282,7 +282,7 @@
|
||||
"\n",
|
||||
" # generate settings for the current model\n",
|
||||
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
|
||||
" res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", scales=[run_args.input_scale], max_logrows=run_args.logrows)\n",
|
||||
" res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", scales=[run_args.input_scale], max_logrows=run_args.logrows)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" # load settings and print them to the console\n",
|
||||
@@ -303,7 +303,7 @@
|
||||
" assert os.path.isfile(vk_path)\n",
|
||||
" assert os.path.isfile(pk_path)\n",
|
||||
"\n",
|
||||
" res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
|
||||
" res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
|
||||
" run_args.input_scale = settings[\"model_output_scales\"][0]\n",
|
||||
"\n",
|
||||
"for i in range(2):\n",
|
||||
@@ -404,7 +404,7 @@
|
||||
"run_args.output_visibility = \"polycommit\"\n",
|
||||
"run_args.variables = [(\"batch_size\", 1)]\n",
|
||||
"run_args.input_scale = 2\n",
|
||||
"run_args.logrows = 15\n"
|
||||
"run_args.logrows = 8\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -466,10 +466,10 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.5"
|
||||
"version": "3.12.2"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -152,11 +152,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"run_args = ezkl.PyRunArgs()\n",
|
||||
"# logrows\n",
|
||||
"run_args.logrows = 20\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
|
||||
"!RUST_LOG=trace\n",
|
||||
"# TODO: Dictionary outputs\n",
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -176,7 +174,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -210,7 +208,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -304,9 +302,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.13"
|
||||
"version": "3.12.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,336 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Reusable Verifiers \n",
|
||||
"\n",
|
||||
"TODO: Update the reusable verifier solidity contract name.. Make it less generic to H2 and more bespoke to us.\n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to create and reuse the same set of separated verifiers for different models. Specifically, we will use the same verifier for the following four models:\n",
|
||||
"\n",
|
||||
"- `1l_mlp sigmoid`\n",
|
||||
"- `1l_mlp relu`\n",
|
||||
"- `1l_conv sigmoid`\n",
|
||||
"- `1l_conv relu`\n",
|
||||
"\n",
|
||||
"When deploying EZKL verifiers on the blockchain, each associated model typically requires its own unique verifier, leading to increased on-chain state usage. \n",
|
||||
"However, with the reusable verifier, we can deploy a single verifier that can be used to verify proofs for any valid H2 circuit. This notebook shows how to do so. \n",
|
||||
"\n",
|
||||
"By reusing the same verifier across multiple models, we significantly reduce the amount of state bloat on the blockchain. Instead of deploying a unique verifier for each model, we register a unique and much smaller verifying key artifact (VKA) on the reusable verifier contract for each model while sharing a common separated verifier. The VKA contains the VK for the model as well circuit specific metadata that was otherwise hardcoded into the stack of the original non-reusable verifier. The VKA is passed as a parameter to the verifyProof method. This VKA calldata needs to be d with the reusable verifier before it can start verifying proofs by calling the registerVKA method. "
|
||||
]
|
||||
},
|
||||
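{
"cell_type": "markdown",
"metadata": {},
"source": [
"The two on-chain calls fit together as sketched below (assuming a reusable verifier already deployed at `addr`, and the `vka_path` / `proof_path` artifacts generated later in this notebook):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch only -- the full pipeline is run in the cells below.\n",
"# 1. Register the model's VKA with the shared reusable verifier (once per model):\n",
"#    res = await ezkl.register_vka(addr, 'http://127.0.0.1:3030', vka_path=vka_path)\n",
"# 2. Verify any proof for that model against its registered VKA:\n",
"#    res = await ezkl.verify_evm(addr, 'http://127.0.0.1:3030', proof_path, vka_path=vka_path)"
]
},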
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"import torch.nn as nn\n",
|
||||
"import torch.onnx\n",
|
||||
"\n",
|
||||
"# Define the models\n",
|
||||
"class MLP_Sigmoid(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(MLP_Sigmoid, self).__init__()\n",
|
||||
" self.fc = nn.Linear(3, 3)\n",
|
||||
" self.sigmoid = nn.Sigmoid()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.fc(x)\n",
|
||||
" x = self.sigmoid(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class MLP_Relu(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(MLP_Relu, self).__init__()\n",
|
||||
" self.fc = nn.Linear(3, 3)\n",
|
||||
" self.relu = nn.ReLU()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.fc(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class Conv_Sigmoid(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(Conv_Sigmoid, self).__init__()\n",
|
||||
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
|
||||
" self.sigmoid = nn.Sigmoid()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.conv(x)\n",
|
||||
" x = self.sigmoid(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class Conv_Relu(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(Conv_Relu, self).__init__()\n",
|
||||
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
|
||||
" self.relu = nn.ReLU()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.conv(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"# Instantiate the models\n",
|
||||
"mlp_sigmoid = MLP_Sigmoid()\n",
|
||||
"mlp_relu = MLP_Relu()\n",
|
||||
"conv_sigmoid = Conv_Sigmoid()\n",
|
||||
"conv_relu = Conv_Relu()\n",
|
||||
"\n",
|
||||
"# Dummy input tensor for mlp\n",
|
||||
"dummy_input_mlp = torch.tensor([[-1.5737053155899048, -1.708398461341858, 0.19544155895709991]])\n",
|
||||
"input_mlp_path = 'mlp_input.json'\n",
|
||||
"\n",
|
||||
"# Dummy input tensor for conv\n",
|
||||
"dummy_input_conv = torch.tensor([[[1.4124163389205933, 0.6938204169273376, 1.0664031505584717]]])\n",
|
||||
"input_conv_path = 'conv_input.json'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"names = ['mlp_sigmoid', 'mlp_relu', 'conv_sigmoid', 'conv_relu']\n",
|
||||
"models = [mlp_sigmoid, mlp_relu, conv_sigmoid, conv_relu]\n",
|
||||
"inputs = [dummy_input_mlp, dummy_input_mlp, dummy_input_conv, dummy_input_conv]\n",
|
||||
"input_paths = [input_mlp_path, input_mlp_path, input_conv_path, input_conv_path]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import json\n",
|
||||
"import torch\n",
|
||||
"import ezkl\n",
|
||||
"\n",
|
||||
"for name, model, x, input_path in zip(names, models, inputs, input_paths):\n",
|
||||
" # Create a new directory for the model if it doesn't exist\n",
|
||||
" if not os.path.exists(name):\n",
|
||||
" os.mkdir(name)\n",
|
||||
" # Store the paths in each of their respective directories\n",
|
||||
" model_path = os.path.join(name, \"network.onnx\")\n",
|
||||
" compiled_model_path = os.path.join(name, \"network.compiled\")\n",
|
||||
" pk_path = os.path.join(name, \"test.pk\")\n",
|
||||
" vk_path = os.path.join(name, \"test.vk\")\n",
|
||||
" settings_path = os.path.join(name, \"settings.json\")\n",
|
||||
"\n",
|
||||
" witness_path = os.path.join(name, \"witness.json\")\n",
|
||||
" sol_code_path = os.path.join(name, 'test.sol')\n",
|
||||
" vka_path = os.path.join(name, 'vka.bytes')\n",
|
||||
" abi_path = os.path.join(name, 'test.abi')\n",
|
||||
" proof_path = os.path.join(name, \"proof.json\")\n",
|
||||
"\n",
|
||||
" # Flips the neural net into inference mode\n",
|
||||
" model.eval()\n",
|
||||
"\n",
|
||||
" # Export the model\n",
|
||||
" torch.onnx.export(model, x, model_path, export_params=True, opset_version=10,\n",
|
||||
" do_constant_folding=True, input_names=['input'],\n",
|
||||
" output_names=['output'], dynamic_axes={'input': {0: 'batch_size'},\n",
|
||||
" 'output': {0: 'batch_size'}})\n",
|
||||
"\n",
|
||||
" data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
" data = dict(input_data=[data_array])\n",
|
||||
" json.dump(data, open(input_path, 'w'))\n",
|
||||
"\n",
|
||||
" py_run_args = ezkl.PyRunArgs()\n",
|
||||
" py_run_args.input_visibility = \"private\"\n",
|
||||
" py_run_args.output_visibility = \"public\"\n",
|
||||
" py_run_args.param_visibility = \"fixed\" # private by default\n",
|
||||
"\n",
|
||||
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=py_run_args)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" ezkl.calibrate_settings(input_path, model_path, settings_path, \"resources\")\n",
|
||||
"\n",
|
||||
" res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" res = await ezkl.get_srs(settings_path)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" # now generate the witness file\n",
|
||||
" res = ezkl.gen_witness(input_path, compiled_model_path, witness_path)\n",
|
||||
" assert os.path.isfile(witness_path) == True\n",
|
||||
"\n",
|
||||
" # SETUP \n",
|
||||
" # We recommend disabling selector compression for the setup as it decreases the size of the VK artifact\n",
|
||||
" res = ezkl.setup(compiled_model_path, vk_path, pk_path, disable_selector_compression=True)\n",
|
||||
" assert res == True\n",
|
||||
" assert os.path.isfile(vk_path)\n",
|
||||
" assert os.path.isfile(pk_path)\n",
|
||||
" assert os.path.isfile(settings_path)\n",
|
||||
"\n",
|
||||
" # GENERATE A PROOF\n",
|
||||
" res = ezkl.prove(witness_path, compiled_model_path, pk_path, proof_path, \"single\")\n",
|
||||
" assert os.path.isfile(proof_path)\n",
|
||||
"\n",
|
||||
" res = await ezkl.create_evm_verifier(vk_path, settings_path, sol_code_path, abi_path, reusable=True)\n",
|
||||
" # TODO: Add a flag force equals true to in the deprication process to preserve OG single purpose verifier?\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" # TODO: \n",
|
||||
" res = await ezkl.create_evm_vka(vk_path, settings_path, vka_path, decimals=18)\n",
|
||||
" assert res == True\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import subprocess\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"# make sure anvil is running locally\n",
|
||||
"# $ anvil -p 3030\n",
|
||||
"\n",
|
||||
"RPC_URL = \"http://localhost:3030\"\n",
|
||||
"\n",
|
||||
"# Save process globally\n",
|
||||
"anvil_process = None\n",
|
||||
"\n",
|
||||
"def start_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is None:\n",
|
||||
" anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
|
||||
" if anvil_process.returncode is not None:\n",
|
||||
" raise Exception(\"failed to start anvil process\")\n",
|
||||
" time.sleep(3)\n",
|
||||
"\n",
|
||||
"def stop_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is not None:\n",
|
||||
" anvil_process.terminate()\n",
|
||||
" anvil_process = None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Check that the generated verifiers are identical for all models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import filecmp\n",
|
||||
"\n",
|
||||
"def compare_files(file1, file2):\n",
|
||||
" return filecmp.cmp(file1, file2, shallow=False)\n",
|
||||
"\n",
|
||||
"sol_code_path_0 = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
|
||||
"sol_code_path_1 = os.path.join(\"mlp_relu\", 'test.sol')\n",
|
||||
"\n",
|
||||
"sol_code_path_2 = os.path.join(\"conv_sigmoid\", 'test.sol')\n",
|
||||
"sol_code_path_3 = os.path.join(\"conv_relu\", 'test.sol')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"assert compare_files(sol_code_path_0, sol_code_path_1) == True\n",
|
||||
"assert compare_files(sol_code_path_2, sol_code_path_3) == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we deploy reusable verifier that will be shared by the four models. We picked the `1l_mlp sigmoid` model as an example but you could have used any of the generated verifiers since they are all identical. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os \n",
|
||||
"addr_path_verifier = \"addr_verifier.txt\"\n",
|
||||
"sol_code_path = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" \"verifier/reusable\" # TODO deprecate this option for selecting the type of verifier you want to deploy. \n",
|
||||
" # verifier, verifier/reusable, vka\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"with open(addr_path_verifier, 'r') as file:\n",
|
||||
" addr = file.read().rstrip()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally we deploy each of the unique VK-artifacts and verify them using the shared verifier deployed in the previous step."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for name in names:\n",
|
||||
" addr_path_vk = \"addr_vk.txt\"\n",
|
||||
" vka_path = os.path.join(name, 'vka.bytes')\n",
|
||||
" res = await ezkl.register_vka(\n",
|
||||
" addr, # address of the reusable verifier. TODO: If we deploy the RV across all chains to a single canoncial address, we can hardcode that address and remove this param.\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" vka_path=vka_path, # TODO: Pass in private key and potentially create new command that both creates and registers the vka. Simplify testing pipeline for us and other folks. \n",
|
||||
" )\n",
|
||||
" assert res == True\n",
|
||||
" \n",
|
||||
" proof_path = os.path.join(name, \"proof.json\")\n",
|
||||
" res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path,\n",
|
||||
" vka_path = vka_path # TODO: Turn this from optional to required if we deprecate the orignal verifier. \n",
|
||||
" # TODO: Make it where the use only needs to deply a vka. \n",
|
||||
" )\n",
|
||||
" assert res == True"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -167,8 +167,6 @@
|
||||
"run_args = ezkl.PyRunArgs()\n",
|
||||
"# \"hashed/private\" means that the output of the hashing is not visible to the verifier and is instead fed into the computational graph\n",
|
||||
"run_args.input_visibility = \"hashed/private/0\"\n",
|
||||
"# as the inputs are felts we turn off input range checks\n",
|
||||
"run_args.ignore_range_check_inputs_outputs = True\n",
|
||||
"# we set it to fix the set we want to check membership for\n",
|
||||
"run_args.param_visibility = \"fixed\"\n",
|
||||
"# the output is public -- set membership fails if it is not = 0\n",
|
||||
@@ -231,7 +229,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -267,7 +265,7 @@
|
||||
" # Serialize data into file:\n",
|
||||
"json.dump( data, open(data_path_faulty, 'w' ))\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path_faulty)\n",
|
||||
"res = await ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path_faulty)\n",
|
||||
"assert os.path.isfile(witness_path_faulty)"
|
||||
]
|
||||
},
|
||||
@@ -312,7 +310,7 @@
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump( data, open(data_path_truthy, 'w' ))\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path_truthy, compiled_model_path, witness_path_truthy)\n",
|
||||
"res = await ezkl.gen_witness(data_path_truthy, compiled_model_path, witness_path_truthy)\n",
|
||||
"assert os.path.isfile(witness_path_truthy)"
|
||||
]
|
||||
},
|
||||
@@ -484,7 +482,7 @@
|
||||
"source": [
|
||||
"import pytest\n",
|
||||
"def test_verification():\n",
|
||||
" with pytest.raises(RuntimeError, match='Failed to run verify: \\\\[halo2\\\\] The constraint system is not satisfied'):\n",
|
||||
" with pytest.raises(RuntimeError, match='Failed to run verify: The constraint system is not satisfied'):\n",
|
||||
" ezkl.verify(\n",
|
||||
" proof_path_faulty,\n",
|
||||
" settings_path,\n",
|
||||
@@ -516,7 +514,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.3"
|
||||
"version": "3.9.15"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -205,7 +205,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -328,7 +328,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"execution_count": null,
|
||||
"id": "171702d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -348,7 +348,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"execution_count": null,
|
||||
"id": "671dfdd5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -364,7 +364,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"execution_count": null,
|
||||
"id": "50eba2f4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -399,7 +399,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.7"
|
||||
"version": "3.9.15"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -157,7 +157,6 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b78d3cbf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -171,7 +170,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -193,7 +192,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# srs path\n",
|
||||
"res = ezkl.get_srs( settings_path)"
|
||||
"res = await ezkl.get_srs( settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -205,7 +204,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -299,9 +298,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.3"
|
||||
"version": "3.9.15"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
|
||||
@@ -203,7 +203,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -204,7 +204,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -303,4 +303,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
|
||||
@@ -149,7 +149,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -183,7 +183,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -204,7 +204,6 @@
|
||||
"run_args = ezkl.PyRunArgs()\n",
|
||||
"# \"polycommit\" means that the output of the hashing is not visible to the verifier and is instead fed into the computational graph\n",
|
||||
"run_args.input_visibility = \"polycommit\"\n",
|
||||
"run_args.ignore_range_check_inputs_outputs = True\n",
|
||||
"# the parameters are public\n",
|
||||
"run_args.param_visibility = \"fixed\"\n",
|
||||
"# the output is public (this is the inequality test)\n",
|
||||
@@ -298,7 +297,7 @@
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
|
||||
"assert os.path.isfile(witness_path)\n",
|
||||
"\n",
|
||||
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
|
||||
@@ -412,7 +411,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path, vk_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path, vk_path)\n",
|
||||
"assert os.path.isfile(witness_path)\n",
|
||||
"\n",
|
||||
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
|
||||
@@ -479,11 +478,12 @@
|
||||
"import pytest\n",
|
||||
"\n",
|
||||
"def test_verification():\n",
|
||||
" with pytest.raises(RuntimeError, match='Failed to run verify: \\\\[halo2\\\\] The constraint system is not satisfied'):\n",
|
||||
" with pytest.raises(RuntimeError, match='Failed to run verify: The constraint system is not satisfied'):\n",
|
||||
" ezkl.verify(\n",
|
||||
" proof_path,\n",
|
||||
" settings_path,\n",
|
||||
" vk_path,\n",
|
||||
" \n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"# Run the test function\n",
|
||||
@@ -510,7 +510,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.3"
|
||||
"version": "3.9.15"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -167,7 +167,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
@@ -187,7 +187,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -221,7 +221,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -39,7 +39,7 @@
|
||||
"import json\n",
|
||||
"import numpy as np\n",
|
||||
"from sklearn.svm import SVC\n",
|
||||
"from hummingbird.ml import convert\n",
|
||||
"import sk2torch\n",
|
||||
"import torch\n",
|
||||
"import ezkl\n",
|
||||
"import os\n",
|
||||
@@ -59,11 +59,11 @@
|
||||
"# Train an SVM on the data and wrap it in PyTorch.\n",
|
||||
"sk_model = SVC(probability=True)\n",
|
||||
"sk_model.fit(xs, ys)\n",
|
||||
"model = convert(sk_model, \"torch\").model\n",
|
||||
"model = sk2torch.wrap(sk_model)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"model\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
@@ -84,6 +84,33 @@
|
||||
"data_path = os.path.join('input.json')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7f0ca328",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"# Create a coordinate grid to compute a vector field on.\n",
|
||||
"spaced = np.linspace(-2, 2, num=25)\n",
|
||||
"grid_xs = torch.tensor([[x, y] for x in spaced for y in spaced], requires_grad=True)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Compute the gradients of the SVM output.\n",
|
||||
"outputs = model.predict_proba(grid_xs)[:, 1]\n",
|
||||
"(input_grads,) = torch.autograd.grad(outputs.sum(), (grid_xs,))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Create a quiver plot of the vector field.\n",
|
||||
"plt.quiver(\n",
|
||||
" grid_xs[:, 0].detach().numpy(),\n",
|
||||
" grid_xs[:, 1].detach().numpy(),\n",
|
||||
" input_grads[:, 0].detach().numpy(),\n",
|
||||
" input_grads[:, 1].detach().numpy(),\n",
|
||||
")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -92,14 +119,14 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"spaced = np.linspace(-2, 2, num=25)\n",
|
||||
"grid_xs = torch.tensor([[x, y] for x in spaced for y in spaced], requires_grad=True)\n",
|
||||
"\n",
|
||||
"# export to onnx format\n",
|
||||
"# !!!!!!!!!!!!!!!!! This will flash a warning but it is fine !!!!!!!!!!!!!!!!!!!!!\n",
|
||||
"\n",
|
||||
"# Input to the model\n",
|
||||
"shape = xs.shape[1:]\n",
|
||||
"x = grid_xs[0:1]\n",
|
||||
"torch_out = model.predict(x)\n",
|
||||
"# Export the model\n",
|
||||
"torch.onnx.export(model, # model being run\n",
|
||||
" # model input (or a tuple for multiple inputs)\n",
|
||||
@@ -116,7 +143,9 @@
|
||||
"\n",
|
||||
"d = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data = dict(input_data=[d])\n",
|
||||
"data = dict(input_shapes=[shape],\n",
|
||||
" input_data=[d],\n",
|
||||
" output_data=[o.reshape([-1]).tolist() for o in torch_out])\n",
|
||||
"\n",
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(\"input.json\", 'w'))\n"
|
||||
@@ -138,7 +167,6 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0bee4d7f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -152,7 +180,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -186,13 +214,13 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 10,
|
||||
"id": "b1c561a8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -392,7 +420,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
}
|
||||
@@ -413,7 +441,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.3"
|
||||
"version": "3.9.15"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -637,7 +637,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [11])"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [11])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -683,7 +683,7 @@
|
||||
" data = json.load(f)\n",
|
||||
" print(len(data['input_data'][0]))\n",
|
||||
"\n",
|
||||
"ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
"await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -758,4 +758,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
}
|
||||
|
||||
@@ -525,7 +525,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [4])"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [4])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -572,7 +572,7 @@
|
||||
" data = json.load(f)\n",
|
||||
" print(len(data['input_data'][0]))\n",
|
||||
"\n",
|
||||
"ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
"await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -647,4 +647,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
}
|
||||
|
||||
@@ -458,7 +458,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.gen_settings(onnx_filename, settings_filename)\n",
|
||||
"ezkl.calibrate_settings(\n",
|
||||
"await ezkl.calibrate_settings(\n",
|
||||
" input_filename, onnx_filename, settings_filename, \"resources\", scales = [4])\n",
|
||||
"res = await ezkl.get_srs(settings_filename)\n",
|
||||
"ezkl.compile_circuit(onnx_filename, compiled_filename, settings_filename)\n",
|
||||
@@ -527,7 +527,7 @@
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(input_filename, compiled_filename, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(input_filename, compiled_filename, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -666,7 +666,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -689,8 +689,8 @@
|
||||
"# await\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" address_path,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
@@ -701,7 +701,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -722,8 +722,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path,\n",
|
||||
" \"http://127.0.0.1:3030\"\n",
|
||||
")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
@@ -743,8 +743,7 @@
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"display_name": "Python 3",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
@@ -757,9 +756,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.9"
|
||||
"version": "3.12.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
}
|
||||
|
||||
@@ -629,7 +629,7 @@
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(val_data, model_path, settings_path, \"resources\", scales = [4])\n",
|
||||
"res = await ezkl.calibrate_settings(val_data, model_path, settings_path, \"resources\", scales = [4])\n",
|
||||
"assert res == True\n",
|
||||
"print(\"verified\")\n"
|
||||
]
|
||||
@@ -680,7 +680,7 @@
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -849,8 +849,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" address_path,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
@@ -870,8 +870,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path\n",
|
||||
" proof_path,\n",
|
||||
" \"http://127.0.0.1:3030\"\n",
|
||||
")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
|
||||
547
examples/notebooks/world_rotation.ipynb
Normal file
@@ -0,0 +1,547 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "cf69bb3f-94e6-4dba-92cd-ce08df117d67",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## World rotation\n",
|
||||
"\n",
|
||||
"Here we demonstrate how to use the EZKL package to rotate an on-chain world. \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"> **A typical ZK application flow**. For the shape rotators out there — this is an easily digestible example. A user computes a ZK-proof that they have calculated a valid rotation of a world. They submit this proof to a verifier contract which governs an on-chain world, along with a new set of coordinates, and the world rotation updates. Observe that it’s possible for one player to initiate a *global* change.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "95613ee9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# check if notebook is in colab\n",
|
||||
"try:\n",
|
||||
" # install ezkl\n",
|
||||
" import google.colab\n",
|
||||
" import subprocess\n",
|
||||
" import sys\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
|
||||
"\n",
|
||||
"# rely on local installation of ezkl if the notebook is not in colab\n",
|
||||
"except:\n",
|
||||
" pass\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from torch import nn\n",
|
||||
"import ezkl\n",
|
||||
"import os\n",
|
||||
"import json\n",
|
||||
"import torch\n",
|
||||
"import math\n",
|
||||
"\n",
|
||||
"# these are constants for the rotation\n",
|
||||
"phi = torch.tensor(5 * math.pi / 180)\n",
|
||||
"s = torch.sin(phi)\n",
|
||||
"c = torch.cos(phi)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class RotateStuff(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(RotateStuff, self).__init__()\n",
|
||||
"\n",
|
||||
" # create a rotation matrix -- the matrix is constant and is transposed for convenience\n",
|
||||
" self.rot = torch.stack([torch.stack([c, -s]),\n",
|
||||
" torch.stack([s, c])]).t()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x_rot = x @ self.rot # same as x_rot = (rot @ x.t()).t() due to rot in O(n) (SO(n) even)\n",
|
||||
" return x_rot\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"circuit = RotateStuff()"
|
||||
]
|
||||
},
|
||||
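{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, the forward pass above is the standard 2D rotation. With $\\varphi = 5^\\circ$:\n",
"\n",
"$$R_\\varphi = \\begin{pmatrix} \\cos\\varphi & -\\sin\\varphi \\\\ \\sin\\varphi & \\cos\\varphi \\end{pmatrix}, \\qquad x_{\\mathrm{rot}} = x\\,R_\\varphi^{\\top}$$\n",
"\n",
"Each row of `x` is rotated counter-clockwise by $\\varphi$; the transpose taken in `__init__` is what makes this row-vector convention line up with `x @ self.rot`."
]
},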
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This will showcase the principle directions of rotation by plotting the rotation of a single unit vector."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from matplotlib import pyplot\n",
|
||||
"pyplot.figure(figsize=(3, 3))\n",
|
||||
"pyplot.arrow(0, 0, 1, 0, width=0.02, alpha=0.5)\n",
|
||||
"pyplot.arrow(0, 0, 0, 1, width=0.02, alpha=0.5)\n",
|
||||
"pyplot.arrow(0, 0, circuit.rot[0, 0].item(), circuit.rot[0, 1].item(), width=0.02)\n",
|
||||
"pyplot.arrow(0, 0, circuit.rot[1, 0].item(), circuit.rot[1, 1].item(), width=0.02)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b37637c4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_path = os.path.join('network.onnx')\n",
|
||||
"compiled_model_path = os.path.join('network.compiled')\n",
|
||||
"pk_path = os.path.join('test.pk')\n",
|
||||
"vk_path = os.path.join('test.vk')\n",
|
||||
"settings_path = os.path.join('settings.json')\n",
|
||||
"srs_path = os.path.join('kzg.srs')\n",
|
||||
"witness_path = os.path.join('witness.json')\n",
|
||||
"data_path = os.path.join('input.json')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "82db373a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"# initial principle vectors for the rotation are as in the plot above\n",
|
||||
"x = torch.tensor([[1, 0], [0, 1]], dtype=torch.float32)\n",
|
||||
"\n",
|
||||
"# Flips the neural net into inference mode\n",
|
||||
"circuit.eval()\n",
|
||||
"\n",
|
||||
" # Export the model\n",
|
||||
"torch.onnx.export(circuit, # model being run\n",
|
||||
" x, # model input (or a tuple for multiple inputs)\n",
|
||||
" model_path, # where to save the model (can be a file or file-like object)\n",
|
||||
" export_params=True, # store the trained parameter weights inside the model file\n",
|
||||
" opset_version=10, # the ONNX version to export the model to\n",
|
||||
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
|
||||
" input_names = ['input'], # the model's input names\n",
|
||||
" output_names = ['output'], # the model's output names\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data = dict(input_data = [data_array])\n",
|
||||
"\n",
|
||||
" # Serialize data into file:\n",
|
||||
"json.dump( data, open(data_path, 'w' ))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### World rotation in 2D on-chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For demo purposes we deploy these coordinates to a contract running locally using Anvil. This creates our on-chain world. We then rotate the world using the EZKL package and submit the proof to the contract. The contract then updates the world rotation. For demo purposes we do this repeatedly, rotating the world by 1 transform each time."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import subprocess\n",
|
||||
"import time\n",
|
||||
"import threading\n",
|
||||
"\n",
|
||||
"# make sure anvil is running locally\n",
|
||||
"# $ anvil -p 3030\n",
|
||||
"\n",
|
||||
"RPC_URL = \"http://localhost:3030\"\n",
|
||||
"\n",
|
||||
"# Save process globally\n",
|
||||
"anvil_process = None\n",
|
||||
"\n",
|
||||
"def start_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is None:\n",
|
||||
" anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
|
||||
" if anvil_process.returncode is not None:\n",
|
||||
" raise Exception(\"failed to start anvil process\")\n",
|
||||
" time.sleep(3)\n",
|
||||
"\n",
|
||||
"def stop_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is not None:\n",
|
||||
" anvil_process.terminate()\n",
|
||||
" anvil_process = None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We define our `PyRunArgs` objects which contains the visibility parameters for out model. \n",
|
||||
"- `input_visibility` defines the visibility of the model inputs\n",
|
||||
"- `param_visibility` defines the visibility of the model weights and constants and parameters \n",
|
||||
"- `output_visibility` defines the visibility of the model outputs\n",
|
||||
"\n",
|
||||
"Here we create the following setup:\n",
|
||||
"- `input_visibility`: \"public\"\n",
|
||||
"- `param_visibility`: \"fixed\"\n",
|
||||
"- `output_visibility`: public"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d5e374a2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"py_run_args = ezkl.PyRunArgs()\n",
|
||||
"py_run_args.input_visibility = \"public\"\n",
|
||||
"py_run_args.output_visibility = \"public\"\n",
|
||||
"py_run_args.param_visibility = \"private\" # private by default\n",
|
||||
"py_run_args.scale_rebase_multiplier = 10\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=py_run_args)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3aa4f090",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We also define a contract that holds out test data. This contract will contain in its storage the data that we will read from and attest to. In production you would not need to set up a local anvil instance. Instead you would replace RPC_URL with the actual RPC endpoint of the chain you are deploying your verifiers too, reading from the data on said chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2007dc77",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ezkl.setup_test_evm_witness(\n",
|
||||
" data_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" # we write the call data to the same file as the input data\n",
|
||||
" data_path,\n",
|
||||
" input_source=ezkl.PyTestDataSource.OnChain,\n",
|
||||
" output_source=ezkl.PyTestDataSource.File,\n",
|
||||
" rpc_url=RPC_URL)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ab993958",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we use Halo2 with KZG-commitments we need an SRS string from (preferably) a multi-party trusted setup ceremony. For an overview of the procedures for such a ceremony check out [this page](https://blog.ethereum.org/2023/01/16/announcing-kzg-ceremony). The `get_srs` command retrieves a correctly sized SRS given the calibrated settings file from [here](https://github.com/han0110/halo2-kzg-srs). \n",
|
||||
"\n",
|
||||
"These SRS were generated with [this](https://github.com/privacy-scaling-explorations/perpetualpowersoftau) ceremony. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8b74dcee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# srs path\n",
|
||||
"res = await ezkl.get_srs( settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "18c8b7c7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"witness = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ad58432e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we setup verifying and proving keys for the circuit. As the name suggests the proving key is needed for ... proving and the verifying key is needed for ... verifying. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b1c561a8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = ezkl.setup(\n",
|
||||
" compiled_model_path,\n",
|
||||
" vk_path,\n",
|
||||
" pk_path,\n",
|
||||
" \n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"assert os.path.isfile(vk_path)\n",
|
||||
"assert os.path.isfile(pk_path)\n",
|
||||
"assert os.path.isfile(settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1746c8d1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now create an EVM verifier contract from our circuit. This contract will be deployed to the chain we are using. In this case we are using a local anvil instance."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d1920c0f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"abi_path = 'test.abi'\n",
|
||||
"sol_code_path = 'test.sol'\n",
|
||||
"\n",
|
||||
"res = await ezkl.create_evm_verifier(\n",
|
||||
" vk_path,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" abi_path,\n",
|
||||
" )\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
"cell_type": "code",
"execution_count": null,
"id": "0fd7f22b",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"addr_path_verifier = \"addr_verifier.txt\"\n",
"\n",
"res = await ezkl.deploy_evm(\n",
"    addr_path_verifier,\n",
"    sol_code_path,\n",
"    RPC_URL\n",
")\n",
"\n",
"assert res == True"
]
},
{
"cell_type": "markdown",
"id": "9c0dffab",
"metadata": {},
"source": [
"With the vanilla verifier deployed, we can now create the data attestation contract. It reads the instances from the calldata sent to the verifier, attests to them, calls the verifier, and returns the result.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c2db14d7",
"metadata": {},
"outputs": [],
"source": [
"abi_path = 'test.abi'\n",
"sol_code_path = 'test.sol'\n",
"input_path = 'input.json'\n",
"\n",
"res = await ezkl.create_evm_data_attestation(\n",
"    input_path,\n",
"    settings_path,\n",
"    sol_code_path,\n",
"    abi_path,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5a018ba6",
"metadata": {},
"outputs": [],
"source": [
"addr_path_da = \"addr_da.txt\"\n",
"\n",
"res = await ezkl.deploy_da_evm(\n",
"    addr_path_da,\n",
"    input_path,\n",
"    settings_path,\n",
"    sol_code_path,\n",
"    RPC_URL,\n",
")"
]
},
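{
"cell_type": "markdown",
"metadata": {},
"source": [
"(A small sketch of what just happened: both deploy helpers write the deployed contract address to the path we passed in, so we can read the addresses straight back. Only the two address files above are assumed.)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# the deploy helpers persist the contract addresses to disk\n",
"print('verifier address:', open(addr_path_verifier).read())\n",
"print('data attestation address:', open(addr_path_da).read())"
]
},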
{
"cell_type": "markdown",
"id": "2adad845",
"metadata": {},
"source": [
"Now we can pull in the data from the contract and calculate a new set of coordinates: we rotate the world by one transform and submit the proof to the contract. The contract could then update the world rotation (that logic is not included here). For demo purposes this can be repeated, rotating the world by one transform each time."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c384cbc8",
"metadata": {},
"outputs": [],
"source": [
"# GENERATE A PROOF\n",
"\n",
"proof_path = os.path.join('test.pf')\n",
"\n",
"res = ezkl.prove(\n",
"    witness_path,\n",
"    compiled_model_path,\n",
"    pk_path,\n",
"    proof_path,\n",
"    \"single\",\n",
")\n",
"\n",
"print(res)\n",
"assert os.path.isfile(proof_path)"
]
},
{
"cell_type": "markdown",
"id": "90eda56e",
"metadata": {},
"source": [
"Call the view-only verify method on the contract to verify the proof. Because it is a view function, it is safe to use in production: you never have to pass your private key."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "76f00d41",
"metadata": {},
"outputs": [],
"source": [
"# read the verifier address\n",
"addr = None\n",
"with open(addr_path_verifier, 'r') as f:\n",
"    addr = f.read()\n",
"\n",
"# read the data attestation address\n",
"addr_da = None\n",
"with open(addr_path_da, 'r') as f:\n",
"    addr_da = f.read()\n",
"\n",
"res = ezkl.verify_evm(\n",
"    addr,\n",
"    proof_path,\n",
"    RPC_URL,\n",
"    addr_da,\n",
")\n",
"\n",
"assert res == True"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a sanity check, let's plot the rotations of the unit vectors. We can see that they rotate as expected given the output of the circuit."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# raw field element for the first output coordinate\n",
"witness['outputs'][0][0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"settings = json.load(open(settings_path, 'r'))\n",
"out_scale = settings[\"model_output_scales\"][0]\n",
"\n",
"from matplotlib import pyplot\n",
"pyplot.figure(figsize=(3, 3))\n",
"# original unit vectors\n",
"pyplot.arrow(0, 0, 1, 0, width=0.02, alpha=0.5)\n",
"pyplot.arrow(0, 0, 0, 1, width=0.02, alpha=0.5)\n",
"\n",
"# decode the rotated basis vectors from the circuit's field-element outputs\n",
"arrow_x = ezkl.felt_to_float(witness['outputs'][0][0], out_scale)\n",
"arrow_y = ezkl.felt_to_float(witness['outputs'][0][1], out_scale)\n",
"pyplot.arrow(0, 0, arrow_x, arrow_y, width=0.02)\n",
"arrow_x = ezkl.felt_to_float(witness['outputs'][0][2], out_scale)\n",
"arrow_y = ezkl.felt_to_float(witness['outputs'][0][3], out_scale)\n",
"pyplot.arrow(0, 0, arrow_x, arrow_y, width=0.02)"
]
},
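{
"cell_type": "markdown",
"metadata": {},
"source": [
"(One more hedged check: a rotation should preserve length, so the decoded vectors should be approximately unit length; quantization means approximately, not exactly. Only `witness` and `out_scale` from above are assumed.)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import math\n",
"\n",
"vx = ezkl.felt_to_float(witness['outputs'][0][0], out_scale)\n",
"vy = ezkl.felt_to_float(witness['outputs'][0][1], out_scale)\n",
"# should be close to 1.0 up to fixed-point quantization error\n",
"print('norm of first rotated basis vector:', math.hypot(vx, vy))"
]
}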
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -193,7 +193,7 @@
 "with open(cal_path, \"w\") as f:\n",
 "    json.dump(cal_data, f)\n",
 "\n",
-"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
+"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
 ]
 },
 {
@@ -227,7 +227,7 @@
 "source": [
 "# now generate the witness file \n",
 "\n",
-"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
+"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
 "assert os.path.isfile(witness_path)"
 ]
 },
@@ -1,106 +0,0 @@
{
  "input_data": [
    [
      8761, 7654, 8501, 2404, 6929, 8858, 5946, 3673, 4131, 3854,
      8137, 8239, 9038, 6299, 1118, 9737, 208, 7954, 3691, 610,
      3468, 3314, 8658, 8366, 2850, 477, 6114, 232, 4601, 7420,
      5713, 2936, 6061, 2870, 8421, 177, 7107, 7382, 6115, 5487,
      8502, 2559, 1875, 129, 8533, 8201, 8414, 4775, 9817, 3127,
      8761, 7654, 8501, 2404, 6929, 8858, 5946, 3673, 4131, 3854,
      8137, 8239, 9038, 6299, 1118, 9737, 208, 7954, 3691, 610,
      3468, 3314, 8658, 8366, 2850, 477, 6114, 232, 4601, 7420,
      5713, 2936, 6061, 2870, 8421, 177, 7107, 7382, 6115, 5487,
      8502, 2559, 1875, 129, 8533, 8201, 8414, 4775, 9817, 3127
    ]
  ]
}
Binary file not shown.
Binary file not shown.
@@ -1 +0,0 @@
{"run_args":{"tolerance":{"val":0.0,"scale":1.0},"input_scale":7,"param_scale":7,"scale_rebase_multiplier":10,"lookup_range":[0,0],"logrows":13,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Private"},"num_constraints":5619,"total_const_size":513,"model_instance_shapes":[[1,3,10,10]],"model_output_scales":[14],"model_input_scales":[7],"module_sizes":{"kzg":[],"poseidon":[0,[0]],"elgamal":[0,[0]]},"required_lookups":[],"check_mode":"UNSAFE","version":"0.0.0","num_blinding_factors":null}
@@ -1 +0,0 @@
{"run_args":{"input_scale":7,"param_scale":7,"scale_rebase_multiplier":1,"lookup_range":[-32768,32768],"logrows":17,"num_inner_cols":2,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Private","rebase_frac_zero_constants":false,"check_mode":"UNSAFE","commitment":"KZG","decomp_base":16384,"decomp_legs":2,"bounded_log_lookup":false,"ignore_range_check_inputs_outputs":false},"num_rows":54,"total_assignments":109,"total_const_size":4,"total_dynamic_col_size":0,"max_dynamic_input_len":0,"num_dynamic_lookups":0,"num_shuffles":0,"total_shuffle_col_size":0,"model_instance_shapes":[[1,1]],"model_output_scales":[7],"model_input_scales":[7],"module_sizes":{"polycommit":[],"poseidon":[0,[0]]},"required_lookups":[],"required_range_checks":[[-1,1],[0,16383]],"check_mode":"UNSAFE","version":"0.0.0","num_blinding_factors":null,"timestamp":1739396322131,"input_types":["F32"],"output_types":["F32"]}
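The two settings files above are plain JSON, so the fields that drive circuit size and quantization can be read back directly. A minimal sketch (the `settings.json` filename is the only assumption; the field names are the ones visible in the files above):

import json

# load an ezkl settings file and surface the knobs that matter most
settings = json.load(open("settings.json"))
print("logrows:", settings["run_args"]["logrows"])        # log2 of the circuit's row count
print("input scales:", settings["model_input_scales"])    # fixed-point scale of each input
print("output scales:", settings["model_output_scales"])  # fixed-point scale of each output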
@@ -9,9 +9,7 @@ class MyModel(nn.Module):
         super(MyModel, self).__init__()
 
     def forward(self, w, x, y, z):
-        a = (x & y)
-        b = (y & (z ^ w))
-        return [a & b]
+        return [((x & y)) == (x & (y | (z ^ w)))]
 
 
 circuit = MyModel()
@@ -1 +1 @@
-{"input_data": [[false, true, true], [false, true, true], [true, false, false], [false, true, true]]}
+{"input_data": [[false, true, false], [true, false, false], [true, false, false], [false, false, false]]}
Binary file not shown (ONNX graph with And/Xor/Or/Equal nodes; exporter changed between pytorch 1.12.1 and pytorch 2.2.2; raw protobuf bytes omitted).
@@ -1,42 +0,0 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.exp(x)

        return m


circuit = MyModel()

x = torch.empty(1, 8).uniform_(0, 1)

out = circuit(x)

print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})


d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
@@ -1 +0,0 @@
{"input_data": [[0.5801457762718201, 0.6019012331962585, 0.8695418238639832, 0.17170941829681396, 0.500616729259491, 0.353726327419281, 0.6726185083389282, 0.5936906337738037]]}
Binary file not shown (deleted ONNX model containing a single Exp node; exporter pytorch 2.2.2; raw protobuf bytes omitted).
Binary file not shown.
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff