mirror of https://github.com/zkonduit/ezkl.git
synced 2026-01-13 08:17:57 -05:00
Compare commits: 28 commits, release-v2 ... ac/make-da
| SHA1 |
|---|
| 1a75963705 |
| 0ef1f35e59 |
| 808ab7d0de |
| 68b2c96b97 |
| 9a0ab22fdb |
| f2b1de3740 |
| dcb888ff1e |
| 26f465e70c |
| 8eef53213d |
| a1345966d7 |
| 640061c850 |
| da7db7d88d |
| a55f75ff3f |
| bf6f704827 |
| 0dbfdf4672 |
| 98299356a6 |
| 04805d2a91 |
| ca18cf29bb |
| 78f8e23b55 |
| 7d40926082 |
| e2c8182871 |
| 4f077c9134 |
| 038805ce02 |
| 0fb87c9a20 |
| 77423a6d07 |
| 8b416c7a00 |
| 73ec5e549a |
| 28386d8442 |
22 .github/workflows/benchmarks.yml (vendored)
@@ -15,7 +15,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2023-06-27
override: true
@@ -32,7 +32,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2023-06-27
override: true
@@ -49,7 +49,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2023-06-27
override: true
@@ -66,7 +66,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2023-06-27
override: true
@@ -83,7 +83,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2023-06-27
override: true
@@ -100,7 +100,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2023-06-27
override: true
@@ -117,7 +117,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2023-06-27
override: true
@@ -134,7 +134,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2023-06-27
override: true
@@ -151,7 +151,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2023-06-27
override: true
@@ -168,7 +168,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2023-06-27
override: true
@@ -185,7 +185,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2023-06-27
override: true
250 .github/workflows/engine.yml (vendored, Normal file)
@@ -0,0 +1,250 @@
name: Build and Publish EZKL Engine npm package

on:
workflow_dispatch:
inputs:
tag:
description: "The tag to release"
required: true
push:
tags:
- "*"

defaults:
run:
working-directory: .
jobs:
publish-wasm-bindings:
permissions:
contents: read
packages: write
name: publish-wasm-bindings
env:
RELEASE_TAG: ${{ github.ref_name }}
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@0d096b08b4e5a7de8c28de67e11e945404e9eefa #v0.4.0
with:
# Pin to version 0.12.1
version: 'v0.12.1'
- name: Add wasm32-unknown-unknown target
run: rustup target add wasm32-unknown-unknown

- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2025-02-17-x86_64-unknown-linux-gnu
- name: Install binaryen
run: |
set -e
curl -L https://github.com/WebAssembly/binaryen/releases/download/version_116/binaryen-version_116-x86_64-linux.tar.gz | tar xzf -
export PATH=$PATH:$PWD/binaryen-version_116/bin
wasm-opt --version
- name: Build wasm files for both web and nodejs compilation targets
run: |
wasm-pack build --release --target nodejs --out-dir ./pkg/nodejs . -- -Z build-std="panic_abort,std"
wasm-pack build --release --target web --out-dir ./pkg/web . -- -Z build-std="panic_abort,std" --features web
- name: Create package.json in pkg folder
shell: bash
run: |
cat > pkg/package.json << EOF
{
"name": "@ezkljs/engine",
"version": "$RELEASE_TAG",
"dependencies": {
"@types/json-bigint": "^1.0.1",
"json-bigint": "^1.0.0"
},
"files": [
"nodejs/ezkl_bg.wasm",
"nodejs/ezkl.js",
"nodejs/ezkl.d.ts",
"nodejs/package.json",
"nodejs/utils.js",
"web/ezkl_bg.wasm",
"web/ezkl.js",
"web/ezkl.d.ts",
"web/snippets/**/*",
"web/package.json",
"web/utils.js",
"ezkl.d.ts"
],
"main": "nodejs/ezkl.js",
"module": "web/ezkl.js",
"types": "nodejs/ezkl.d.ts",
"sideEffects": [
"web/snippets/*"
]
}
EOF

- name: Replace memory definition in nodejs
run: |
sed -i "3s|.*|imports['env'] = {memory: new WebAssembly.Memory({initial:20,maximum:65536,shared:true})}|" pkg/nodejs/ezkl.js

- name: Replace `import.meta.url` with `import.meta.resolve` definition in workerHelpers.js
run: |
find ./pkg/web/snippets -type f -name "*.js" -exec sed -i "s|import.meta.url|import.meta.resolve|" {} +

- name: Add serialize and deserialize methods to nodejs bundle
run: |
echo '
const JSONBig = require("json-bigint");

function deserialize(buffer) { // buffer is a Uint8ClampedArray | Uint8Array // return a JSON object
if (buffer instanceof Uint8ClampedArray) {
buffer = new Uint8Array(buffer.buffer);
}
const string = new TextDecoder().decode(buffer);
const jsonObject = JSONBig.parse(string);
return jsonObject;
}

function serialize(data) { // data is an object // return a Uint8ClampedArray
// Step 1: Stringify the Object with BigInt support
if (typeof data === "object") {
data = JSONBig.stringify(data);
}
// Step 2: Encode the JSON String
const uint8Array = new TextEncoder().encode(data);

// Step 3: Convert to Uint8ClampedArray
return new Uint8ClampedArray(uint8Array.buffer);
}

module.exports = {
deserialize,
serialize
};
' > pkg/nodejs/utils.js
- name: Add serialize and deserialize methods to web bundle
run: |
echo '
import { parse, stringify } from "json-bigint";

export function deserialize(buffer) { // buffer is a Uint8ClampedArray | Uint8Array // return a JSON object
if (buffer instanceof Uint8ClampedArray) {
buffer = new Uint8Array(buffer.buffer);
}
const string = new TextDecoder().decode(buffer);
const jsonObject = parse(string);
return jsonObject;
}

export function serialize(data) { // data is an object // return a Uint8ClampedArray
// Step 1: Stringify the Object with BigInt support
if (typeof data === "object") {
data = stringify(data);
}
// Step 2: Encode the JSON String
const uint8Array = new TextEncoder().encode(data);

// Step 3: Convert to Uint8ClampedArray
return new Uint8ClampedArray(uint8Array.buffer);
}
' > pkg/web/utils.js
- name: Expose serialize and deserialize imports in nodejs target
run: |
sed -i '53i// import serialize and deserialize from utils.js\nconst { serialize, deserialize } = require(`./utils.js`);\nmodule.exports.serialize = serialize;\nmodule.exports.deserialize = deserialize;' pkg/nodejs/ezkl.js
- name: Expose serialize and deserialize imports in web target
run: |
sed -i '51i\
// import serialize and deserialize from utils.js\
import { serialize, deserialize } from '\''./utils.js'\'';\
export { serialize, deserialize };' pkg/web/ezkl.js
- name: Add serialize and deserialize imports to nodejs ezkl.d.ts
run: |
sed -i '1i\
export declare function serialize(data: object | string): Uint8ClampedArray;\
export declare function deserialize(buffer: Uint8ClampedArray | Uint8Array): any;' pkg/nodejs/ezkl.d.ts

- name: Add serialize and deserialize imports to web ezkl.d.ts
run: |
sed -i '1i\
export declare function serialize(data: object | string): Uint8ClampedArray;\
export declare function deserialize(buffer: Uint8ClampedArray | Uint8Array): any;' pkg/web/ezkl.d.ts

- name: Create README.md in pkg folder
run: |
curl -s "https://raw.githubusercontent.com/zkonduit/ezkljs-engine/main/README.md" > ./pkg/README.md

- name: Set up Node.js
uses: actions/setup-node@1a4442cacd436585916779262731d5b162bc6ec7 #v3.8.2
with:
node-version: "18.12.1"
registry-url: "https://registry.npmjs.org"
- name: Publish to npm
run: |
cd pkg
npm install
npm ci
npm publish
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

in-browser-evm-ver-publish:
permissions:
contents: read
packages: write
name: publish-in-browser-evm-verifier-package
needs: [publish-wasm-bindings]
runs-on: ubuntu-latest
env:
RELEASE_TAG: ${{ github.ref_name }}
if: startsWith(github.ref, 'refs/tags/')
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- name: Update version in package.json
shell: bash
run: |
sed -i "s|\"version\": \".*\"|\"version\": \"$RELEASE_TAG\"|" in-browser-evm-verifier/package.json
- name: Prepare tag and fetch package integrity
run: |
CLEANED_TAG=${RELEASE_TAG} # Get the tag from ref_name
CLEANED_TAG="${CLEANED_TAG#v}" # Remove leading 'v'
echo "CLEANED_TAG=${CLEANED_TAG}" >> $GITHUB_ENV # Set it as an environment variable for later steps
ENGINE_INTEGRITY=$(npm view @ezkljs/engine@$CLEANED_TAG dist.integrity)
echo "ENGINE_INTEGRITY=$ENGINE_INTEGRITY" >> $GITHUB_ENV
- name: Update @ezkljs/engine version in package.json
shell: bash
env:
RELEASE_TAG: ${{ github.ref_name }}
run: |
sed -i "s|\"@ezkljs/engine\": \".*\"|\"@ezkljs/engine\": \"$CLEANED_TAG\"|" in-browser-evm-verifier/package.json
- name: Update the engine import in in-browser-evm-verifier to use @ezkljs/engine package instead of the local one
run: |
sed -i "s|import { encodeVerifierCalldata } from '../nodejs/ezkl';|import { encodeVerifierCalldata } from '@ezkljs/engine';|" in-browser-evm-verifier/src/index.ts
- name: Update pnpm-lock.yaml versions and integrity
run: |
awk -v integrity="$ENGINE_INTEGRITY" -v tag="$CLEANED_TAG" '
NR==30{$0=" specifier: \"" tag "\""}
NR==31{$0=" version: \"" tag "\""}
NR==400{$0=" /@ezkljs/engine@" tag ":"}
NR==401{$0=" resolution: {integrity: \"" integrity "\"}"} 1' in-browser-evm-verifier/pnpm-lock.yaml > temp.yaml && mv temp.yaml in-browser-evm-verifier/pnpm-lock.yaml
- name: Use pnpm 8
uses: pnpm/action-setup@eae0cfeb286e66ffb5155f1a79b90583a127a68b #v2.4.1
with:
version: 8
- name: Set up Node.js
uses: actions/setup-node@1a4442cacd436585916779262731d5b162bc6ec7 #v3.8.2
with:
node-version: "18.12.1"
registry-url: "https://registry.npmjs.org"
- name: Publish to npm
run: |
cd in-browser-evm-verifier
pnpm install --frozen-lockfile
pnpm run build
pnpm publish --no-git-checks
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
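For reference, the serialize and deserialize helpers generated into utils.js above are re-exported by the published @ezkljs/engine package, so a consumer can round-trip JSON artifacts through the Uint8ClampedArray representation the wasm bindings expect. A minimal Node.js usage sketch (the payload is illustrative, not taken from this diff):

// Round-trip a JSON object through the engine's byte representation.
// Assumes the published package is installed: npm install @ezkljs/engine
const { serialize, deserialize } = require('@ezkljs/engine');

// serialize: stringify with json-bigint, then encode to a Uint8ClampedArray
const bytes = serialize({ example: 'payload' });

// deserialize: decode the bytes and parse them back into a JSON object
const obj = deserialize(bytes);
console.log(obj); // { example: 'payload' }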
4 .github/workflows/large-tests.yml (vendored)
@@ -13,9 +13,9 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-05-01
toolchain: nightly-2025-02-17
override: true
components: rustfmt, clippy
- name: nanoGPT Mock
17 .github/workflows/pypi-gpu.yml (vendored)
@@ -27,8 +27,6 @@ jobs:
target: [x86_64]
env:
RELEASE_TAG: ${{ github.ref_name }}
RUSTFLAGS: "-C linker=gcc"
OPENSSL_NO_VENDOR: 1
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
@@ -38,16 +36,6 @@ jobs:
python-version: 3.12
architecture: x64


- name: Install build dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential g++ gcc cmake libclang-dev llvm-dev libstdc++-12-dev libc6 libc6-dev libssl-dev pkg-config

- name: Force rebuild icicle dependencies
run: cargo clean -p icicle-runtime -p icicle-core -p icicle-hash -p icicle-bn254

- name: Set pyproject.toml version to match github tag and rename ezkl to ezkl-gpu
shell: bash
run: |
@@ -55,12 +43,11 @@ jobs:
sed "s/ezkl/ezkl-gpu/" pyproject.toml.orig > pyproject.toml.tmp
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.tmp > pyproject.toml

- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2023-06-27
override: true
components: rustfmt, clippy
cache: false

- name: Set Cargo.toml version to match github tag and rename ezkl to ezkl-gpu
shell: bash
@@ -83,7 +70,7 @@ jobs:
target: ${{ matrix.target }}
manylinux: auto
container: off
args: --release --out dist --features python-bindings,gpu-accelerated
args: --release --out dist --features python-bindings,icicle

- name: Install built wheel
if: matrix.target == 'x86_64'
10 .github/workflows/pypi.yml (vendored)
@@ -48,12 +48,11 @@ jobs:
mv Cargo.lock Cargo.lock.orig
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock

- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-05-01
toolchain: nightly-2025-02-17
override: true
components: rustfmt, clippy
cache: false

- name: Build wheels
if: matrix.target == 'universal2-apple-darwin'
@@ -114,12 +113,11 @@ jobs:
mv Cargo.lock Cargo.lock.orig
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock

- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-05-01
toolchain: nightly-2025-02-17
override: true
components: rustfmt, clippy
cache: false

- name: Build wheels
uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
37 .github/workflows/release.yml (vendored)
@@ -26,6 +26,7 @@ jobs:
shell: bash
run: |
echo "EZKL_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
echo "version is: ${{ env.EZKL_VERSION }}"

- name: Create Github Release
id: create-release
@@ -47,32 +48,23 @@ jobs:
TARGET_DIR: ./target
RUST_BACKTRACE: 1
PCRE2_SYS_STATIC: 1
RUSTFLAGS: "-C linker=gcc"
OPENSSL_NO_VENDOR: 1
steps:
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-05-01
toolchain: nightly-2025-02-17
override: true
components: rustfmt, clippy
cache: false
- name: Checkout repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false

- name: Install build dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential g++ gcc cmake libclang-dev llvm-dev libstdc++-12-dev libc6 libc6-dev libssl-dev pkg-config

- name: Force rebuild icicle dependencies
run: cargo clean -p icicle-runtime -p icicle-core -p icicle-hash -p icicle-bn254

- name: Get release version from tag
shell: bash
run: |
echo "EZKL_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
echo "version is: ${{ env.EZKL_VERSION }}"

- name: Set Cargo.toml version to match github tag
shell: bash
@@ -88,7 +80,7 @@ jobs:
sudo apt-get update

- name: Build release binary
run: cargo build --release -Z sparse-registry --features gpu-accelerated
run: cargo build --release -Z sparse-registry --features icicle

- name: Build archive
shell: bash
@@ -127,27 +119,27 @@ jobs:
include:
- build: windows-msvc
os: windows-latest
rust: nightly-2025-05-01
rust: nightly-2025-02-17
target: x86_64-pc-windows-msvc
- build: macos
os: macos-13
rust: nightly-2025-05-01
rust: nightly-2025-02-17
target: x86_64-apple-darwin
- build: macos-aarch64
os: macos-13
rust: nightly-2025-05-01
rust: nightly-2025-02-17
target: aarch64-apple-darwin
- build: linux-musl
os: ubuntu-22.04
rust: nightly-2025-05-01
rust: nightly-2025-02-17
target: x86_64-unknown-linux-musl
- build: linux-gnu
os: ubuntu-22.04
rust: nightly-2025-05-01
rust: nightly-2025-02-17
target: x86_64-unknown-linux-gnu
- build: linux-aarch64
os: ubuntu-22.04
rust: nightly-2025-05-01
rust: nightly-2025-02-17
target: aarch64-unknown-linux-gnu

steps:
@@ -160,6 +152,7 @@ jobs:
shell: bash
run: |
echo "EZKL_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
echo "version is: ${{ env.EZKL_VERSION }}"

- name: Set Cargo.toml version to match github tag
shell: bash
@@ -205,15 +198,15 @@ jobs:

- name: Build release binary (no asm or metal)
if: matrix.build != 'linux-gnu' && matrix.build != 'macos-aarch64'
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features mimalloc
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry

- name: Build release binary (asm)
if: matrix.build == 'linux-gnu'
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features mimalloc
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features asm

- name: Build release binary (metal)
if: matrix.build == 'macos-aarch64'
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features macos-metal,mimalloc
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features macos-metal

- name: Strip release binary
if: matrix.build != 'windows-msvc' && matrix.build != 'linux-aarch64'
1109 .github/workflows/rust.yml (vendored)
File diff suppressed because it is too large
4 .github/workflows/static-analysis.yml (vendored)
@@ -15,9 +15,9 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 #v1.0.6
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-05-01
toolchain: nightly-2025-02-17
override: true
components: rustfmt, clippy
134 .github/workflows/swift-pm.yml (vendored, Normal file)
@@ -0,0 +1,134 @@
name: Build and Publish EZKL iOS SPM package

on:
push:
tags:
# Only support SemVer versioning tags
- 'v[0-9]+.[0-9]+.[0-9]+'
- '[0-9]+.[0-9]+.[0-9]+'

jobs:
build-and-update:
permissions:
contents: read
packages: write
runs-on: macos-latest
env:
EZKL_SWIFT_PACKAGE_REPO: github.com/zkonduit/ezkl-swift-package.git
RELEASE_TAG: ${{ github.ref_name }}

steps:
- name: Checkout EZKL
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false

- name: Extract TAG from github.ref_name
run: |
# github.ref_name is provided by GitHub Actions and contains the tag name directly.
TAG="${RELEASE_TAG}"
echo "Original TAG: $TAG"
# Remove leading 'v' if present to match the Swift Package Manager version format.
NEW_TAG=${TAG#v}
echo "Stripped TAG: $NEW_TAG"
echo "TAG=$NEW_TAG" >> $GITHUB_ENV

- name: Install Rust (nightly)
uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly
override: true

- name: Build EzklCoreBindings
run: CONFIGURATION=release cargo run --bin ios_gen_bindings --features "ios-bindings uuid camino uniffi_bindgen" --no-default-features

- name: Clone ezkl-swift-package repository
run: |
git clone https://${{ env.EZKL_SWIFT_PACKAGE_REPO }}

- name: Copy EzklCoreBindings
run: |
rm -rf ezkl-swift-package/Sources/EzklCoreBindings
cp -r build/EzklCoreBindings ezkl-swift-package/Sources/

- name: Copy Test Files
run: |
rm -rf ezkl-swift-package/Tests/EzklAssets/
mkdir -p ezkl-swift-package/Tests/EzklAssets/
cp tests/assets/kzg ezkl-swift-package/Tests/EzklAssets/kzg.srs
cp tests/assets/input.json ezkl-swift-package/Tests/EzklAssets/input.json
cp tests/assets/model.compiled ezkl-swift-package/Tests/EzklAssets/network.ezkl
cp tests/assets/settings.json ezkl-swift-package/Tests/EzklAssets/settings.json

- name: Check for changes
id: check_changes
run: |
cd ezkl-swift-package
if git diff --quiet Sources/EzklCoreBindings Tests/EzklAssets; then
echo "no_changes=true" >> $GITHUB_OUTPUT
else
echo "no_changes=false" >> $GITHUB_OUTPUT
fi

- name: Set up Xcode environment
if: steps.check_changes.outputs.no_changes == 'false'
run: |
sudo xcode-select -s /Applications/Xcode.app/Contents/Developer
sudo xcodebuild -license accept

- name: Run Package Tests
if: steps.check_changes.outputs.no_changes == 'false'
run: |
cd ezkl-swift-package
xcodebuild test \
-scheme EzklPackage \
-destination 'platform=iOS Simulator,name=iPhone 15 Pro,OS=17.5' \
-resultBundlePath ../testResults

- name: Run Example App Tests
if: steps.check_changes.outputs.no_changes == 'false'
run: |
cd ezkl-swift-package/Example
xcodebuild test \
-project Example.xcodeproj \
-scheme EzklApp \
-destination 'platform=iOS Simulator,name=iPhone 15 Pro,OS=17.5' \
-parallel-testing-enabled NO \
-resultBundlePath ../../exampleTestResults \
-skip-testing:EzklAppUITests/EzklAppUITests/testButtonClicksInOrder

- name: Setup Git
run: |
cd ezkl-swift-package
git config user.name "GitHub Action"
git config user.email "action@github.com"
git remote set-url origin https://zkonduit:${EZKL_SWIFT_PACKAGE_REPO_TOKEN}@${{ env.EZKL_SWIFT_PACKAGE_REPO }}
env:
EZKL_SWIFT_PACKAGE_REPO_TOKEN: ${{ secrets.EZKL_PORTER_TOKEN }}

- name: Commit and Push Changes
if: steps.check_changes.outputs.no_changes == 'false'
run: |
cd ezkl-swift-package
git add Sources/EzklCoreBindings Tests/EzklAssets
git commit -m "Automatically updated EzklCoreBindings for EZKL"
if ! git push origin; then
echo "::error::Failed to push changes to ${{ env.EZKL_SWIFT_PACKAGE_REPO }}. Please ensure that EZKL_PORTER_TOKEN has the correct permissions."
exit 1
fi

- name: Tag the latest commit
run: |
cd ezkl-swift-package
source $GITHUB_ENV
# Tag the latest commit on the current branch
if git rev-parse "$TAG" >/dev/null 2>&1; then
echo "Tag $TAG already exists locally. Skipping tag creation."
else
git tag "$TAG"
fi

if ! git push origin "$TAG"; then
echo "::error::Failed to push tag '$TAG' to ${{ env.EZKL_SWIFT_PACKAGE_REPO }}. Please ensure EZKL_PORTER_TOKEN has correct permissions."
exit 1
fi
2 .gitignore (vendored)
@@ -52,5 +52,3 @@ docs/python/build
!tests/assets/vk_aggr.key
cache
out
!tests/assets/wasm.code
!tests/assets/wasm.sol
482 Cargo.lock (generated)
@@ -126,27 +126,6 @@ dependencies = [
"winnow 0.6.26",
]

[[package]]
name = "alloy-eip2930"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41"
dependencies = [
"alloy-primitives 0.8.25",
"alloy-rlp",
]

[[package]]
name = "alloy-eip7702"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04"
dependencies = [
"alloy-primitives 0.8.25",
"alloy-rlp",
"k256",
]

[[package]]
name = "alloy-eips"
version = "0.1.0"
@@ -238,7 +217,7 @@ dependencies = [
"bytes",
"cfg-if",
"const-hex",
"derive_more 0.99.20",
"derive_more",
"hex-literal",
"itoa",
"ruint",
@@ -255,7 +234,7 @@ dependencies = [
"bytes",
"cfg-if",
"const-hex",
"derive_more 0.99.20",
"derive_more",
"getrandom 0.2.16",
"hex-literal",
"itoa",
@@ -268,25 +247,6 @@ dependencies = [
"tiny-keccak",
]

[[package]]
name = "alloy-primitives"
version = "0.8.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c77490fe91a0ce933a1f219029521f20fc28c2c0ca95d53fa4da9c00b8d9d4e"
dependencies = [
"alloy-rlp",
"bytes",
"cfg-if",
"const-hex",
"derive_more 2.0.1",
"hashbrown 0.15.2",
"itoa",
"k256",
"paste",
"ruint",
"tiny-keccak",
]

[[package]]
name = "alloy-provider"
version = "0.1.0"
@@ -882,13 +842,14 @@ dependencies = [
]

[[package]]
name = "aurora-engine-modexp"
version = "1.2.0"
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "518bc5745a6264b5fd7b09dffb9667e400ee9e2bbe18555fac75e1fe9afa0df9"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
"hex",
"num",
"hermit-abi 0.1.19",
"libc",
"winapi",
]

[[package]]
@@ -959,6 +920,29 @@ dependencies = [
"serde",
]

[[package]]
name = "bindgen"
version = "0.69.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088"
dependencies = [
"bitflags 2.9.0",
"cexpr",
"clang-sys",
"itertools 0.11.0",
"lazy_static",
"lazycell",
"log",
"prettyplease",
"proc-macro2",
"quote",
"regex",
"rustc-hash 1.1.0",
"shlex",
"syn 2.0.101",
"which",
]

[[package]]
name = "bit-set"
version = "0.5.3"
@@ -1177,6 +1161,15 @@ dependencies = [
"shlex",
]

[[package]]
name = "cexpr"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"
dependencies = [
"nom",
]

[[package]]
name = "cfg-if"
version = "1.0.0"
@@ -1227,7 +1220,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
dependencies = [
"ciborium-io",
"half",
"half 2.6.0",
]

[[package]]
name = "clang-sys"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"
dependencies = [
"glob",
"libc",
"libloading",
]

[[package]]
name = "clap"
version = "2.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
dependencies = [
"bitflags 1.3.2",
"textwrap 0.11.0",
"unicode-width 0.1.14",
]

[[package]]
@@ -1258,7 +1273,7 @@ version = "4.5.47"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06f5378ea264ad4f82bbc826628b5aad714a75abf6ece087e923010eb937fb6"
dependencies = [
"clap",
"clap 4.5.37",
]

[[package]]
@@ -1301,7 +1316,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c"
dependencies = [
"lazy_static",
"windows-sys 0.59.0",
"windows-sys 0.48.0",
]

[[package]]
@@ -1445,6 +1460,32 @@ dependencies = [
"cfg-if",
]

[[package]]
name = "criterion"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f"
dependencies = [
"atty",
"cast",
"clap 2.34.0",
"criterion-plot 0.4.5",
"csv",
"itertools 0.10.5",
"lazy_static",
"num-traits",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_cbor",
"serde_derive",
"serde_json",
"tinytemplate",
"walkdir",
]

[[package]]
name = "criterion"
version = "0.5.1"
@@ -1454,8 +1495,8 @@ dependencies = [
"anes",
"cast",
"ciborium",
"clap",
"criterion-plot",
"clap 4.5.37",
"criterion-plot 0.5.0",
"is-terminal",
"itertools 0.10.5",
"num-traits",
@@ -1471,6 +1512,16 @@ dependencies = [
"walkdir",
]

[[package]]
name = "criterion-plot"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876"
dependencies = [
"cast",
"itertools 0.10.5",
]

[[package]]
name = "criterion-plot"
version = "0.5.0"
@@ -1543,6 +1594,27 @@ dependencies = [
"typenum",
]

[[package]]
name = "csv"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf"
dependencies = [
"csv-core",
"itoa",
"ryu",
"serde",
]

[[package]]
name = "csv-core"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d02f3b0da4c6504f86e9cd789d8dbafab48c2321be74e9987593de5a894d93d"
dependencies = [
"memchr",
]

[[package]]
name = "dashmap"
version = "5.5.3"
@@ -1621,27 +1693,6 @@ dependencies = [
"syn 2.0.101",
]

[[package]]
name = "derive_more"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678"
dependencies = [
"derive_more-impl",
]

[[package]]
name = "derive_more-impl"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.101",
"unicode-xid",
]

[[package]]
name = "digest"
version = "0.9.0"
@@ -1888,7 +1939,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e"
dependencies = [
"libc",
"windows-sys 0.59.0",
"windows-sys 0.52.0",
]

[[package]]
@@ -1941,13 +1992,14 @@ version = "0.0.0"
dependencies = [
"alloy",
"bincode",
"camino",
"chrono",
"clap",
"clap 4.5.37",
"clap_complete",
"colored",
"colored_json",
"console_error_panic_hook",
"criterion",
"criterion 0.5.1",
"ecc",
"env_logger 0.10.2",
"ethabi",
@@ -1959,11 +2011,9 @@ dependencies = [
"halo2_solidity_verifier",
"halo2curves 0.7.0 (git+https://github.com/privacy-scaling-explorations/halo2curves?rev=b753a832e92d5c86c5c997327a9cf9de86a18851)",
"hex",
"icicle-runtime",
"indicatif",
"instant",
"itertools 0.10.5",
"jemallocator",
"lazy_static",
"log",
"maybe-rayon",
@@ -1995,7 +2045,9 @@ dependencies = [
"tosubcommand",
"tract-onnx",
"uniffi",
"uniffi_bindgen",
"unzip-n",
"uuid",
"wasm-bindgen",
"wasm-bindgen-console-logger",
"wasm-bindgen-rayon",
@@ -2204,7 +2256,7 @@ version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8640e34b88f7652208ce9e88b1a37a2ae95227d84abec377ccd3c5cfeb141ed4"
dependencies = [
"rustix",
"rustix 1.0.5",
"windows-sys 0.59.0",
]

@@ -2397,6 +2449,12 @@ dependencies = [
"subtle",
]

[[package]]
name = "half"
version = "1.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403"

[[package]]
name = "half"
version = "2.6.0"
@@ -2411,7 +2469,7 @@ dependencies = [
[[package]]
name = "halo2_gadgets"
version = "0.2.0"
source = "git+https://github.com/zkonduit/halo2?branch=ac%2Fconditional-compilation-icicle2#1dd2090741f006fd031a07da7f3c9dfce5e0015e"
source = "git+https://github.com/zkonduit/halo2#f441c920be45f8f05d2c06a173d82e8885a5ed4d"
dependencies = [
"arrayvec 0.7.6",
"bitvec",
@@ -2428,7 +2486,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.3.0"
source = "git+https://github.com/zkonduit/halo2#1dd2090741f006fd031a07da7f3c9dfce5e0015e?branch=ac%2Fconditional-compilation-icicle2#01c88842679b4308e43ae5ed91c4183e861669bd"
source = "git+https://github.com/zkonduit/halo2#f441c920be45f8f05d2c06a173d82e8885a5ed4d"
dependencies = [
"bincode",
"blake2b_simd",
@@ -2438,7 +2496,7 @@ dependencies = [
"halo2curves 0.7.0 (git+https://github.com/privacy-scaling-explorations/halo2curves?rev=b753a832e92d5c86c5c997327a9cf9de86a18851)",
"icicle-bn254",
"icicle-core",
"icicle-runtime",
"icicle-cuda-runtime",
"instant",
"lazy_static",
"log",
@@ -2446,7 +2504,7 @@ dependencies = [
"mopro-msm",
"rand_chacha 0.3.1",
"rand_core 0.6.4",
"rustc-hash",
"rustc-hash 2.1.1",
"serde",
"sha3 0.9.1",
"tracing",
@@ -2455,7 +2513,7 @@ dependencies = [
[[package]]
name = "halo2_solidity_verifier"
version = "0.1.0"
source = "git+https://github.com/zkonduit/ezkl-verifier?branch=main#a518a917f076adb851a1ae39e09527f8dbde5000"
source = "git+https://github.com/zkonduit/verification-ezkl?branch=vka-hash#409f977e461b435b9afc33ed38edba09fe2eaee4"
dependencies = [
"askama",
"blake2b_simd",
@@ -2463,7 +2521,6 @@ dependencies = [
"hex",
"itertools 0.11.0",
"regex",
"revm 14.0.3",
"ruint",
"sha3 0.10.8",
]
@@ -2661,6 +2718,15 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"

[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]

[[package]]
name = "hermit-abi"
version = "0.3.9"
@@ -2852,45 +2918,33 @@ dependencies = [

[[package]]
name = "icicle-bn254"
version = "3.7.0"
source = "git+https://github.com/ingonyama-zk/icicle?branch=emir%2Fgate_eval_2#012e00694f4cf399fe7a42d9cfbfa6cd7a60f876"
version = "2.8.0"
source = "git+https://github.com/ingonyama-zk/icicle?branch=ezkl-icicle2#5dfe006a0f1bc62ea82ca297709bbf3d22a2ca25"
dependencies = [
"cmake",
"criterion 0.3.6",
"icicle-core",
"icicle-hash",
"icicle-runtime",
"icicle-cuda-runtime",
]

[[package]]
name = "icicle-core"
version = "3.7.0"
source = "git+https://github.com/ingonyama-zk/icicle?branch=emir%2Fgate_eval_2#012e00694f4cf399fe7a42d9cfbfa6cd7a60f876"
version = "2.8.0"
source = "git+https://github.com/ingonyama-zk/icicle?branch=ezkl-icicle2#5dfe006a0f1bc62ea82ca297709bbf3d22a2ca25"
dependencies = [
"criterion 0.3.6",
"hex",
"icicle-runtime",
"once_cell",
"rand 0.8.5",
"icicle-cuda-runtime",
"rayon",
]

[[package]]
name = "icicle-hash"
version = "3.7.0"
source = "git+https://github.com/ingonyama-zk/icicle?branch=emir%2Fgate_eval_2#012e00694f4cf399fe7a42d9cfbfa6cd7a60f876"
name = "icicle-cuda-runtime"
version = "2.8.0"
source = "git+https://github.com/ingonyama-zk/icicle?branch=ezkl-icicle2#5dfe006a0f1bc62ea82ca297709bbf3d22a2ca25"
dependencies = [
"cmake",
"icicle-core",
"icicle-runtime",
"rand 0.8.5",
]

[[package]]
name = "icicle-runtime"
version = "3.7.0"
source = "git+https://github.com/ingonyama-zk/icicle?branch=emir%2Fgate_eval_2#012e00694f4cf399fe7a42d9cfbfa6cd7a60f876"
dependencies = [
"cmake",
"once_cell",
"bindgen",
"bitflags 1.3.2",
]

[[package]]
@@ -3148,7 +3202,7 @@ checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
dependencies = [
"hermit-abi 0.5.0",
"libc",
"windows-sys 0.59.0",
"windows-sys 0.52.0",
]

[[package]]
@@ -3208,26 +3262,6 @@ version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"

[[package]]
name = "jemalloc-sys"
version = "0.5.4+5.3.0-patched"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2"
dependencies = [
"cc",
"libc",
]

[[package]]
name = "jemallocator"
version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc"
dependencies = [
"jemalloc-sys",
"libc",
]

[[package]]
name = "jiff"
version = "0.2.10"
@@ -3343,12 +3377,28 @@ dependencies = [
"spin",
]

[[package]]
name = "lazycell"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"

[[package]]
name = "libc"
version = "0.2.172"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"

[[package]]
name = "libloading"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
dependencies = [
"cfg-if",
"windows-targets 0.48.5",
]

[[package]]
name = "libm"
version = "0.2.13"
@@ -3376,6 +3426,12 @@ dependencies = [
"redox_syscall",
]

[[package]]
name = "linux-raw-sys"
version = "0.4.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab"

[[package]]
name = "linux-raw-sys"
version = "0.9.4"
@@ -3814,7 +3870,7 @@ dependencies = [
"num-traits",
"pyo3",
"pyo3-build-config",
"rustc-hash",
"rustc-hash 2.1.1",
]

[[package]]
@@ -4245,6 +4301,16 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"

[[package]]
name = "prettyplease"
version = "0.2.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6"
dependencies = [
"proc-macro2",
"syn 2.0.101",
]

[[package]]
name = "primal-check"
version = "0.3.4"
@@ -4497,7 +4563,7 @@ dependencies = [
"pin-project-lite",
"quinn-proto",
"quinn-udp",
"rustc-hash",
"rustc-hash 2.1.1",
"rustls",
"socket2",
"thiserror 2.0.12",
@@ -4516,7 +4582,7 @@ dependencies = [
"getrandom 0.3.2",
"rand 0.9.1",
"ring",
"rustc-hash",
"rustc-hash 2.1.1",
"rustls",
"rustls-pki-types",
"slab",
@@ -4537,7 +4603,7 @@ dependencies = [
"once_cell",
"socket2",
"tracing",
"windows-sys 0.59.0",
"windows-sys 0.52.0",
]

[[package]]
@@ -4832,21 +4898,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68f4ca8ae0345104523b4af1a8a7ea97cfa1865cdb7a7c25d23c1a18d9b48598"
dependencies = [
"auto_impl",
"revm-interpreter 1.3.0",
"revm-precompile 2.2.0",
]

[[package]]
name = "revm"
version = "14.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "641702b12847f9ed418d552f4fcabe536d867a2c980e96b6e7e25d7b992f929f"
dependencies = [
"auto_impl",
"cfg-if",
"dyn-clone",
"revm-interpreter 10.0.3",
"revm-precompile 11.0.3",
"revm-interpreter",
"revm-precompile",
]

[[package]]
@@ -4855,16 +4908,7 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f959cafdf64a7f89b014fa73dc2325001cf654b3d9400260b212d19a2ebe3da0"
dependencies = [
"revm-primitives 1.3.0",
]

[[package]]
name = "revm-interpreter"
version = "10.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e5e14002afae20b5bf1566f22316122f42f57517000e559c55b25bf7a49cba2"
dependencies = [
"revm-primitives 10.0.0",
"revm-primitives",
]

[[package]]
@@ -4876,23 +4920,7 @@ dependencies = [
"k256",
"num",
"once_cell",
"revm-primitives 1.3.0",
"ripemd",
"sha2",
"substrate-bn",
]

[[package]]
name = "revm-precompile"
version = "11.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3198c06247e8d4ad0d1312591edf049b0de4ddffa9fecb625c318fd67db8639b"
dependencies = [
"aurora-engine-modexp",
"cfg-if",
"k256",
"once_cell",
"revm-primitives 10.0.0",
"revm-primitives",
"ripemd",
"sha2",
"substrate-bn",
@@ -4914,24 +4942,6 @@ dependencies = [
"hex",
]

[[package]]
name = "revm-primitives"
version = "10.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f1525851a03aff9a9d6a1d018b414d76252d6802ab54695b27093ecd7e7a101"
dependencies = [
"alloy-eip2930",
"alloy-eip7702",
"alloy-primitives 0.8.25",
"auto_impl",
"bitflags 2.9.0",
"bitvec",
"cfg-if",
"dyn-clone",
"enumn",
"hex",
]

[[package]]
name = "rfc6979"
version = "0.4.0"
@@ -5014,6 +5024,12 @@ version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"

[[package]]
name = "rustc-hash"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"

[[package]]
name = "rustc-hash"
version = "2.1.1"
@@ -5059,6 +5075,19 @@ dependencies = [
"version_check",
]

[[package]]
name = "rustix"
version = "0.38.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154"
dependencies = [
"bitflags 2.9.0",
"errno",
"libc",
"linux-raw-sys 0.4.15",
"windows-sys 0.52.0",
]

[[package]]
name = "rustix"
version = "1.0.5"
@@ -5068,8 +5097,8 @@ dependencies = [
"bitflags 2.9.0",
"errno",
"libc",
"linux-raw-sys",
"windows-sys 0.59.0",
"linux-raw-sys 0.9.4",
"windows-sys 0.52.0",
]

[[package]]
@@ -5316,6 +5345,16 @@ dependencies = [
"serde",
]

[[package]]
name = "serde_cbor"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
dependencies = [
"half 1.8.3",
"serde",
]

[[package]]
name = "serde_derive"
version = "1.0.219"
@@ -5496,7 +5535,7 @@ dependencies = [
"num-traits",
"poseidon",
"rand 0.8.5",
"revm 3.5.0",
"revm",
"serde",
"sha3 0.10.8",
]
@@ -5762,8 +5801,8 @@ dependencies = [
"fastrand",
"getrandom 0.3.2",
"once_cell",
"rustix",
"windows-sys 0.59.0",
"rustix 1.0.5",
"windows-sys 0.52.0",
]

[[package]]
@@ -5808,6 +5847,15 @@ dependencies = [
"syn 1.0.109",
]

[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
"unicode-width 0.1.14",
]

[[package]]
name = "textwrap"
version = "0.16.2"
@@ -6201,7 +6249,7 @@ dependencies = [
"downcast-rs",
"dyn-clone",
"dyn-hash",
"half",
"half 2.6.0",
"itertools 0.12.1",
"lazy_static",
"maplit",
@@ -6236,7 +6284,7 @@ dependencies = [
"downcast-rs",
"dyn-clone",
"dyn-hash",
"half",
"half 2.6.0",
"lazy_static",
"liquid",
"liquid-core",
@@ -6394,6 +6442,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f31bff6daf87277a9014bcdefbc2842b0553392919d1096843c5aad899ca4588"
dependencies = [
"anyhow",
"uniffi_bindgen",
"uniffi_build",
"uniffi_core",
"uniffi_macros",
@@ -6416,7 +6465,7 @@ dependencies = [
"once_cell",
"paste",
"serde",
"textwrap",
"textwrap 0.16.2",
"toml 0.5.11",
"uniffi_meta",
"uniffi_testing",
@@ -6508,7 +6557,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cef408229a3a407fafa4c36dc4f6ece78a6fb258ab28d2b64bddd49c8cb680f6"
dependencies = [
"anyhow",
"textwrap",
"textwrap 0.16.2",
"uniffi_meta",
"uniffi_testing",
"weedle2",
@@ -6576,6 +6625,15 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"

[[package]]
name = "uuid"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9"
dependencies = [
"getrandom 0.3.2",
]

[[package]]
name = "valuable"
version = "0.1.1"
@@ -6839,6 +6897,18 @@ dependencies = [
"nom",
]

[[package]]
name = "which"
version = "4.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7"
dependencies = [
"either",
"home",
"once_cell",
"rustix 0.38.44",
]

[[package]]
name = "winapi"
version = "0.3.9"
@@ -6861,7 +6931,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
"windows-sys 0.59.0",
"windows-sys 0.48.0",
]

[[package]]
@@ -7216,7 +7286,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e"
dependencies = [
"libc",
"rustix",
"rustix 1.0.5",
]

[[package]]
88 Cargo.toml
@@ -16,12 +16,12 @@ crate-type = ["cdylib", "rlib", "staticlib"]


[dependencies]
halo2_gadgets = { git = "https://github.com/zkonduit/halo2", branch= "ac/conditional-compilation-icicle2" }
halo2_gadgets = { git = "https://github.com/zkonduit/halo2" }
halo2curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", rev = "b753a832e92d5c86c5c997327a9cf9de86a18851", features = [
"derive_serde",
] }
halo2_proofs = { git = "https://github.com/zkonduit/halo2", package = "halo2_proofs", branch= "ac/conditional-compilation-icicle2", features = [
"circuit-params", "mv-lookup"
halo2_proofs = { git = "https://github.com/zkonduit/halo2", package = "halo2_proofs", features = [
"circuit-params",
] }
rand = { version = "0.8", default-features = false }
itertools = { version = "0.10.3", default-features = false }
@@ -33,11 +33,9 @@ thiserror = { version = "1.0.38", default-features = false }
hex = { version = "0.4.3", default-features = false }
halo2_wrong_ecc = { git = "https://github.com/zkonduit/halo2wrong", branch = "ac/chunked-mv-lookup", package = "ecc" }
snark-verifier = { git = "https://github.com/zkonduit/snark-verifier", branch = "ac/chunked-mv-lookup", features = [
"derive_serde", "mv-lookup"
] }
halo2_solidity_verifier = { git = "https://github.com/zkonduit/ezkl-verifier", branch = "main", optional = true, features = [
"evm", "mv-lookup",
"derive_serde",
] }
halo2_solidity_verifier = { git = "https://github.com/zkonduit/verification-ezkl", branch = "vka-hash", optional = true }
maybe-rayon = { version = "0.1.1", default-features = false }
bincode = { version = "1.3.3", default-features = false }
unzip-n = "0.1.2"
@@ -45,12 +43,10 @@ num = "0.4.1"
tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package = "tosubcommand", optional = true }
semver = { version = "1.0.22", optional = true }


[target.'cfg(not(target_arch = "wasm32"))'.dependencies]

# evm related deps
serde_json = { version = "1.0.97", features = ["float_roundtrip", "raw_value"] }

# evm related deps
alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev = "5fbf57bac99edef9d8475190109a7ea9fb7e5e83", features = [
"provider-http",
"signers",
@@ -60,7 +56,6 @@ alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev = "5
"node-bindings",

], optional = true }

foundry-compilers = { version = "0.4.1", features = [
"svm-solc",
], optional = true }
@@ -93,12 +88,15 @@ pyo3-log = { version = "0.12.0", default-features = false, optional = true }
tract-onnx = { git = "https://github.com/sonos/tract/", rev = "37132e0397d0a73e5bd3a8615d932dabe44f6736", default-features = false, optional = true }
tabled = { version = "0.12.0", optional = true }
objc = { version = "0.2.4", optional = true }
pyo3-stub-gen = { version = "0.6.0", optional = true }
jemallocator = { version = "0.5", optional = true }
mimalloc = { version = "0.1", optional = true }
# GPU / device related things (optional - only enabled with gpu-accelerated feature)
icicle-runtime = { git = "https://github.com/ingonyama-zk/icicle", branch="emir/gate_eval_2", package="icicle-runtime", optional = true }
pyo3-stub-gen = { version = "0.6.0", optional = true }

# universal bindings
uniffi = { version = "=0.28.0", optional = true }
getrandom = { version = "0.2.8", optional = true }
uniffi_bindgen = { version = "=0.28.0", optional = true }
|
||||
camino = { version = "^1.1", optional = true }
|
||||
uuid = { version = "1.10.0", features = ["v4"], optional = true }
|
||||
|
||||
[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies]
|
||||
colored = { version = "2.0.0", default-features = false, optional = true }
|
||||
@@ -208,7 +206,9 @@ test = false
|
||||
bench = false
|
||||
required-features = ["ezkl"]
|
||||
|
||||
|
||||
[[bin]]
|
||||
name = "ios_gen_bindings"
|
||||
required-features = ["ios-bindings", "uuid", "camino", "uniffi_bindgen"]
|
||||
|
||||
[[bin]]
|
||||
name = "py_stub_gen"
|
||||
@@ -217,19 +217,20 @@ required-features = ["python-bindings"]
|
||||
[features]
|
||||
web = ["wasm-bindgen-rayon"]
|
||||
default = [
|
||||
"eth",
|
||||
"dep:halo2_solidity_verifier",
|
||||
"eth-mv-lookup",
|
||||
"ezkl",
|
||||
"precompute-coset",
|
||||
"no-banner",
|
||||
"parallel-poly-read",
|
||||
"reusable-verifier",
|
||||
]
|
||||
onnx = ["dep:tract-onnx"]
|
||||
python-bindings = ["pyo3", "pyo3-log", "pyo3-async-runtimes", "pyo3-stub-gen"]
|
||||
logging = ["dep:colored", "dep:env_logger", "dep:chrono"]
|
||||
ios-bindings = ["eth-mv-lookup", "precompute-coset", "parallel-poly-read", "uniffi"]
|
||||
ios-bindings-test = ["ios-bindings", "uniffi/bindgen-tests"]
|
||||
ezkl = [
|
||||
"onnx",
|
||||
"dep:colored",
|
||||
"dep:env_logger",
|
||||
"tabled/color",
|
||||
"serde_json/std",
|
||||
"colored_json",
|
||||
@@ -240,50 +241,57 @@ ezkl = [
|
||||
"dep:lazy_static",
|
||||
"dep:tokio",
|
||||
"dep:openssl",
|
||||
"dep:mimalloc",
|
||||
"dep:chrono",
|
||||
"dep:sha256",
|
||||
"dep:clap_complete",
|
||||
"dep:semver",
|
||||
"dep:clap",
|
||||
"dep:tosubcommand",
|
||||
"logging",
|
||||
]
|
||||
eth = ["dep:alloy", "dep:foundry-compilers", "dep:ethabi"]
|
||||
eth = [
|
||||
"dep:alloy",
|
||||
"dep:foundry-compilers",
|
||||
"dep:ethabi",
|
||||
]
|
||||
solidity-verifier = [
|
||||
"dep:halo2_solidity_verifier",
|
||||
]
|
||||
solidity-verifier-mv-lookup = [
|
||||
"halo2_solidity_verifier/mv-lookup",
|
||||
]
|
||||
eth-mv-lookup = [
|
||||
"solidity-verifier-mv-lookup",
|
||||
"mv-lookup",
|
||||
"eth",
|
||||
]
|
||||
eth-original-lookup = [
|
||||
"eth",
|
||||
"solidity-verifier",
|
||||
]
|
||||
parallel-poly-read = [
|
||||
"halo2_proofs/circuit-params",
|
||||
"halo2_proofs/parallel-poly-read",
|
||||
]
|
||||
mv-lookup = ["halo2_proofs/mv-lookup", "snark-verifier/mv-lookup"]
|
||||
mv-lookup = [
|
||||
"halo2_proofs/mv-lookup",
|
||||
"snark-verifier/mv-lookup",
|
||||
]
|
||||
asm = ["halo2curves/asm", "halo2_proofs/asm"]
|
||||
precompute-coset = ["halo2_proofs/precompute-coset"]
|
||||
det-prove = []
|
||||
gpu-accelerated = ["halo2_proofs/gpu-accelerated", "dep:icicle-runtime"]
|
||||
icicle = ["halo2_proofs/icicle_gpu"]
|
||||
empty-cmd = []
|
||||
no-banner = []
|
||||
no-update = []
|
||||
macos-metal = ["halo2_proofs/macos"]
|
||||
ios-metal = ["halo2_proofs/ios"]
|
||||
jemalloc = ["dep:jemallocator"]
|
||||
mimalloc = ["dep:mimalloc"]
|
||||
reusable-verifier = []
|
||||
|
||||
|
||||
[patch.'https://github.com/zkonduit/halo2']
|
||||
halo2_proofs = { git = "https://github.com/zkonduit/halo2#1dd2090741f006fd031a07da7f3c9dfce5e0015e", package = "halo2_proofs", branch= "ac/conditional-compilation-icicle2", features = [
|
||||
"circuit-params", "mv-lookup"
|
||||
] }
|
||||
|
||||
[patch.'https://github.com/zkonduit/halo2#f441c920be45f8f05d2c06a173d82e8885a5ed4d']
|
||||
halo2_proofs = { git = "https://github.com/zkonduit/halo2#1dd2090741f006fd031a07da7f3c9dfce5e0015e", package = "halo2_proofs", branch= "ac/conditional-compilation-icicle2", features = [
|
||||
"circuit-params", "mv-lookup"
|
||||
] }
|
||||
|
||||
|
||||
[patch.crates-io]
|
||||
uniffi_testing = { git = "https://github.com/ElusAegis/uniffi-rs", branch = "feat/testing-feature-build-fix" }
|
||||
|
||||
[profile.release]
|
||||
# debug = true
|
||||
rustflags = ["-C", "relocation-model=pic"]
|
||||
lto = "fat"
|
||||
codegen-units = 1
|
||||
@@ -296,5 +304,3 @@ opt-level = 3
|
||||
|
||||
[package.metadata.wasm-pack.profile.release]
|
||||
wasm-opt = ["-O4", "--flexible-inline-max-function-size", "4294967295"]
|
||||
|
||||
|
||||
|
||||
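The `jemalloc` and `mimalloc` features above each gate an optional allocator crate. As a rough illustration (a minimal sketch, not code from this repo), such feature flags are typically wired to a global allocator in the crate root like this; the static names here are hypothetical:

```rust
// Sketch only: the usual #[global_allocator] pattern behind allocator
// feature flags like `jemalloc` and `mimalloc`. At most one of these
// features should be enabled at a time, since a crate can only have one
// global allocator.
#[cfg(feature = "jemalloc")]
#[global_allocator]
static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc;

#[cfg(feature = "mimalloc")]
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
```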
@@ -76,6 +76,11 @@ For more details visit the [docs](https://docs.ezkl.xyz). The CLI is faster than

To build the auto-generated Rust documentation and open it locally in your browser, run `cargo doc --open`.

#### In-browser EVM Verifier

As an alternative to running the native Halo2 verifier as a WASM binding in the browser, you can use the in-browser EVM verifier. Its source code, along with a README with usage instructions, lives in the `in-browser-evm-verifier` directory.

### Building the Project 🔨

#### Rust CLI
312 abis/DataAttestation.json Normal file
@@ -0,0 +1,312 @@
[
  {
    "inputs": [
      { "internalType": "address", "name": "_contractAddresses", "type": "address" },
      { "internalType": "bytes", "name": "_callData", "type": "bytes" },
      { "internalType": "uint256[]", "name": "_decimals", "type": "uint256[]" },
      { "internalType": "uint256[]", "name": "_bits", "type": "uint256[]" },
      { "internalType": "uint8", "name": "_instanceOffset", "type": "uint8" }
    ],
    "stateMutability": "nonpayable",
    "type": "constructor"
  },
  {
    "inputs": [],
    "name": "HALF_ORDER",
    "outputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "ORDER",
    "outputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [ { "internalType": "uint256[]", "name": "instances", "type": "uint256[]" } ],
    "name": "attestData",
    "outputs": [],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "callData",
    "outputs": [ { "internalType": "bytes", "name": "", "type": "bytes" } ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "contractAddress",
    "outputs": [ { "internalType": "address", "name": "", "type": "address" } ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [ { "internalType": "bytes", "name": "encoded", "type": "bytes" } ],
    "name": "getInstancesCalldata",
    "outputs": [ { "internalType": "uint256[]", "name": "instances", "type": "uint256[]" } ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [ { "internalType": "bytes", "name": "encoded", "type": "bytes" } ],
    "name": "getInstancesMemory",
    "outputs": [ { "internalType": "uint256[]", "name": "instances", "type": "uint256[]" } ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [ { "internalType": "uint256", "name": "index", "type": "uint256" } ],
    "name": "getScalars",
    "outputs": [
      {
        "components": [
          { "internalType": "uint256", "name": "decimals", "type": "uint256" },
          { "internalType": "uint256", "name": "bits", "type": "uint256" }
        ],
        "internalType": "struct DataAttestation.Scalars",
        "name": "",
        "type": "tuple"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "instanceOffset",
    "outputs": [ { "internalType": "uint8", "name": "", "type": "uint8" } ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [
      { "internalType": "uint256", "name": "x", "type": "uint256" },
      { "internalType": "uint256", "name": "y", "type": "uint256" },
      { "internalType": "uint256", "name": "denominator", "type": "uint256" }
    ],
    "name": "mulDiv",
    "outputs": [ { "internalType": "uint256", "name": "result", "type": "uint256" } ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      { "internalType": "int256", "name": "x", "type": "int256" },
      {
        "components": [
          { "internalType": "uint256", "name": "decimals", "type": "uint256" },
          { "internalType": "uint256", "name": "bits", "type": "uint256" }
        ],
        "internalType": "struct DataAttestation.Scalars",
        "name": "_scalars",
        "type": "tuple"
      }
    ],
    "name": "quantizeData",
    "outputs": [ { "internalType": "int256", "name": "quantized_data", "type": "int256" } ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      { "internalType": "address", "name": "target", "type": "address" },
      { "internalType": "bytes", "name": "data", "type": "bytes" }
    ],
    "name": "staticCall",
    "outputs": [ { "internalType": "bytes", "name": "", "type": "bytes" } ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [ { "internalType": "int256", "name": "x", "type": "int256" } ],
    "name": "toFieldElement",
    "outputs": [ { "internalType": "uint256", "name": "field_element", "type": "uint256" } ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      { "internalType": "address", "name": "verifier", "type": "address" },
      { "internalType": "bytes", "name": "encoded", "type": "bytes" }
    ],
    "name": "verifyWithDataAttestation",
    "outputs": [ { "internalType": "bool", "name": "", "type": "bool" } ],
    "stateMutability": "view",
    "type": "function"
  }
]
98 abis/QuantizeData.json Normal file
@@ -0,0 +1,98 @@
[
  {
    "inputs": [ { "internalType": "int256[]", "name": "quantized_data", "type": "int256[]" } ],
    "name": "check_is_valid_field_element",
    "outputs": [ { "internalType": "uint256[]", "name": "output", "type": "uint256[]" } ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      { "internalType": "bytes[]", "name": "data", "type": "bytes[]" },
      { "internalType": "uint256[]", "name": "decimals", "type": "uint256[]" },
      { "internalType": "uint256[]", "name": "scales", "type": "uint256[]" }
    ],
    "name": "quantize_data_multi",
    "outputs": [ { "internalType": "int256[]", "name": "quantized_data", "type": "int256[]" } ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [
      { "internalType": "bytes", "name": "data", "type": "bytes" },
      { "internalType": "uint256", "name": "decimals", "type": "uint256" },
      { "internalType": "uint256[]", "name": "scales", "type": "uint256[]" }
    ],
    "name": "quantize_data_single",
    "outputs": [ { "internalType": "int256[]", "name": "quantized_data", "type": "int256[]" } ],
    "stateMutability": "pure",
    "type": "function"
  },
  {
    "inputs": [ { "internalType": "int64[]", "name": "quantized_data", "type": "int64[]" } ],
    "name": "to_field_element",
    "outputs": [ { "internalType": "uint256[]", "name": "output", "type": "uint256[]" } ],
    "stateMutability": "pure",
    "type": "function"
  }
]
32 abis/TestReads.json Normal file
@@ -0,0 +1,32 @@
[
  {
    "inputs": [ { "internalType": "int256[]", "name": "_numbers", "type": "int256[]" } ],
    "stateMutability": "nonpayable",
    "type": "constructor"
  },
  {
    "inputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ],
    "name": "arr",
    "outputs": [ { "internalType": "int256", "name": "", "type": "int256" } ],
    "stateMutability": "view",
    "type": "function"
  }
]
@@ -4,6 +4,7 @@ use ezkl::circuit::*;
use ezkl::pfsys::create_keys;
use ezkl::pfsys::create_proof_circuit;
use ezkl::pfsys::srs::gen_srs;
use ezkl::pfsys::TranscriptType;
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2_proofs::poly::kzg::multiopen::ProverSHPLONK;
@@ -67,7 +68,7 @@ impl Circuit<Fr> for MyCircuit {
config
.layout(
&mut region,
&[&self.image, &self.kernel, &self.bias],
&[self.image.clone(), self.kernel.clone(), self.bias.clone()],
Box::new(PolyOp::Conv {
padding: vec![(0, 0)],
stride: vec![1; 2],
@@ -152,6 +153,8 @@ fn runcnvrl(c: &mut Criterion) {
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);

@@ -2,6 +2,7 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Through
use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::*;
use ezkl::pfsys::create_proof_circuit;
use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
@@ -14,7 +15,6 @@ use halo2_proofs::{
plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use itertools::Itertools;
use rand::rngs::OsRng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::marker::PhantomData;
@@ -59,7 +59,7 @@ impl Circuit<Fr> for MyCircuit {
config
.layout(
&mut region,
&self.inputs.iter().collect_vec(),
&self.inputs,
Box::new(PolyOp::Einsum {
equation: "i,i->".to_string(),
}),
@@ -119,6 +119,8 @@ fn rundot(c: &mut Criterion) {
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);

@@ -1,78 +1,52 @@
use criterion::{
criterion_group, criterion_main, AxisScale, BenchmarkId, Criterion, PlotConfiguration,
Throughput,
};
use ezkl::circuit::einsum::analysis::analyze_einsum_usage;
use ezkl::circuit::einsum::circuit_params::SingleEinsumParams;
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::*;
use ezkl::pfsys::srs::gen_srs;
use ezkl::pfsys::{create_keys, create_proof_circuit};
use ezkl::pfsys::create_proof_circuit;
use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
use ezkl::tensor::*;
use halo2_proofs::circuit::floor_planner::V1;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2_proofs::poly::kzg::multiopen::{ProverSHPLONK, VerifierSHPLONK};
use halo2_proofs::poly::kzg::multiopen::ProverSHPLONK;
use halo2_proofs::poly::kzg::multiopen::VerifierSHPLONK;
use halo2_proofs::poly::kzg::strategy::SingleStrategy;
use halo2_proofs::{
arithmetic::Field,
circuit::{Layouter, Value},
circuit::{Layouter, SimpleFloorPlanner, Value},
plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use halo2curves::ff::PrimeField;
use itertools::Itertools;
use rand::rngs::OsRng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::collections::HashMap;
use std::marker::PhantomData;

static mut LEN: usize = 4;
static mut K: usize = 15;
const K: usize = 16;

#[derive(Clone)]
struct MyCircuit<F: PrimeField + TensorType + PartialOrd> {
inputs: [ValTensor<F>; 2],
einsum_params: SingleEinsumParams<F>,
struct MyCircuit {
inputs: [ValTensor<Fr>; 2],
_marker: PhantomData<Fr>,
}

impl Circuit<Fr> for MyCircuit<Fr> {
impl Circuit<Fr> for MyCircuit {
type Config = BaseConfig<Fr>;
type FloorPlanner = V1;
type Params = SingleEinsumParams<Fr>;
type FloorPlanner = SimpleFloorPlanner;
type Params = ();

fn without_witnesses(&self) -> Self {
self.clone()
}

fn configure_with_params(cs: &mut ConstraintSystem<Fr>, params: Self::Params) -> Self::Config {
let mut config = Self::Config::default();
fn configure(cs: &mut ConstraintSystem<Fr>) -> Self::Config {
let len = unsafe { LEN };

let mut equations = HashMap::new();
equations.insert((0, params.equation), params.input_axes_to_dims);
let analysis = analyze_einsum_usage(&equations).unwrap();
let num_einsum_inner_cols = 1;
unsafe {
config
.configure_einsums(cs, &analysis, num_einsum_inner_cols, K)
.unwrap();
let _constant = VarTensor::constant_cols(cs, K, 2, false);
}
let a = VarTensor::new_advice(cs, K, 1, len * len);

config
}
let b = VarTensor::new_advice(cs, K, 1, len * len);

fn params(&self) -> Self::Params {
SingleEinsumParams::<Fr>::new(
&self.einsum_params.equation,
&[
&self.inputs[0].get_inner().unwrap(),
&self.inputs[1].get_inner().unwrap(),
],
)
.unwrap()
}
let output = VarTensor::new_advice(cs, K, 1, (len + 1) * len);

fn configure(_cs: &mut ConstraintSystem<Fr>) -> Self::Config {
unimplemented!("call configure_with_params instead")
Self::Config::configure(cs, &[a, b], &output, CheckMode::UNSAFE)
}

fn synthesize(
@@ -80,33 +54,16 @@ impl Circuit<Fr> for MyCircuit<Fr> {
mut config: Self::Config,
mut layouter: impl Layouter<Fr>,
) -> Result<(), Error> {
let challenges = config
.einsums
.as_ref()
.ok_or(Error::Synthesis)?
.challenges()
.unwrap()
.iter()
.map(|c| layouter.get_challenge(*c))
.collect_vec();

layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new_with_challenges(
region,
0,
1,
1024,
2,
challenges.clone(),
);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,
&self.inputs.iter().collect_vec(),
&self.inputs,
Box::new(PolyOp::Einsum {
equation: self.einsum_params.equation.clone(),
equation: "ab,bc->ac".to_string(),
}),
)
.unwrap();
@@ -119,49 +76,41 @@ impl Circuit<Fr> for MyCircuit<Fr> {

fn runmatmul(c: &mut Criterion) {
let mut group = c.benchmark_group("accum_einsum_matmul");
group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Linear));
group.sampling_mode(criterion::SamplingMode::Flat);
group.sample_size(10);
let len = 128;
unsafe {
LEN = len;
}
for k in 16..17 {
let params = unsafe {
K = k;
gen_srs::<KZGCommitmentScheme<_>>(K as u32)
let params = gen_srs::<KZGCommitmentScheme<_>>(17);
for &len in [4, 32].iter() {
unsafe {
LEN = len;
};

let mut a = Tensor::from((0..len * len).map(|_| Value::known(Fr::random(OsRng))));
a.reshape(&[len, len]).unwrap();

// parameters
let mut b = Tensor::from((0..len * len).map(|_| Value::known(Fr::random(OsRng))));
b.reshape(&[len, len]).unwrap();

let einsum_params = SingleEinsumParams::<Fr>::new("ij,jk->ik", &[&a, &b]).unwrap();

let circuit = MyCircuit {
inputs: [ValTensor::from(a), ValTensor::from(b)],
einsum_params,
_marker: PhantomData,
};

group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("pk", k), &k, |b, &_| {
group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, MyCircuit<Fr>>(&circuit, &params, true)
create_keys::<KZGCommitmentScheme<Bn256>, MyCircuit>(&circuit, &params, true)
.unwrap();
});
});

let pk = create_keys::<KZGCommitmentScheme<Bn256>, MyCircuit<Fr>>(&circuit, &params, false)
.unwrap();
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, MyCircuit>(&circuit, &params, true).unwrap();

group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("prove", k), &k, |b, &_| {
group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| {
b.iter(|| {
let prover = create_proof_circuit::<
KZGCommitmentScheme<_>,
MyCircuit<Fr>,
MyCircuit,
ProverSHPLONK<_>,
VerifierSHPLONK<_>,
SingleStrategy<_>,
@@ -174,6 +123,8 @@ fn runmatmul(c: &mut Criterion) {
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);

@@ -5,7 +5,7 @@ use ezkl::circuit::*;
use ezkl::circuit::lookup::LookupOp;
use ezkl::circuit::poly::PolyOp;
use ezkl::pfsys::create_proof_circuit;

use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
@@ -17,7 +17,6 @@ use halo2_proofs::{
plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use itertools::Itertools;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::marker::PhantomData;

@@ -87,13 +86,13 @@ impl Circuit<Fr> for MyCircuit {
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let output = config
.base_config
.layout(&mut region, &self.inputs.iter().collect_vec(), Box::new(op))
.layout(&mut region, &self.inputs, Box::new(op))
.unwrap();
let _output = config
.base_config
.layout(
&mut region,
&[&output.unwrap()],
&[output.unwrap()],
Box::new(LookupOp::Sigmoid { scale: 1.0.into() }),
)
.unwrap();
@@ -154,6 +153,8 @@ fn runmatmul(c: &mut Criterion) {
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);

@@ -5,7 +5,7 @@ use ezkl::circuit::lookup::LookupOp;
use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::table::Range;
use ezkl::pfsys::create_proof_circuit;

use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
@@ -17,7 +17,6 @@ use halo2_proofs::{
plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use itertools::Itertools;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::marker::PhantomData;

@@ -88,13 +87,13 @@ impl Circuit<Fr> for MyCircuit {
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let output = config
.base_config
.layout(&mut region, &self.inputs.iter().collect_vec(), Box::new(op))
.layout(&mut region, &self.inputs, Box::new(op))
.unwrap();
let _output = config
.base_config
.layout(
&mut region,
&[&output.unwrap()],
&[output.unwrap()],
Box::new(LookupOp::Sigmoid { scale: 1.0.into() }),
)
.unwrap();
@@ -157,6 +156,8 @@ fn runmatmul(c: &mut Criterion) {
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);

@@ -2,7 +2,7 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Through
use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::*;
use ezkl::pfsys::create_proof_circuit;

use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
@@ -15,7 +15,6 @@ use halo2_proofs::{
plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use itertools::Itertools;
use rand::rngs::OsRng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::marker::PhantomData;
@@ -60,7 +59,7 @@ impl Circuit<Fr> for MyCircuit {
config
.layout(
&mut region,
&self.inputs.iter().collect_vec(),
&self.inputs,
Box::new(PolyOp::Sum { axes: vec![0] }),
)
.unwrap();
@@ -116,6 +115,8 @@ fn runsum(c: &mut Criterion) {
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);

@@ -4,7 +4,7 @@ use ezkl::circuit::*;
use ezkl::pfsys::create_keys;
use ezkl::pfsys::create_proof_circuit;
use ezkl::pfsys::srs::gen_srs;

use ezkl::pfsys::TranscriptType;
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2_proofs::poly::kzg::multiopen::ProverSHPLONK;
@@ -63,7 +63,7 @@ impl Circuit<Fr> for MyCircuit {
config
.layout(
&mut region,
&[&self.image],
&[self.image.clone()],
Box::new(HybridOp::SumPool {
padding: vec![(0, 0); 2],
stride: vec![1, 1],
@@ -131,6 +131,8 @@ fn runsumpool(c: &mut Criterion) {
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);

@@ -2,7 +2,7 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Through
use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::*;
use ezkl::pfsys::create_proof_circuit;

use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
@@ -15,7 +15,6 @@ use halo2_proofs::{
plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use itertools::Itertools;
use rand::rngs::OsRng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::marker::PhantomData;
@@ -58,11 +57,7 @@ impl Circuit<Fr> for MyCircuit {
|region| {
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,
&self.inputs.iter().collect_vec(),
Box::new(PolyOp::Add),
)
.layout(&mut region, &self.inputs, Box::new(PolyOp::Add))
.unwrap();
Ok(())
},
@@ -118,6 +113,8 @@ fn runadd(c: &mut Criterion) {
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);

@@ -3,7 +3,7 @@ use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::*;
use ezkl::pfsys::create_proof_circuit;

use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
@@ -16,7 +16,6 @@ use halo2_proofs::{
plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use itertools::Itertools;
use rand::rngs::OsRng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::marker::PhantomData;
@@ -59,11 +58,7 @@ impl Circuit<Fr> for MyCircuit {
|region| {
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,
&self.inputs.iter().collect_vec(),
Box::new(PolyOp::Pow(4)),
)
.layout(&mut region, &self.inputs, Box::new(PolyOp::Pow(4)))
.unwrap();
Ok(())
},
@@ -117,6 +112,8 @@ fn runpow(c: &mut Criterion) {
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);

@@ -8,7 +8,7 @@ use ezkl::circuit::*;
use ezkl::pfsys::create_keys;
use ezkl::pfsys::create_proof_circuit;
use ezkl::pfsys::srs::gen_srs;

use ezkl::pfsys::TranscriptType;
use ezkl::tensor::*;
use halo2_proofs::circuit::Value;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
@@ -104,6 +104,8 @@ fn runposeidon(c: &mut Criterion) {
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);

@@ -4,7 +4,7 @@ use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::{BaseConfig as Config, CheckMode};
use ezkl::fieldutils::IntegerRep;
use ezkl::pfsys::create_proof_circuit;

use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
@@ -70,7 +70,7 @@ impl Circuit<Fr> for NLCircuit {
config
.layout(
&mut region,
&[&self.input],
&[self.input.clone()],
Box::new(PolyOp::LeakyReLU {
slope: 0.0.into(),
scale: 1,
@@ -130,6 +130,8 @@ fn runrelu(c: &mut Criterion) {
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);

@@ -4,7 +4,7 @@ use ezkl::circuit::table::Range;
use ezkl::circuit::{ops::lookup::LookupOp, BaseConfig as Config, CheckMode};
use ezkl::fieldutils::IntegerRep;
use ezkl::pfsys::create_proof_circuit;

use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
@@ -67,7 +67,7 @@ impl Circuit<Fr> for NLCircuit {
config
.layout(
&mut region,
&[&self.input],
&[self.input.clone()],
Box::new(LookupOp::Sigmoid { scale: 1.0.into() }),
)
.unwrap();
@@ -124,6 +124,8 @@ fn runrelu(c: &mut Criterion) {
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);
4 build.rs
@@ -1,3 +1,7 @@
fn main() {
    if cfg!(feature = "ios-bindings-test") {
        println!("cargo::rustc-env=UNIFFI_CARGO_BUILD_EXTRA_ARGS=--features=ios-bindings --no-default-features");
    }

    println!("cargo::rerun-if-changed=build.rs");
}
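For context on the build script above: a value emitted via `cargo::rustc-env` becomes a compile-time environment variable for the crate being compiled. A minimal sketch (assumed, not repo code) of how such a variable can be read downstream:

```rust
fn main() {
    // UNIFFI_CARGO_BUILD_EXTRA_ARGS is only emitted when build.rs runs with
    // the `ios-bindings-test` feature, so option_env! keeps this compilable
    // either way instead of failing like env! would.
    let extra = option_env!("UNIFFI_CARGO_BUILD_EXTRA_ARGS").unwrap_or("");
    println!("uniffi extra build args: {extra}");
}
```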
397 contracts/AttestData.sol Normal file
@@ -0,0 +1,397 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.20;
contract LoadInstances {
    /**
     * @dev Parse the instances array from the Halo2Verifier encoded calldata.
     * @notice must pass encoded bytes from memory
     * @param encoded - verifier calldata
     */
    function getInstancesMemory(
        bytes memory encoded
    ) public pure returns (uint256[] memory instances) {
        bytes4 funcSig;
        uint256 instances_offset;
        uint256 instances_length;
        assembly {
            // fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
            funcSig := mload(add(encoded, 0x20))
        }
        if (funcSig == 0xaf83a18d) {
            instances_offset = 0x64;
        } else if (funcSig == 0x1e8e1e13) {
            instances_offset = 0x44;
        } else {
            revert("Invalid function signature");
        }
        assembly {
            // Fetch the instances offset, which is 4 + 32 + 32 bytes away from
            // the start of encoded for `verifyProof(bytes,uint256[])`,
            // and 4 + 32 + 32 + 32 away for `verifyProof(address,bytes,uint256[])`.

            instances_offset := mload(add(encoded, instances_offset))

            instances_length := mload(add(add(encoded, 0x24), instances_offset))
        }
        instances = new uint256[](instances_length); // Allocate memory for the instances array.
        assembly {
            // Now instances points to the start of the array data
            // (right after the length field).
            for {
                let i := 0x20
            } lt(i, add(mul(instances_length, 0x20), 0x20)) {
                i := add(i, 0x20)
            } {
                mstore(
                    add(instances, i),
                    mload(add(add(encoded, add(i, 0x24)), instances_offset))
                )
            }
        }
        require(
            funcSig == 0xaf83a18d || funcSig == 0x1e8e1e13,
            "Invalid function signature"
        );
    }
    /**
     * @dev Parse the instances array from the Halo2Verifier encoded calldata.
     * @notice must pass encoded bytes from calldata
     * @param encoded - verifier calldata
     */
    function getInstancesCalldata(
        bytes calldata encoded
    ) public pure returns (uint256[] memory instances) {
        bytes4 funcSig;
        uint256 instances_offset;
        uint256 instances_length;
        assembly {
            // fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
            funcSig := calldataload(encoded.offset)
        }
        if (funcSig == 0xaf83a18d) {
            instances_offset = 0x44;
        } else if (funcSig == 0x1e8e1e13) {
            instances_offset = 0x24;
        } else {
            revert("Invalid function signature");
        }
        // We need to create a new assembly block in order for solidity
        // to cast the funcSig to a bytes4 type. Otherwise it will load the entire first 32 bytes of the calldata
        // within the block.
        assembly {
            // Fetch the instances offset, which is 4 + 32 + 32 bytes away from
            // the start of encoded for `verifyProof(bytes,uint256[])`,
            // and 4 + 32 + 32 + 32 away for `verifyProof(address,bytes,uint256[])`.

            instances_offset := calldataload(
                add(encoded.offset, instances_offset)
            )

            instances_length := calldataload(
                add(add(encoded.offset, 0x04), instances_offset)
            )
        }
        instances = new uint256[](instances_length); // Allocate memory for the instances array.
        assembly {
            // Now instances points to the start of the array data
            // (right after the length field).

            for {
                let i := 0x20
            } lt(i, add(mul(instances_length, 0x20), 0x20)) {
                i := add(i, 0x20)
            } {
                mstore(
                    add(instances, i),
                    calldataload(
                        add(add(encoded.offset, add(i, 0x04)), instances_offset)
                    )
                )
            }
        }
    }
}

// The kzg commitments of a given model, all aggregated into a single bytes array.
// At solidity generation time, the commitments are hardcoded into the contract via the COMMITMENT_KZG constant.
// It will be used to check that the proof commitments match the expected commitments.
bytes constant COMMITMENT_KZG = hex"1234";

contract SwapProofCommitments {
    /**
     * @dev Swap the proof commitments
     * @notice must pass encoded bytes from memory
     * @param encoded - verifier calldata
     */
    function checkKzgCommits(
        bytes calldata encoded
    ) internal pure returns (bool equal) {
        bytes4 funcSig;
        uint256 proof_offset;
        uint256 proof_length;
        assembly {
            // fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
            funcSig := calldataload(encoded.offset)
        }
        if (funcSig == 0xaf83a18d) {
            proof_offset = 0x24;
        } else if (funcSig == 0x1e8e1e13) {
            proof_offset = 0x04;
        } else {
            revert("Invalid function signature");
        }
        assembly {
            // Fetch the proof offset, which is 4 + 32 bytes away from
            // the start of encoded for `verifyProof(bytes,uint256[])`,
            // and 4 + 32 + 32 away for `verifyProof(address,bytes,uint256[])`.

            proof_offset := calldataload(add(encoded.offset, proof_offset))

            proof_length := calldataload(
                add(add(encoded.offset, 0x04), proof_offset)
            )
        }
        // Check the length of the commitment against the proof bytes
        if (proof_length < COMMITMENT_KZG.length) {
            return false;
        }

        // Load COMMITMENT_KZG into memory
        bytes memory commitment = COMMITMENT_KZG;

        // Compare the first N bytes of the proof with COMMITMENT_KZG
        uint words = (commitment.length + 31) / 32; // Calculate the number of 32-byte words

        assembly {
            // Now we compare the commitment with the proof,
            // ensuring that the commitments divided up into 32 byte words are all equal.
            for {
                let i := 0x20
            } lt(i, add(mul(words, 0x20), 0x20)) {
                i := add(i, 0x20)
            } {
                let wordProof := calldataload(
                    add(add(encoded.offset, add(i, 0x04)), proof_offset)
                )
                let wordCommitment := mload(add(commitment, i))
                equal := eq(wordProof, wordCommitment)
                if eq(equal, 0) {
                    break
                }
            }
        }

        return equal; // Return true if the commitment comparison passed
    } /// end checkKzgCommits
}

contract DataAttestation is LoadInstances, SwapProofCommitments {
    // the address of the account to make calls to
    address public immutable contractAddress;

    // the abi encoded function calls to make to the `contractAddress` that returns the attested to data
    bytes public callData;

    struct Scalars {
        // The number of base 10 decimals to scale the data by.
        // For most ERC20 tokens this is 1e18.
        uint256 decimals;
        // The number of fractional bits of the fixed point EZKL data points.
        uint256 bits;
    }

    Scalars[] private scalars;

    function getScalars(uint256 index) public view returns (Scalars memory) {
        return scalars[index];
    }

    /**
     * @notice EZKL P value
     * @dev In order to prevent the verifier from accepting two versions of the same pubInput, n and the quantity (n + P), where n + P <= 2^256, we require that all instances are strictly less than P.
     * @dev The reason for this is that the assembly code of the verifier performs all arithmetic operations modulo P and as a consequence can't distinguish between n and n + P.
     */
    uint256 public constant ORDER =
        uint256(
            0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
        );

    uint256 public constant HALF_ORDER = ORDER >> 1;

    uint8 public instanceOffset;

    /**
     * @dev Initialize the contract with the account call the EZKL model will read from.
     * @param _contractAddresses - The address of the contract that EZKL reads storage from.
     * @param _callData - The abi encoded function calls to make to the `contractAddress` that EZKL reads storage from.
     */
    constructor(
        address _contractAddresses,
        bytes memory _callData,
        uint256[] memory _decimals,
        uint[] memory _bits,
        uint8 _instanceOffset
    ) {
        require(
            _bits.length == _decimals.length,
            "Invalid scalar array lengths"
        );
        for (uint i; i < _bits.length; i++) {
            scalars.push(Scalars(10 ** _decimals[i], 1 << _bits[i]));
        }
        contractAddress = _contractAddresses;
        callData = _callData;
        instanceOffset = _instanceOffset;
    }

    function mulDiv(
        uint256 x,
        uint256 y,
        uint256 denominator
    ) public pure returns (uint256 result) {
        unchecked {
            uint256 prod0;
            uint256 prod1;
            assembly {
                let mm := mulmod(x, y, not(0))
                prod0 := mul(x, y)
                prod1 := sub(sub(mm, prod0), lt(mm, prod0))
            }

            if (prod1 == 0) {
                return prod0 / denominator;
            }

            require(denominator > prod1, "Math: mulDiv overflow");

            uint256 remainder;
            assembly {
                remainder := mulmod(x, y, denominator)
                prod1 := sub(prod1, gt(remainder, prod0))
                prod0 := sub(prod0, remainder)
            }

            uint256 twos = denominator & (~denominator + 1);
            assembly {
                denominator := div(denominator, twos)
                prod0 := div(prod0, twos)
                twos := add(div(sub(0, twos), twos), 1)
            }

            prod0 |= prod1 * twos;

            uint256 inverse = (3 * denominator) ^ 2;

            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;

            result = prod0 * inverse;
            return result;
        }
    }
    /**
     * @dev Quantize the data returned from the account calls to the scale used by the EZKL model.
     * @param x - One of the elements of the data returned from the account calls
     * @param _scalars - The scaling factors for the data returned from the account calls.
     */
    function quantizeData(
        int x,
        Scalars memory _scalars
    ) public pure returns (int256 quantized_data) {
        if (_scalars.bits == 1 && _scalars.decimals == 1) {
            return x;
        }
        bool neg = x < 0;
        if (neg) x = -x;
        uint output = mulDiv(uint256(x), _scalars.bits, _scalars.decimals);
        if (
            mulmod(uint256(x), _scalars.bits, _scalars.decimals) * 2 >=
            _scalars.decimals
        ) {
            output += 1;
        }
        if (output > HALF_ORDER) {
            revert("Overflow field modulus");
        }
        quantized_data = neg ? -int256(output) : int256(output);
    }
    /**
     * @dev Make a static call to the account to fetch the data that EZKL reads from.
     * @param target - The address of the account to make calls to.
     * @param data - The abi encoded function calls to make to the `contractAddress` that EZKL reads storage from.
     * @return The data returned from the account calls. (Must come from either a view or pure function. Will throw an error otherwise)
     */
    function staticCall(
        address target,
        bytes memory data
    ) public view returns (bytes memory) {
        (bool success, bytes memory returndata) = target.staticcall(data);
        if (success) {
            if (returndata.length == 0) {
                require(
                    target.code.length > 0,
                    "Address: call to non-contract"
                );
            }
            return returndata;
        } else {
            revert("Address: low-level call failed");
        }
    }
    /**
     * @dev Convert the fixed point quantized data into a field element.
     * @param x - The quantized data.
     * @return field_element - The field element.
     */
    function toFieldElement(
        int256 x
    ) public pure returns (uint256 field_element) {
        // The casting down to uint256 is safe because the order is about 2^254, and the value
        // of x ranges from -2^127 to 2^127, so x + int(ORDER) is always positive.
        return uint256(x + int(ORDER)) % ORDER;
    }

    /**
     * @dev Make the account calls to fetch the data that EZKL reads from and attest to the data.
     * @param instances - The public instances to the proof (the data in the proof that is publicly accessible to the verifier).
     */
    function attestData(uint256[] memory instances) public view {
        bytes memory returnData = staticCall(contractAddress, callData);
        int256[] memory x = abi.decode(returnData, (int256[]));
        int output;
        uint fieldElement;
        for (uint i = 0; i < x.length; i++) {
            output = quantizeData(x[i], scalars[i]);
            fieldElement = toFieldElement(output);
            if (fieldElement != instances[i]) {
                revert("Public input does not match");
            }
        }
    }

    /**
     * @dev Verify the proof with the data attestation.
     * @param verifier - The address of the verifier contract.
     * @param encoded - The verifier calldata.
     */
    function verifyWithDataAttestation(
        address verifier,
        bytes calldata encoded
    ) public view returns (bool) {
        require(verifier.code.length > 0, "Address: call to non-contract");
        attestData(getInstancesCalldata(encoded));
        require(checkKzgCommits(encoded), "Invalid KZG commitments");
        // static call the verifier contract to verify the proof
        (bool success, bytes memory returndata) = verifier.staticcall(encoded);

        if (success) {
            return abi.decode(returndata, (bool));
        } else {
            revert("low-level call to verifier failed");
        }
    }
}
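To make the contract's fixed-point convention concrete: `quantizeData` scales a signed integer by `2^bits / 10^decimals` and rounds half away from zero on the absolute value, before mapping into the field via `toFieldElement`. A minimal off-chain sketch of that rounding rule (assumptions: `i128` stands in for the contract's 256-bit arithmetic, the function name is hypothetical, and the on-chain `HALF_ORDER` overflow check and field mapping are omitted):

```rust
// Sketch only: mirrors the rounding in DataAttestation.quantizeData for
// small values, using i128 in place of 256-bit integers.
fn quantize(x: i128, bits: u32, decimals: u32) -> i128 {
    let scale = 1i128 << bits; // matches `1 << _bits` pushed in the constructor
    let denom = 10i128.pow(decimals); // matches `10 ** _decimals`
    let neg = x < 0;
    let ax = x.abs();
    let mut out = ax * scale / denom;
    // round half away from zero, mirroring `mulmod(...) * 2 >= decimals`
    if (ax * scale % denom) * 2 >= denom {
        out += 1;
    }
    if neg { -out } else { out }
}

fn main() {
    // 1.5 (one decimal) quantized to 2 fractional bits: 1.5 * 4 = 6
    assert_eq!(quantize(15, 2, 1), 6);
    assert_eq!(quantize(-15, 2, 1), -6);
    // 1.4 * 4 = 5.6, which rounds up to 6
    assert_eq!(quantize(14, 2, 1), 6);
}
```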
@@ -1,7 +1,7 @@
import ezkl

project = 'ezkl'
release = '23.0.2'
release = '0.0.0'
version = release
@@ -1,171 +0,0 @@
use ezkl::circuit::einsum::analysis::analyze_einsum_usage;
use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::*;
use ezkl::tensor::*;
use halo2_proofs::circuit::floor_planner::V1;
use halo2_proofs::dev::MockProver;
use halo2_proofs::{
    arithmetic::Field,
    circuit::{Layouter, Value},
    plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::Fr;
use halo2curves::ff::PrimeField;
use itertools::Itertools;
use rand::rngs::OsRng;
use std::collections::HashMap;
use std::marker::PhantomData;

const K: usize = 13;

#[derive(Clone)]
struct MyCircuit<F: PrimeField + TensorType + PartialOrd> {
    inputs: [ValTensor<F>; 2],
    einsum: Einsum<F>,
}

#[derive(Clone, Default)]
struct Einsum<F: PrimeField + TensorType + PartialOrd> {
    equation: String,
    input_axes_to_dims: HashMap<char, usize>,
    _marker: PhantomData<F>,
}

impl<F: PrimeField + TensorType + PartialOrd> Einsum<F> {
    pub fn new(equation: &str, inputs: &[&Tensor<Value<F>>]) -> Result<Self, CircuitError> {
        let mut eq = equation.split("->");
        let inputs_eq = eq.next().ok_or(CircuitError::InvalidEinsum)?;
        let inputs_eq = inputs_eq.split(',').collect::<Vec<_>>();

        // Check that the number of inputs matches the number of inputs in the equation
        if inputs.len() != inputs_eq.len() {
            return Err(TensorError::DimMismatch("einsum".to_string()).into());
        }

        let mut input_axes_to_dims = HashMap::new();
        for (i, input) in inputs.iter().enumerate() {
            for j in 0..inputs_eq[i].len() {
                let c = inputs_eq[i]
                    .chars()
                    .nth(j)
                    .ok_or(CircuitError::InvalidEinsum)?;
                if let std::collections::hash_map::Entry::Vacant(e) = input_axes_to_dims.entry(c) {
                    e.insert(input.dims()[j]);
                } else if input_axes_to_dims[&c] != input.dims()[j] {
                    return Err(TensorError::DimMismatch("einsum".to_string()).into());
                }
            }
        }

        Ok(Self {
            equation: equation.to_owned(),
            input_axes_to_dims,
            _marker: PhantomData,
        })
    }
}

impl Circuit<Fr> for MyCircuit<Fr> {
    type Config = BaseConfig<Fr>;
    type FloorPlanner = V1;
    type Params = Einsum<Fr>;

    fn without_witnesses(&self) -> Self {
        self.clone()
    }

    fn configure_with_params(cs: &mut ConstraintSystem<Fr>, params: Self::Params) -> Self::Config {
        let mut config = Self::Config::default();

        let mut equations = HashMap::new();
        equations.insert((0, params.equation), params.input_axes_to_dims);
        let analysis = analyze_einsum_usage(&equations).unwrap();
        let num_einsum_inner_cols = 1;
        config
            .configure_einsums(cs, &analysis, num_einsum_inner_cols, K)
            .unwrap();

        config
    }

    fn params(&self) -> Self::Params {
        Einsum::<Fr>::new(
            &self.einsum.equation,
            &[
                &self.inputs[0].get_inner().unwrap(),
                &self.inputs[1].get_inner().unwrap(),
            ],
        )
        .unwrap()
    }

    fn configure(_cs: &mut ConstraintSystem<Fr>) -> Self::Config {
        unimplemented!("call configure_with_params instead")
    }

    fn synthesize(
        &self,
        mut config: Self::Config,
        mut layouter: impl Layouter<Fr>,
    ) -> Result<(), Error> {
        let challenges = config
            .einsums
            .as_ref()
            .ok_or(Error::Synthesis)?
            .challenges()
            .unwrap()
            .iter()
            .map(|c| layouter.get_challenge(*c))
            .collect_vec();

        layouter.assign_region(
            || "",
            |region| {
                let mut region = region::RegionCtx::new_with_challenges(
                    region,
                    0,
                    1,
                    1024,
                    2,
                    challenges.clone(),
                );
                config
                    .layout(
                        &mut region,
                        &self.inputs.iter().collect_vec(),
                        Box::new(PolyOp::Einsum {
                            equation: self.einsum.equation.clone(),
                        }),
                    )
                    .unwrap();
                Ok(())
            },
        )?;
        Ok(())
    }
}

fn runmatmul() {
    let len = 64;

    let mut a = Tensor::from((0..len * len).map(|_| Value::known(Fr::random(OsRng))));
    a.reshape(&[len, len]).unwrap();

    // parameters
    let mut b = Tensor::from((0..len * len).map(|_| Value::known(Fr::random(OsRng))));
    b.reshape(&[len, len]).unwrap();

    let einsum = Einsum::<Fr>::new("ij,jk->ik", &[&a, &b]).unwrap();

    let circuit = MyCircuit {
        inputs: [ValTensor::from(a), ValTensor::from(b)],
        einsum,
    };

    let mock_prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
    mock_prover.assert_satisfied();
}

pub fn main() {
    runmatmul()
}
@@ -1,179 +0,0 @@
use ezkl::circuit::einsum::analysis::analyze_einsum_usage;
use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::*;
use ezkl::tensor::*;
use halo2_proofs::circuit::floor_planner::V1;
use halo2_proofs::dev::MockProver;
use halo2_proofs::{
    arithmetic::Field,
    circuit::{Layouter, Value},
    plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::Fr;
use halo2curves::ff::PrimeField;
use itertools::Itertools;
use rand::rngs::OsRng;
use std::collections::HashMap;
use std::marker::PhantomData;

static mut LEN: usize = 4;
const K: usize = 11;

#[derive(Clone)]
struct MyCircuit<F: PrimeField + TensorType + PartialOrd> {
    inputs: [ValTensor<F>; 2],
    einsum: Einsum<F>,
}

#[derive(Clone, Default)]
struct Einsum<F: PrimeField + TensorType + PartialOrd> {
    equation: String,
    input_axes_to_dims: HashMap<char, usize>,
    _marker: PhantomData<F>,
}

impl<F: PrimeField + TensorType + PartialOrd> Einsum<F> {
    pub fn new(equation: &str, inputs: &[&Tensor<Value<F>>]) -> Result<Self, CircuitError> {
        let mut eq = equation.split("->");
        let inputs_eq = eq.next().ok_or(CircuitError::InvalidEinsum)?;
        let inputs_eq = inputs_eq.split(',').collect::<Vec<_>>();

        // Check that the number of inputs matches the number of inputs in the equation
        if inputs.len() != inputs_eq.len() {
            return Err(TensorError::DimMismatch("einsum".to_string()).into());
        }

        let mut input_axes_to_dims = HashMap::new();
        for (i, input) in inputs.iter().enumerate() {
            for j in 0..inputs_eq[i].len() {
                let c = inputs_eq[i]
                    .chars()
                    .nth(j)
                    .ok_or(CircuitError::InvalidEinsum)?;
                if let std::collections::hash_map::Entry::Vacant(e) = input_axes_to_dims.entry(c) {
                    e.insert(input.dims()[j]);
                } else if input_axes_to_dims[&c] != input.dims()[j] {
                    return Err(TensorError::DimMismatch("einsum".to_string()).into());
                }
            }
        }

        Ok(Self {
            equation: equation.to_owned(),
            input_axes_to_dims,
            _marker: PhantomData,
        })
    }
}

impl Circuit<Fr> for MyCircuit<Fr> {
    type Config = BaseConfig<Fr>;
    type FloorPlanner = V1;
    type Params = Einsum<Fr>;

    fn without_witnesses(&self) -> Self {
        self.clone()
    }

    fn configure_with_params(cs: &mut ConstraintSystem<Fr>, params: Self::Params) -> Self::Config {
        let len = unsafe { LEN };

        let a = VarTensor::new_advice(cs, K, 1, len);
        let b = VarTensor::new_advice(cs, K, 1, len);
        let output = VarTensor::new_advice(cs, K, 1, len);

        let mut config = Self::Config::configure(cs, &[a, b], &output, CheckMode::UNSAFE);

        let mut equations = HashMap::new();
        equations.insert((0, params.equation), params.input_axes_to_dims);
        let analysis = analyze_einsum_usage(&equations).unwrap();
        let num_einsum_inner_cols = 1;
        config
            .configure_einsums(cs, &analysis, num_einsum_inner_cols, K)
            .unwrap();

        config
    }

    fn params(&self) -> Self::Params {
        Einsum::<Fr>::new(
            &self.einsum.equation,
            &[
                &self.inputs[0].get_inner().unwrap(),
                &self.inputs[1].get_inner().unwrap(),
            ],
        )
        .unwrap()
    }

    fn configure(_cs: &mut ConstraintSystem<Fr>) -> Self::Config {
        unimplemented!("call configure_with_params instead")
    }

    fn synthesize(
        &self,
        mut config: Self::Config,
        mut layouter: impl Layouter<Fr>,
    ) -> Result<(), Error> {
        let challenges = config
            .einsums
            .as_ref()
            .ok_or(Error::Synthesis)?
            .challenges()
            .unwrap()
            .iter()
            .map(|c| layouter.get_challenge(*c))
            .collect_vec();

        layouter.assign_region(
            || "",
            |region| {
                let mut region = region::RegionCtx::new_with_challenges(
                    region,
                    0,
                    1,
                    1024,
                    2,
                    challenges.clone(),
                );
                config
                    .layout(
                        &mut region,
                        &self.inputs.iter().collect_vec(),
                        Box::new(PolyOp::Einsum {
                            equation: self.einsum.equation.clone(),
                        }),
                    )
                    .unwrap();
                Ok(())
            },
        )?;
        Ok(())
    }
}

fn runbatchmatmul() {
    let batch_size = 5;
    let len = 12;

    let mut a = Tensor::from((0..batch_size * len * len).map(|_| Value::known(Fr::random(OsRng))));
    a.reshape(&[batch_size, len, len]).unwrap();

    // parameters
    let mut b = Tensor::from((0..batch_size * len * len).map(|_| Value::known(Fr::random(OsRng))));
    b.reshape(&[batch_size, len, len]).unwrap();

    let einsum = Einsum::<Fr>::new("ijk,ikl->ijl", &[&a, &b]).unwrap();

    let circuit = MyCircuit {
        inputs: [ValTensor::from(a), ValTensor::from(b)],
        einsum,
    };

    let mock_prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
    mock_prover.assert_satisfied();
}

pub fn main() {
    runbatchmatmul()
}
@@ -32,6 +32,7 @@ use mnist::*;
use rand::rngs::OsRng;
use std::marker::PhantomData;


mod params;

const K: usize = 20;
@@ -215,7 +216,11 @@ where
            .layer_config
            .layout(
                &mut region,
                &[&self.input, &self.l0_params[0], &self.l0_params[1]],
                &[
                    self.input.clone(),
                    self.l0_params[0].clone(),
                    self.l0_params[1].clone(),
                ],
                Box::new(op),
            )
            .unwrap();
@@ -224,7 +229,7 @@ where
            .layer_config
            .layout(
                &mut region,
                &[&x.unwrap()],
                &[x.unwrap()],
                Box::new(PolyOp::LeakyReLU {
                    slope: 0.0.into(),
                    scale: 1,
@@ -236,7 +241,7 @@ where
            .layer_config
            .layout(
                &mut region,
                &[&x.unwrap()],
                &[x.unwrap()],
                Box::new(LookupOp::Div { denom: 32.0.into() }),
            )
            .unwrap()
@@ -248,7 +253,7 @@ where
            .layer_config
            .layout(
                &mut region,
                &[&self.l2_params[0], &x],
                &[self.l2_params[0].clone(), x],
                Box::new(PolyOp::Einsum {
                    equation: "ij,j->ik".to_string(),
                }),
@@ -260,7 +265,7 @@ where
            .layer_config
            .layout(
                &mut region,
                &[&x, &self.l2_params[1]],
                &[x, self.l2_params[1].clone()],
                Box::new(PolyOp::Add),
            )
            .unwrap()

@@ -117,7 +117,10 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
            .layer_config
            .layout(
                &mut region,
                &[&self.l0_params[0].clone().try_into().unwrap(), &self.input],
                &[
                    self.l0_params[0].clone().try_into().unwrap(),
                    self.input.clone(),
                ],
                Box::new(PolyOp::Einsum {
                    equation: "ab,bc->ac".to_string(),
                }),
@@ -132,7 +135,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
            .layer_config
            .layout(
                &mut region,
                &[&x, &self.l0_params[1].clone().try_into().unwrap()],
                &[x, self.l0_params[1].clone().try_into().unwrap()],
                Box::new(PolyOp::Add),
            )
            .unwrap()
@@ -144,7 +147,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
            .layer_config
            .layout(
                &mut region,
                &[&x],
                &[x],
                Box::new(PolyOp::LeakyReLU {
                    scale: 1,
                    slope: 0.0.into(),
@@ -160,7 +163,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
            .layer_config
            .layout(
                &mut region,
                &[&self.l2_params[0].clone().try_into().unwrap(), &x],
                &[self.l2_params[0].clone().try_into().unwrap(), x],
                Box::new(PolyOp::Einsum {
                    equation: "ab,bc->ac".to_string(),
                }),
@@ -175,7 +178,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
            .layer_config
            .layout(
                &mut region,
                &[&x, &self.l2_params[1].clone().try_into().unwrap()],
                &[x, self.l2_params[1].clone().try_into().unwrap()],
                Box::new(PolyOp::Add),
            )
            .unwrap()
@@ -187,7 +190,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
            .layer_config
            .layout(
                &mut region,
                &[&x],
                &[x],
                Box::new(PolyOp::LeakyReLU {
                    scale: 1,
                    slope: 0.0.into(),
@@ -200,7 +203,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
            .layer_config
            .layout(
                &mut region,
                &[&x.unwrap()],
                &[x.unwrap()],
                Box::new(LookupOp::Div {
                    denom: ezkl::circuit::utils::F32::from(128.),
                }),
@@ -866,7 +866,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 98,
"metadata": {},
"outputs": [],
"source": [
@@ -879,7 +879,6 @@
"run_args.input_visibility = \"private\"\n",
"run_args.param_visibility = \"fixed\"\n",
"run_args.output_visibility = \"public\"\n",
"run_args.disable_freivalds = True\n",
"run_args.variables = [(\"batch_size\", 1)]\n",
"\n",
"\n",
@@ -905,7 +904,7 @@
"outputs": [],
"source": [
"\n",
"res = ezkl.calibrate_settings(\"input.json\", target=\"resources\", scales = [4])\n",
"res = await ezkl.calibrate_settings(\"input.json\", target=\"resources\", scales = [4])\n",
"assert res == True\n",
"print(\"verified\")\n"
]
@@ -955,7 +954,7 @@
"source": [
"\n",
"\n",
"res = ezkl.gen_witness()\n"
"res = await ezkl.gen_witness()\n"
]
},
{

@@ -150,7 +150,7 @@
"res = ezkl.gen_settings(model_path, settings_path)\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True"
]
},
@@ -170,7 +170,7 @@
"with open(cal_path, \"w\") as f:\n",
" json.dump(cal_data, f)\n",
"\n",
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -204,7 +204,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -253,6 +253,8 @@
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \n",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
@@ -301,4 +303,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}
@@ -437,7 +437,7 @@
"\n",
"# Optimize for resources, we cap logrows at 12 to reduce setup and proving time, at the expense of accuracy\n",
"# You may want to increase the max logrows if accuracy is a concern\n",
"res = ezkl.calibrate_settings(target = \"resources\", max_logrows = 12, scales = [2])"
"res = await ezkl.calibrate_settings(target = \"resources\", max_logrows = 12, scales = [2])"
]
},
{
@@ -526,7 +526,7 @@
"# now generate the witness file\n",
"witness_path = os.path.join('witness.json')\n",
"\n",
"res = ezkl.gen_witness()\n",
"res = await ezkl.gen_witness()\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -546,7 +546,7 @@
"\n",
"proof_path = os.path.join('proof.json')\n",
"\n",
"proof = ezkl.prove(proof_path=proof_path)\n",
"proof = ezkl.prove(proof_type=\"single\", proof_path=proof_path)\n",
"\n",
"print(proof)\n",
"assert os.path.isfile(proof_path)"
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -105,7 +105,7 @@
"\n",
"class GCNConv(Module):\n",
" def __init__(self, in_channels, out_channels):\n",
" super(GCNConv, self).__init__() \n",
" super(GCNConv, self).__init__() # \"Add\" aggregation.\n",
" self.lin = torch.nn.Linear(in_channels, out_channels)\n",
"\n",
" self.reset_parameters()\n",
@@ -467,7 +467,7 @@
"outputs": [],
"source": [
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True"
]
},
@@ -508,7 +508,7 @@
"source": [
"# now generate the witness file\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -563,6 +563,7 @@
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
@@ -624,4 +625,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}
@@ -196,7 +196,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -237,7 +237,7 @@
"source": [
"# now generate the witness file\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -286,6 +286,8 @@
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \n",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",

@@ -179,7 +179,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -214,7 +214,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -248,7 +248,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "c384cbc8",
"metadata": {},
"outputs": [],
@@ -263,6 +263,8 @@
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \n",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
@@ -311,4 +313,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}
@@ -241,7 +241,7 @@
"with open(cal_path, \"w\") as f:\n",
" json.dump(cal_data, f)\n",
"\n",
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -291,7 +291,7 @@
"\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
]
},
{
@@ -368,7 +368,7 @@
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
@@ -510,4 +510,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
}
}

@@ -152,7 +152,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -188,7 +188,7 @@
"# now generate the witness file \n",
"witness_path = \"witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -236,7 +236,7 @@
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",

@@ -155,7 +155,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -190,7 +190,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -240,7 +240,7 @@
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",

@@ -233,7 +233,7 @@
"with open(cal_path, \"w\") as f:\n",
" json.dump(cal_data, f)\n",
"\n",
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -315,7 +315,7 @@
"\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n"
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n"
]
},
{
@@ -358,7 +358,7 @@
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
@@ -512,4 +512,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
}
}

@@ -193,7 +193,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -228,7 +228,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -278,7 +278,7 @@
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",

@@ -1,284 +1,284 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "cf69bb3f-94e6-4dba-92cd-ce08df117d67",
"metadata": {},
"source": [
"## Linear Regression\n",
"\n",
"\n",
"Sklearn based models are slightly finicky to get into a suitable onnx format. \n",
"This notebook showcases how to do so using the `hummingbird-ml` python package ! "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "95613ee9",
"metadata": {},
"outputs": [],
"source": [
"# check if notebook is in colab\n",
"try:\n",
" # install ezkl\n",
" import google.colab\n",
" import subprocess\n",
" import sys\n",
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"hummingbird-ml\"])\n",
"\n",
"# rely on local installation of ezkl if the notebook is not in colab\n",
"except:\n",
" pass\n",
"\n",
"import os\n",
"import torch\n",
"import ezkl\n",
"import json\n",
"from hummingbird.ml import convert\n",
"\n",
"\n",
"# here we create (and potentially train) a model\n",
"\n",
"# make sure you have the dependencies required here already installed\n",
"import numpy as np\n",
"from sklearn.linear_model import LinearRegression\n",
"X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])\n",
"# y = 1 * x_0 + 2 * x_1 + 3\n",
"y = np.dot(X, np.array([1, 2])) + 3\n",
"reg = LinearRegression().fit(X, y)\n",
"reg.score(X, y)\n",
"\n",
"circuit = convert(reg, \"torch\", X[:1]).model\n",
"\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b37637c4",
"metadata": {},
"outputs": [],
"source": [
"model_path = os.path.join('network.onnx')\n",
"compiled_model_path = os.path.join('network.compiled')\n",
"pk_path = os.path.join('test.pk')\n",
"vk_path = os.path.join('test.vk')\n",
"settings_path = os.path.join('settings.json')\n",
"\n",
"witness_path = os.path.join('witness.json')\n",
"data_path = os.path.join('input.json')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "82db373a",
"metadata": {},
"outputs": [],
"source": [
"\n",
"\n",
"# export to onnx format\n",
"# !!!!!!!!!!!!!!!!! This will flash a warning but it is fine !!!!!!!!!!!!!!!!!!!!!\n",
"\n",
"# Input to the model\n",
"shape = X.shape[1:]\n",
"x = torch.rand(1, *shape, requires_grad=True)\n",
"torch_out = circuit(x)\n",
"# Export the model\n",
"torch.onnx.export(circuit, # model being run\n",
" # model input (or a tuple for multiple inputs)\n",
" x,\n",
" # where to save the model (can be a file or file-like object)\n",
" \"network.onnx\",\n",
" export_params=True, # store the trained parameter weights inside the model file\n",
" opset_version=10, # the ONNX version to export the model to\n",
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
" input_names=['input'], # the model's input names\n",
" output_names=['output'], # the model's output names\n",
" dynamic_axes={'input': {0: 'batch_size'}, # variable length axes\n",
" 'output': {0: 'batch_size'}})\n",
"\n",
"d = ((x).detach().numpy()).reshape([-1]).tolist()\n",
"\n",
"data = dict(input_shapes=[shape],\n",
" input_data=[d],\n",
" output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])\n",
"\n",
"# Serialize data into file:\n",
"json.dump(data, open(\"input.json\", 'w'))\n",
"\n",
"\n",
"# note that you can also call the following function to generate random data for the model\n",
"# it is functionally equivalent to the code above\n",
"ezkl.gen_random_data()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d5e374a2",
"metadata": {},
"outputs": [],
"source": [
"!RUST_LOG=trace\n",
"# TODO: Dictionary outputs\n",
"res = ezkl.gen_settings(model_path, settings_path)\n",
"assert res == True\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cal_path = os.path.join(\"calibration.json\")\n",
"\n",
"data_array = (torch.randn(20, *shape).detach().numpy()).reshape([-1]).tolist()\n",
"\n",
"data = dict(input_data = [data_array])\n",
"\n",
"# Serialize data into file:\n",
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3aa4f090",
"metadata": {},
"outputs": [],
"source": [
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
"assert res == True"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b74dcee",
"metadata": {},
"outputs": [],
"source": [
"# srs path\n",
"res = await ezkl.get_srs( settings_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "18c8b7c7",
"metadata": {},
"outputs": [],
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b1c561a8",
"metadata": {},
"outputs": [],
"source": [
"\n",
"# HERE WE SETUP THE CIRCUIT PARAMS\n",
"# WE GOT KEYS\n",
"# WE GOT CIRCUIT PARAMETERS\n",
"# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
"\n",
"\n",
"\n",
"res = ezkl.setup(\n",
" compiled_model_path,\n",
" vk_path,\n",
" pk_path,\n",
" \n",
" )\n",
"\n",
"assert res == True\n",
"assert os.path.isfile(vk_path)\n",
"assert os.path.isfile(pk_path)\n",
"assert os.path.isfile(settings_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c384cbc8",
"metadata": {},
"outputs": [],
"source": [
"# GENERATE A PROOF\n",
"\n",
"\n",
"proof_path = os.path.join('test.pf')\n",
"\n",
"res = ezkl.prove(\n",
" witness_path,\n",
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" )\n",
"\n",
"print(res)\n",
"assert os.path.isfile(proof_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "76f00d41",
"metadata": {},
"outputs": [],
"source": [
"# VERIFY IT\n",
"\n",
"res = ezkl.verify(\n",
" proof_path,\n",
" settings_path,\n",
" vk_path,\n",
" \n",
" )\n",
"\n",
"assert res == True\n",
"print(\"verified\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
"cells": [
{
"cell_type": "markdown",
"id": "cf69bb3f-94e6-4dba-92cd-ce08df117d67",
"metadata": {},
"source": [
"## Linear Regression\n",
"\n",
"\n",
"Sklearn based models are slightly finicky to get into a suitable onnx format. \n",
"This notebook showcases how to do so using the `hummingbird-ml` python package ! "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "95613ee9",
"metadata": {},
"outputs": [],
"source": [
"# check if notebook is in colab\n",
"try:\n",
" # install ezkl\n",
" import google.colab\n",
" import subprocess\n",
" import sys\n",
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"hummingbird-ml\"])\n",
"\n",
"# rely on local installation of ezkl if the notebook is not in colab\n",
"except:\n",
" pass\n",
"\n",
"import os\n",
"import torch\n",
"import ezkl\n",
"import json\n",
"from hummingbird.ml import convert\n",
"\n",
"\n",
"# here we create (and potentially train) a model\n",
"\n",
"# make sure you have the dependencies required here already installed\n",
"import numpy as np\n",
"from sklearn.linear_model import LinearRegression\n",
"X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])\n",
"# y = 1 * x_0 + 2 * x_1 + 3\n",
"y = np.dot(X, np.array([1, 2])) + 3\n",
"reg = LinearRegression().fit(X, y)\n",
"reg.score(X, y)\n",
"\n",
"circuit = convert(reg, \"torch\", X[:1]).model\n",
"\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b37637c4",
"metadata": {},
"outputs": [],
"source": [
"model_path = os.path.join('network.onnx')\n",
"compiled_model_path = os.path.join('network.compiled')\n",
"pk_path = os.path.join('test.pk')\n",
"vk_path = os.path.join('test.vk')\n",
"settings_path = os.path.join('settings.json')\n",
"\n",
"witness_path = os.path.join('witness.json')\n",
"data_path = os.path.join('input.json')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "82db373a",
"metadata": {},
"outputs": [],
"source": [
"\n",
"\n",
"# export to onnx format\n",
"# !!!!!!!!!!!!!!!!! This will flash a warning but it is fine !!!!!!!!!!!!!!!!!!!!!\n",
"\n",
"# Input to the model\n",
"shape = X.shape[1:]\n",
"x = torch.rand(1, *shape, requires_grad=True)\n",
"torch_out = circuit(x)\n",
"# Export the model\n",
"torch.onnx.export(circuit, # model being run\n",
" # model input (or a tuple for multiple inputs)\n",
" x,\n",
" # where to save the model (can be a file or file-like object)\n",
" \"network.onnx\",\n",
" export_params=True, # store the trained parameter weights inside the model file\n",
" opset_version=10, # the ONNX version to export the model to\n",
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
" input_names=['input'], # the model's input names\n",
" output_names=['output'], # the model's output names\n",
" dynamic_axes={'input': {0: 'batch_size'}, # variable length axes\n",
" 'output': {0: 'batch_size'}})\n",
"\n",
"d = ((x).detach().numpy()).reshape([-1]).tolist()\n",
"\n",
"data = dict(input_shapes=[shape],\n",
" input_data=[d],\n",
" output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])\n",
"\n",
"# Serialize data into file:\n",
"json.dump(data, open(\"input.json\", 'w'))\n",
"\n",
"\n",
"# note that you can also call the following function to generate random data for the model\n",
"# it is functionally equivalent to the code above\n",
"ezkl.gen_random_data()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d5e374a2",
"metadata": {},
"outputs": [],
"source": [
"!RUST_LOG=trace\n",
"# TODO: Dictionary outputs\n",
"res = ezkl.gen_settings(model_path, settings_path)\n",
"assert res == True\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cal_path = os.path.join(\"calibration.json\")\n",
"\n",
"data_array = (torch.randn(20, *shape).detach().numpy()).reshape([-1]).tolist()\n",
"\n",
"data = dict(input_data = [data_array])\n",
"\n",
"# Serialize data into file:\n",
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3aa4f090",
"metadata": {},
"outputs": [],
"source": [
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
"assert res == True"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b74dcee",
"metadata": {},
"outputs": [],
"source": [
"# srs path\n",
"res = await ezkl.get_srs( settings_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "18c8b7c7",
"metadata": {},
"outputs": [],
"source": [
"# now generate the witness file \n",
"\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b1c561a8",
"metadata": {},
"outputs": [],
"source": [
"\n",
"# HERE WE SETUP THE CIRCUIT PARAMS\n",
"# WE GOT KEYS\n",
"# WE GOT CIRCUIT PARAMETERS\n",
"# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
"\n",
"\n",
"\n",
"res = ezkl.setup(\n",
" compiled_model_path,\n",
" vk_path,\n",
" pk_path,\n",
" \n",
" )\n",
"\n",
"assert res == True\n",
"assert os.path.isfile(vk_path)\n",
"assert os.path.isfile(pk_path)\n",
"assert os.path.isfile(settings_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c384cbc8",
"metadata": {},
"outputs": [],
"source": [
"# GENERATE A PROOF\n",
"\n",
"\n",
"proof_path = os.path.join('test.pf')\n",
"\n",
"res = ezkl.prove(\n",
" witness_path,\n",
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \n",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
"assert os.path.isfile(proof_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "76f00d41",
"metadata": {},
"outputs": [],
"source": [
"# VERIFY IT\n",
"\n",
"res = ezkl.verify(\n",
" proof_path,\n",
" settings_path,\n",
" vk_path,\n",
" \n",
" )\n",
"\n",
"assert res == True\n",
"print(\"verified\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@@ -347,7 +347,7 @@
"# Serialize data into file:\n",
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -383,7 +383,7 @@
"# now generate the witness file \n",
"witness_path = \"gan_witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -442,7 +442,7 @@
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",

@@ -142,7 +142,7 @@
"# Serialize data into file:\n",
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -177,7 +177,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -227,7 +227,7 @@
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
@@ -276,4 +276,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

@@ -139,7 +139,7 @@
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -193,7 +193,7 @@
"# now generate the witness file \n",
"witness_path = \"lstmwitness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -252,7 +252,7 @@
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",

@@ -323,7 +323,7 @@
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[2,7])\n",
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[2,7])\n",
"assert res == True"
]
},
@@ -362,7 +362,7 @@
"# now generate the witness file\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -422,7 +422,7 @@
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
@@ -558,4 +558,4 @@
},
"nbformat": 4,
"nbformat_minor": 0
}
}

@@ -289,7 +289,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[0,6])"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[0,6])"
]
},
{
@@ -321,7 +321,7 @@
"# now generate the witness file \n",
"witness_path = \"gan_witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -378,7 +378,7 @@
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
@@ -425,4 +425,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
}
}

@@ -301,7 +301,7 @@
"run_args.param_scale = 0\n",
"run_args.logrows = 18\n",
"\n",
"ezkl.get_srs(logrows=run_args.logrows, )\n"
"ezkl.get_srs(logrows=run_args.logrows, commitment=ezkl.PyCommitments.KZG)\n"
]
},
{
@@ -341,7 +341,7 @@
"\n",
" # generate settings for the current model\n",
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
" res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", scales=[run_args.input_scale], max_logrows=run_args.logrows)\n",
" res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", scales=[run_args.input_scale], max_logrows=run_args.logrows)\n",
" assert res == True\n",
"\n",
" # load settings and print them to the console\n",
@@ -361,7 +361,7 @@
" assert res == True\n",
" assert os.path.isfile(vk_path)\n",
" assert os.path.isfile(pk_path)\n",
" res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
" res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
" run_args.input_scale = settings[\"model_output_scales\"][0]\n",
"\n",
"for i in range(3):\n",
@@ -399,6 +399,7 @@
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \"for-aggr\",\n",
" )\n",
"\n",
" print(res)\n",
@@ -437,6 +438,28 @@
" print(\"----- proving split \"+str(i))\n",
" prove_model(i)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can also mock aggregate the split proofs into a single proof. This is useful if you want to verify the proof on chain at a lower cost. Here we mock aggregate the proofs to save time. You can use other notebooks to see how to aggregate in full ! "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# now mock aggregate the proofs\n",
"# proofs = []\n",
"# for i in range(3):\n",
"# proof_path = os.path.join('proof_split_'+str(i)+'.json')\n",
"# proofs.append(proof_path)\n",
"\n",
"# ezkl.mock_aggregate(proofs, logrows=26, split_proofs = True)"
]
}
],
"metadata": {

@@ -215,7 +215,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -247,7 +247,7 @@
"# now generate the witness file\n",
"witness_path = \"ae_witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -303,7 +303,7 @@
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
@@ -451,7 +451,7 @@
"res = ezkl.gen_settings(model_path, settings_path)\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n",
"print(\"verified\")"
]
@@ -485,7 +485,7 @@
"# now generate the witness file \n",
"witness_path = \"vae_witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -543,7 +543,7 @@
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
@@ -590,4 +590,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
}
}

@@ -845,7 +845,7 @@
"res = ezkl.gen_settings(model_path, settings_path)\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", max_logrows = 20, scales = [3])\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", max_logrows = 20, scales = [3])\n",
"assert res == True"
]
},
@@ -881,7 +881,7 @@
},
"outputs": [],
"source": [
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -939,7 +939,7 @@
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",

File diff suppressed because it is too large
@@ -234,7 +234,7 @@
"run_args.input_scale = 2\n",
"run_args.logrows = 15\n",
"\n",
"ezkl.get_srs(logrows=run_args.logrows, )"
"ezkl.get_srs(logrows=run_args.logrows, commitment=ezkl.PyCommitments.KZG)"
]
},
{
@@ -282,7 +282,7 @@
"\n",
" # generate settings for the current model\n",
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
" res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", scales=[run_args.input_scale], max_logrows=run_args.logrows)\n",
" res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", scales=[run_args.input_scale], max_logrows=run_args.logrows)\n",
" assert res == True\n",
"\n",
" # load settings and print them to the console\n",
@@ -303,7 +303,7 @@
" assert os.path.isfile(vk_path)\n",
" assert os.path.isfile(pk_path)\n",
"\n",
" res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
" res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
" run_args.input_scale = settings[\"model_output_scales\"][0]\n",
"\n",
"for i in range(2):\n",
@@ -330,6 +330,7 @@
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \"for-aggr\",\n",
" )\n",
"\n",
" print(res)\n",
@@ -425,6 +426,28 @@
"for i in range(2):\n",
" prove_model(i)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can also mock aggregate the split proofs into a single proof. This is useful if you want to verify the proof on chain at a lower cost. Here we mock aggregate the proofs to save time. You can use other notebooks to see how to aggregate in full ! "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# now mock aggregate the proofs\n",
"proofs = []\n",
"for i in range(2):\n",
" proof_path = os.path.join('proof_split_'+str(i)+'.json')\n",
" proofs.append(proof_path)\n",
"\n",
"ezkl.mock_aggregate(proofs, logrows=22, split_proofs = True)"
]
}
],
"metadata": {

@@ -176,7 +176,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -210,7 +210,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -260,7 +260,7 @@
" pk_path,\n",
" proof_path,\n",
" \n",
" ",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
@@ -309,4 +309,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

@@ -1,336 +1,331 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Reusable Verifiers \n",
"\n",
"TODO: Update the reusable verifier solidity contract name... Make it less generic to H2 and more bespoke to us.\n",
"\n",
"This notebook demonstrates how to create and reuse the same set of separated verifiers for different models. Specifically, we will use the same verifier for the following four models:\n",
"\n",
"- `1l_mlp sigmoid`\n",
"- `1l_mlp relu`\n",
"- `1l_conv sigmoid`\n",
"- `1l_conv relu`\n",
"\n",
"When deploying EZKL verifiers on the blockchain, each associated model typically requires its own unique verifier, leading to increased on-chain state usage. \n",
"However, with the reusable verifier, we can deploy a single verifier that can be used to verify proofs for any valid H2 circuit. This notebook shows how to do so. \n",
"\n",
"By reusing the same verifier across multiple models, we significantly reduce the amount of state bloat on the blockchain. Instead of deploying a unique verifier for each model, we register a unique and much smaller verifying key artifact (VKA) on the reusable verifier contract for each model, while sharing a common separated verifier. The VKA contains the VK for the model as well as circuit-specific metadata that was otherwise hardcoded into the stack of the original non-reusable verifier. The VKA is passed as a parameter to the verifyProof method. This VKA calldata needs to be registered with the reusable verifier, by calling the registerVKA method, before it can start verifying proofs. "
]
},
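To make that registration flow concrete, a client driving the contract from Python might look roughly like the sketch below. The method names registerVKA and verifyProof come from the description above; the argument layout, the contract ABI and address, and the proof-file field name are placeholder assumptions for illustration, not ezkl's actual on-chain interface.

import json
from web3 import Web3

w3 = Web3(Web3.HTTPProvider("http://localhost:3030"))  # the local anvil node used below

# Hypothetical handle to the deployed reusable verifier; VERIFIER_ADDRESS and
# VERIFIER_ABI are placeholders, not artifacts produced by this notebook.
verifier = w3.eth.contract(address=VERIFIER_ADDRESS, abi=VERIFIER_ABI)

# One-time registration of a model's verifying-key artifact (VKA).
with open("mlp_sigmoid/vka.bytes", "rb") as f:
    vka = f.read()
tx_hash = verifier.functions.registerVKA(vka).transact()
w3.eth.wait_for_transaction_receipt(tx_hash)

# Thereafter every proof for that model is checked by the same deployed
# contract, with the registered VKA passed alongside the proof calldata
# (argument order here is an assumption).
with open("mlp_sigmoid/proof.json") as f:
    proof_calldata = bytes.fromhex(json.load(f)["hex_proof"].removeprefix("0x"))  # field name assumed
assert verifier.functions.verifyProof(proof_calldata, vka).call()

The point of the design is visible even in this sketch: the per-model on-chain cost drops from deploying a whole verifier contract to storing one VKA blob.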
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"import torch.nn as nn\n",
|
||||
"import torch.onnx\n",
|
||||
"\n",
|
||||
"# Define the models\n",
|
||||
"class MLP_Sigmoid(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(MLP_Sigmoid, self).__init__()\n",
|
||||
" self.fc = nn.Linear(3, 3)\n",
|
||||
" self.sigmoid = nn.Sigmoid()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.fc(x)\n",
|
||||
" x = self.sigmoid(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class MLP_Relu(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(MLP_Relu, self).__init__()\n",
|
||||
" self.fc = nn.Linear(3, 3)\n",
|
||||
" self.relu = nn.ReLU()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.fc(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class Conv_Sigmoid(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(Conv_Sigmoid, self).__init__()\n",
|
||||
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
|
||||
" self.sigmoid = nn.Sigmoid()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.conv(x)\n",
|
||||
" x = self.sigmoid(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class Conv_Relu(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(Conv_Relu, self).__init__()\n",
|
||||
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
|
||||
" self.relu = nn.ReLU()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.conv(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"# Instantiate the models\n",
|
||||
"mlp_sigmoid = MLP_Sigmoid()\n",
|
||||
"mlp_relu = MLP_Relu()\n",
|
||||
"conv_sigmoid = Conv_Sigmoid()\n",
|
||||
"conv_relu = Conv_Relu()\n",
|
||||
"\n",
|
||||
"# Dummy input tensor for mlp\n",
|
||||
"dummy_input_mlp = torch.tensor([[-1.5737053155899048, -1.708398461341858, 0.19544155895709991]])\n",
|
||||
"input_mlp_path = 'mlp_input.json'\n",
|
||||
"\n",
|
||||
"# Dummy input tensor for conv\n",
|
||||
"dummy_input_conv = torch.tensor([[[1.4124163389205933, 0.6938204169273376, 1.0664031505584717]]])\n",
|
||||
"input_conv_path = 'conv_input.json'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"names = ['mlp_sigmoid', 'mlp_relu', 'conv_sigmoid', 'conv_relu']\n",
|
||||
"models = [mlp_sigmoid, mlp_relu, conv_sigmoid, conv_relu]\n",
|
||||
"inputs = [dummy_input_mlp, dummy_input_mlp, dummy_input_conv, dummy_input_conv]\n",
|
||||
"input_paths = [input_mlp_path, input_mlp_path, input_conv_path, input_conv_path]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import json\n",
|
||||
"import torch\n",
|
||||
"import ezkl\n",
|
||||
"\n",
|
||||
"for name, model, x, input_path in zip(names, models, inputs, input_paths):\n",
|
||||
" # Create a new directory for the model if it doesn't exist\n",
|
||||
" if not os.path.exists(name):\n",
|
||||
" os.mkdir(name)\n",
|
||||
" # Store the paths in each of their respective directories\n",
|
||||
" model_path = os.path.join(name, \"network.onnx\")\n",
|
||||
" compiled_model_path = os.path.join(name, \"network.compiled\")\n",
|
||||
" pk_path = os.path.join(name, \"test.pk\")\n",
|
||||
" vk_path = os.path.join(name, \"test.vk\")\n",
|
||||
" settings_path = os.path.join(name, \"settings.json\")\n",
|
||||
"\n",
|
||||
" witness_path = os.path.join(name, \"witness.json\")\n",
|
||||
" sol_code_path = os.path.join(name, 'test.sol')\n",
|
||||
" vka_path = os.path.join(name, 'vka.bytes')\n",
|
||||
" abi_path = os.path.join(name, 'test.abi')\n",
|
||||
" proof_path = os.path.join(name, \"proof.json\")\n",
|
||||
"\n",
|
||||
" # Flips the neural net into inference mode\n",
|
||||
" model.eval()\n",
|
||||
"\n",
|
||||
" # Export the model\n",
|
||||
" torch.onnx.export(model, x, model_path, export_params=True, opset_version=10,\n",
|
||||
" do_constant_folding=True, input_names=['input'],\n",
|
||||
" output_names=['output'], dynamic_axes={'input': {0: 'batch_size'},\n",
|
||||
" 'output': {0: 'batch_size'}})\n",
|
||||
"\n",
|
||||
" data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
" data = dict(input_data=[data_array])\n",
|
||||
" json.dump(data, open(input_path, 'w'))\n",
|
||||
"\n",
|
||||
" py_run_args = ezkl.PyRunArgs()\n",
|
||||
" py_run_args.input_visibility = \"private\"\n",
|
||||
" py_run_args.output_visibility = \"public\"\n",
|
||||
" py_run_args.param_visibility = \"fixed\" # private by default\n",
|
||||
"\n",
|
||||
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=py_run_args)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" ezkl.calibrate_settings(input_path, model_path, settings_path, \"resources\")\n",
|
||||
"\n",
|
||||
" res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" res = await ezkl.get_srs(settings_path)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" # now generate the witness file\n",
|
||||
" res = ezkl.gen_witness(input_path, compiled_model_path, witness_path)\n",
|
||||
" assert os.path.isfile(witness_path) == True\n",
|
||||
"\n",
|
||||
" # SETUP \n",
|
||||
" # We recommend disabling selector compression for the setup as it decreases the size of the VK artifact\n",
|
||||
" res = ezkl.setup(compiled_model_path, vk_path, pk_path, disable_selector_compression=True)\n",
|
||||
" assert res == True\n",
|
||||
" assert os.path.isfile(vk_path)\n",
|
||||
" assert os.path.isfile(pk_path)\n",
|
||||
" assert os.path.isfile(settings_path)\n",
|
||||
"\n",
|
||||
" # GENERATE A PROOF\n",
|
||||
" res = ezkl.prove(witness_path, compiled_model_path, pk_path, proof_path)\n",
|
||||
" assert os.path.isfile(proof_path)\n",
|
||||
"\n",
|
||||
" res = await ezkl.create_evm_verifier(vk_path, settings_path, sol_code_path, abi_path, reusable=True)\n",
|
||||
" # TODO: Add a flag force equals true to in the deprication process to preserve OG single purpose verifier?\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" # TODO: \n",
|
||||
" res = await ezkl.create_evm_vka(vk_path, settings_path, vka_path, decimals=18)\n",
|
||||
" assert res == True\n"
|
||||
]
|
||||
},
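Before involving a chain at all, each proof can be sanity-checked off-chain. A minimal sketch (not part of the original notebook) using the `ezkl.verify` call that appears later in this document, run against the per-model artifacts created above:

```python
# Optional local check (assumption: ezkl.verify(proof, settings, vk), the same
# call shape used in the aggregated-proofs notebook further down this diff)
for name in names:
    res = ezkl.verify(
        os.path.join(name, "proof.json"),     # proof written by ezkl.prove above
        os.path.join(name, "settings.json"),  # circuit settings
        os.path.join(name, "test.vk"),        # verifying key from ezkl.setup
    )
    assert res == True
```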
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import subprocess\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"# make sure anvil is running locally\n",
|
||||
"# $ anvil -p 3030\n",
|
||||
"\n",
|
||||
"RPC_URL = \"http://localhost:3030\"\n",
|
||||
"\n",
|
||||
"# Save process globally\n",
|
||||
"anvil_process = None\n",
|
||||
"\n",
|
||||
"def start_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is None:\n",
|
||||
" anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
|
||||
" if anvil_process.returncode is not None:\n",
|
||||
" raise Exception(\"failed to start anvil process\")\n",
|
||||
" time.sleep(3)\n",
|
||||
"\n",
|
||||
"def stop_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is not None:\n",
|
||||
" anvil_process.terminate()\n",
|
||||
" anvil_process = None\n"
|
||||
]
|
||||
},
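A usage sketch for the helpers above, assuming the Foundry `anvil` binary is installed and on the PATH:

```python
# Bring up the local node before deploying, and always tear it down afterwards
start_anvil()     # spawns `anvil -p 3030` with a raised code-size limit
try:
    pass          # deploy and verify against http://localhost:3030 here
finally:
    stop_anvil()  # terminates the anvil subprocess
```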
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Check that the generated verifiers are identical for all models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import filecmp\n",
|
||||
"\n",
|
||||
"def compare_files(file1, file2):\n",
|
||||
" return filecmp.cmp(file1, file2, shallow=False)\n",
|
||||
"\n",
|
||||
"sol_code_path_0 = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
|
||||
"sol_code_path_1 = os.path.join(\"mlp_relu\", 'test.sol')\n",
|
||||
"\n",
|
||||
"sol_code_path_2 = os.path.join(\"conv_sigmoid\", 'test.sol')\n",
|
||||
"sol_code_path_3 = os.path.join(\"conv_relu\", 'test.sol')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"assert compare_files(sol_code_path_0, sol_code_path_1) == True\n",
|
||||
"assert compare_files(sol_code_path_2, sol_code_path_3) == True"
|
||||
]
|
||||
},
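Since the reusable verifier is model-agnostic, the MLP and conv verifiers should also match across pairs, not just within them. An extra check along those lines (not in the original notebook):

```python
# Cross-pair check: all four generated verifiers should be byte-identical
assert compare_files(sol_code_path_0, sol_code_path_2) == True
```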
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we deploy reusable verifier that will be shared by the four models. We picked the `1l_mlp sigmoid` model as an example but you could have used any of the generated verifiers since they are all identical. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os \n",
|
||||
"addr_path_verifier = \"addr_verifier.txt\"\n",
|
||||
"sol_code_path = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" \"verifier/reusable\" # TODO deprecate this option for selecting the type of verifier you want to deploy. \n",
|
||||
" # verifier, verifier/reusable, vka\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"with open(addr_path_verifier, 'r') as file:\n",
|
||||
" addr = file.read().rstrip()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally we deploy each of the unique VK-artifacts and verify them using the shared verifier deployed in the previous step."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for name in names:\n",
|
||||
" addr_path_vk = \"addr_vk.txt\"\n",
|
||||
" vka_path = os.path.join(name, 'vka.bytes')\n",
|
||||
" res = await ezkl.register_vka(\n",
|
||||
" addr, # address of the reusable verifier. TODO: If we deploy the RV across all chains to a single canoncial address, we can hardcode that address and remove this param.\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" vka_path=vka_path, # TODO: Pass in private key and potentially create new command that both creates and registers the vka. Simplify testing pipeline for us and other folks. \n",
|
||||
" )\n",
|
||||
" assert res == True\n",
|
||||
" \n",
|
||||
" proof_path = os.path.join(name, \"proof.json\")\n",
|
||||
" res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path,\n",
|
||||
" vka_path = vka_path # TODO: Turn this from optional to required if we deprecate the orignal verifier. \n",
|
||||
" # TODO: Make it where the use only needs to deply a vka. \n",
|
||||
" )\n",
|
||||
" assert res == True"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
}
{
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Reusable Verifiers \n",
|
||||
"\n",
|
||||
"This notebook demonstrates how to create and reuse the same set of separated verifiers for different models. Specifically, we will use the same verifier for the following four models:\n",
|
||||
"\n",
|
||||
"- `1l_mlp sigmoid`\n",
|
||||
"- `1l_mlp relu`\n",
|
||||
"- `1l_conv sigmoid`\n",
|
||||
"- `1l_conv relu`\n",
|
||||
"\n",
|
||||
"When deploying EZKL verifiers on the blockchain, each associated model typically requires its own unique verifier, leading to increased on-chain state usage. \n",
|
||||
"However, with the reusable verifier, we can deploy a single verifier that can be used to verify proofs for any valid H2 circuit. This notebook shows how to do so. \n",
|
||||
"\n",
|
||||
"By reusing the same verifier across multiple models, we significantly reduce the amount of state bloat on the blockchain. Instead of deploying a unique verifier for each model, we deploy a unique and much smaller verifying key artifact (VKA) contract for each model while sharing a common separated verifier. The VKA contains the VK for the model as well circuit specific metadata that was otherwise hardcoded into the stack of the original non-reusable verifier."
|
||||
]
|
||||
},
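The API difference is small. As a summary of the calls used later in this notebook (not additional required steps), each model produces two artifacts when targeting the reusable verifier:

```python
# 1. A Solidity verifier generated with reusable=True -- identical for every
#    model, so it only needs to be deployed once.
res = await ezkl.create_evm_verifier(vk_path, settings_path, sol_code_path, abi_path, reusable=True)

# 2. A small verifying key artifact (VKA) -- unique per model, registered with
#    the shared verifier on-chain.
res = await ezkl.create_evm_vka(vk_path, settings_path, vka_path)
```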
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"import torch.nn as nn\n",
|
||||
"import torch.onnx\n",
|
||||
"\n",
|
||||
"# Define the models\n",
|
||||
"class MLP_Sigmoid(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(MLP_Sigmoid, self).__init__()\n",
|
||||
" self.fc = nn.Linear(3, 3)\n",
|
||||
" self.sigmoid = nn.Sigmoid()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.fc(x)\n",
|
||||
" x = self.sigmoid(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class MLP_Relu(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(MLP_Relu, self).__init__()\n",
|
||||
" self.fc = nn.Linear(3, 3)\n",
|
||||
" self.relu = nn.ReLU()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.fc(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class Conv_Sigmoid(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(Conv_Sigmoid, self).__init__()\n",
|
||||
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
|
||||
" self.sigmoid = nn.Sigmoid()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.conv(x)\n",
|
||||
" x = self.sigmoid(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"class Conv_Relu(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(Conv_Relu, self).__init__()\n",
|
||||
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
|
||||
" self.relu = nn.ReLU()\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" x = self.conv(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
" return x\n",
|
||||
"\n",
|
||||
"# Instantiate the models\n",
|
||||
"mlp_sigmoid = MLP_Sigmoid()\n",
|
||||
"mlp_relu = MLP_Relu()\n",
|
||||
"conv_sigmoid = Conv_Sigmoid()\n",
|
||||
"conv_relu = Conv_Relu()\n",
|
||||
"\n",
|
||||
"# Dummy input tensor for mlp\n",
|
||||
"dummy_input_mlp = torch.tensor([[-1.5737053155899048, -1.708398461341858, 0.19544155895709991]])\n",
|
||||
"input_mlp_path = 'mlp_input.json'\n",
|
||||
"\n",
|
||||
"# Dummy input tensor for conv\n",
|
||||
"dummy_input_conv = torch.tensor([[[1.4124163389205933, 0.6938204169273376, 1.0664031505584717]]])\n",
|
||||
"input_conv_path = 'conv_input.json'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"names = ['mlp_sigmoid', 'mlp_relu', 'conv_sigmoid', 'conv_relu']\n",
|
||||
"models = [mlp_sigmoid, mlp_relu, conv_sigmoid, conv_relu]\n",
|
||||
"inputs = [dummy_input_mlp, dummy_input_mlp, dummy_input_conv, dummy_input_conv]\n",
|
||||
"input_paths = [input_mlp_path, input_mlp_path, input_conv_path, input_conv_path]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import json\n",
|
||||
"import torch\n",
|
||||
"import ezkl\n",
|
||||
"\n",
|
||||
"for name, model, x, input_path in zip(names, models, inputs, input_paths):\n",
|
||||
" # Create a new directory for the model if it doesn't exist\n",
|
||||
" if not os.path.exists(name):\n",
|
||||
" os.mkdir(name)\n",
|
||||
" # Store the paths in each of their respective directories\n",
|
||||
" model_path = os.path.join(name, \"network.onnx\")\n",
|
||||
" compiled_model_path = os.path.join(name, \"network.compiled\")\n",
|
||||
" pk_path = os.path.join(name, \"test.pk\")\n",
|
||||
" vk_path = os.path.join(name, \"test.vk\")\n",
|
||||
" settings_path = os.path.join(name, \"settings.json\")\n",
|
||||
"\n",
|
||||
" witness_path = os.path.join(name, \"witness.json\")\n",
|
||||
" sol_code_path = os.path.join(name, 'test.sol')\n",
|
||||
" vka_path = os.path.join(name, 'vka.bytes')\n",
|
||||
" abi_path = os.path.join(name, 'test.abi')\n",
|
||||
" proof_path = os.path.join(name, \"proof.json\")\n",
|
||||
"\n",
|
||||
" # Flips the neural net into inference mode\n",
|
||||
" model.eval()\n",
|
||||
"\n",
|
||||
" # Export the model\n",
|
||||
" torch.onnx.export(model, x, model_path, export_params=True, opset_version=10,\n",
|
||||
" do_constant_folding=True, input_names=['input'],\n",
|
||||
" output_names=['output'], dynamic_axes={'input': {0: 'batch_size'},\n",
|
||||
" 'output': {0: 'batch_size'}})\n",
|
||||
"\n",
|
||||
" data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
" data = dict(input_data=[data_array])\n",
|
||||
" json.dump(data, open(input_path, 'w'))\n",
|
||||
"\n",
|
||||
" py_run_args = ezkl.PyRunArgs()\n",
|
||||
" py_run_args.input_visibility = \"private\"\n",
|
||||
" py_run_args.output_visibility = \"public\"\n",
|
||||
" py_run_args.param_visibility = \"fixed\" # private by default\n",
|
||||
"\n",
|
||||
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=py_run_args)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" await ezkl.calibrate_settings(input_path, model_path, settings_path, \"resources\")\n",
|
||||
"\n",
|
||||
" res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" res = await ezkl.get_srs(settings_path)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" # now generate the witness file\n",
|
||||
" res = await ezkl.gen_witness(input_path, compiled_model_path, witness_path)\n",
|
||||
" assert os.path.isfile(witness_path) == True\n",
|
||||
"\n",
|
||||
" # SETUP \n",
|
||||
" # We recommend disabling selector compression for the setup as it decreases the size of the VK artifact\n",
|
||||
" res = ezkl.setup(compiled_model_path, vk_path, pk_path, disable_selector_compression=True)\n",
|
||||
" assert res == True\n",
|
||||
" assert os.path.isfile(vk_path)\n",
|
||||
" assert os.path.isfile(pk_path)\n",
|
||||
" assert os.path.isfile(settings_path)\n",
|
||||
"\n",
|
||||
" # GENERATE A PROOF\n",
|
||||
" res = ezkl.prove(witness_path, compiled_model_path, pk_path, proof_path, \"single\")\n",
|
||||
" assert os.path.isfile(proof_path)\n",
|
||||
"\n",
|
||||
" res = await ezkl.create_evm_verifier(vk_path, settings_path, sol_code_path, abi_path, reusable=True)\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" res = await ezkl.create_evm_vka(vk_path, settings_path, vka_path)\n",
|
||||
" assert res == True\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import subprocess\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"# make sure anvil is running locally\n",
|
||||
"# $ anvil -p 3030\n",
|
||||
"\n",
|
||||
"RPC_URL = \"http://localhost:3030\"\n",
|
||||
"\n",
|
||||
"# Save process globally\n",
|
||||
"anvil_process = None\n",
|
||||
"\n",
|
||||
"def start_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is None:\n",
|
||||
" anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
|
||||
" if anvil_process.returncode is not None:\n",
|
||||
" raise Exception(\"failed to start anvil process\")\n",
|
||||
" time.sleep(3)\n",
|
||||
"\n",
|
||||
"def stop_anvil():\n",
|
||||
" global anvil_process\n",
|
||||
" if anvil_process is not None:\n",
|
||||
" anvil_process.terminate()\n",
|
||||
" anvil_process = None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Check that the generated verifiers are identical for all models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import filecmp\n",
|
||||
"\n",
|
||||
"def compare_files(file1, file2):\n",
|
||||
" return filecmp.cmp(file1, file2, shallow=False)\n",
|
||||
"\n",
|
||||
"sol_code_path_0 = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
|
||||
"sol_code_path_1 = os.path.join(\"mlp_relu\", 'test.sol')\n",
|
||||
"\n",
|
||||
"sol_code_path_2 = os.path.join(\"conv_sigmoid\", 'test.sol')\n",
|
||||
"sol_code_path_3 = os.path.join(\"conv_relu\", 'test.sol')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"assert compare_files(sol_code_path_0, sol_code_path_1) == True\n",
|
||||
"assert compare_files(sol_code_path_2, sol_code_path_3) == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we deploy separate verifier that will be shared by the four models. We picked the `1l_mlp sigmoid` model as an example but you could have used any of the generated verifiers since they are all identical. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os \n",
|
||||
"addr_path_verifier = \"addr_verifier.txt\"\n",
|
||||
"sol_code_path = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" \"verifier/reusable\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"with open(addr_path_verifier, 'r') as file:\n",
|
||||
" addr = file.read().rstrip()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally we deploy each of the unique VK-artifacts and verify them using the shared verifier deployed in the previous step."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for name in names:\n",
|
||||
" addr_path_vk = \"addr_vk.txt\"\n",
|
||||
" vka_path = os.path.join(name, 'vka.bytes')\n",
|
||||
" res = await ezkl.register_vka(\n",
|
||||
" addr,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" vka_path=vka_path,\n",
|
||||
" )\n",
|
||||
" assert res == True\n",
|
||||
" \n",
|
||||
" proof_path = os.path.join(name, \"proof.json\")\n",
|
||||
" sol_code_path = os.path.join(name, 'vk.sol')\n",
|
||||
" res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path,\n",
|
||||
" vka_path = vka_path\n",
|
||||
" )\n",
|
||||
" assert res == True"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
@@ -231,7 +231,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
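These hunks migrate the Python bindings from blocking calls to awaitable ones. Inside Jupyter you can `await` at the top level, but in a plain script you have to drive the coroutine yourself. A minimal sketch, with placeholder file names:

```python
import asyncio
import ezkl

async def main():
    # awaitable form, as introduced by this diff
    res = await ezkl.gen_witness("input.json", "network.compiled", "witness.json")
    assert res

# No running event loop in a plain script, so start one explicitly
asyncio.run(main())
```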
|
||||
@@ -267,7 +267,7 @@
|
||||
" # Serialize data into file:\n",
|
||||
"json.dump( data, open(data_path_faulty, 'w' ))\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path_faulty)\n",
|
||||
"res = await ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path_faulty)\n",
|
||||
"assert os.path.isfile(witness_path_faulty)"
|
||||
]
|
||||
},
|
||||
@@ -312,7 +312,7 @@
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump( data, open(data_path_truthy, 'w' ))\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path_truthy, compiled_model_path, witness_path_truthy)\n",
|
||||
"res = await ezkl.gen_witness(data_path_truthy, compiled_model_path, witness_path_truthy)\n",
|
||||
"assert os.path.isfile(witness_path_truthy)"
|
||||
]
|
||||
},
|
||||
@@ -384,7 +384,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
@@ -411,7 +411,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path_faulty,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
@@ -438,7 +438,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path_truthy,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
|
||||
examples/notebooks/simple_demo_aggregated_proofs.ipynb (new file, 407 lines)
@@ -0,0 +1,407 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "cf69bb3f-94e6-4dba-92cd-ce08df117d67",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## EZKL Jupyter Notebook Demo (Aggregated Proofs) \n",
|
||||
"\n",
|
||||
"Demonstrates how to use EZKL with aggregated proofs"
|
||||
]
|
||||
},
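The cells below walk through the full pipeline step by step. As a compact overview, using the same calls and the logrows=21 value the notebook assumes (paths are defined in the next cells):

```python
# Inner proof with a poseidon transcript, as required for aggregation
ezkl.prove(witness_path, compiled_model_path, pk_path, proof_path, "for-aggr")
# Fast validity check before committing to a full (slow) aggregation
ezkl.mock_aggregate([proof_path], 21)
# Keys for the aggregation circuit, then the aggregate proof itself
ezkl.setup_aggregate([proof_path], aggregate_vk_path, aggregate_pk_path, 21)
ezkl.aggregate([proof_path], aggregate_proof_path, aggregate_pk_path, "evm", 21, "safe")
# Verify the aggregate proof off-chain
ezkl.verify_aggr(aggregate_proof_path, aggregate_vk_path, 21)
```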
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "95613ee9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# check if notebook is in colab\n",
|
||||
"try:\n",
|
||||
" # install ezkl\n",
|
||||
" import google.colab\n",
|
||||
" import subprocess\n",
|
||||
" import sys\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
|
||||
"\n",
|
||||
"# rely on local installation of ezkl if the notebook is not in colab\n",
|
||||
"except:\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# here we create and (potentially train a model)\n",
|
||||
"\n",
|
||||
"# make sure you have the dependencies required here already installed\n",
|
||||
"from torch import nn\n",
|
||||
"import ezkl\n",
|
||||
"import os\n",
|
||||
"import json\n",
|
||||
"import torch\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Defines the model\n",
|
||||
"# we got convs, we got relu, we got linear layers\n",
|
||||
"# What else could one want ????\n",
|
||||
"\n",
|
||||
"class MyModel(nn.Module):\n",
|
||||
" def __init__(self):\n",
|
||||
" super(MyModel, self).__init__()\n",
|
||||
"\n",
|
||||
" self.conv1 = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=5, stride=2)\n",
|
||||
" self.conv2 = nn.Conv2d(in_channels=2, out_channels=3, kernel_size=5, stride=2)\n",
|
||||
"\n",
|
||||
" self.relu = nn.ReLU()\n",
|
||||
"\n",
|
||||
" self.d1 = nn.Linear(48, 48)\n",
|
||||
" self.d2 = nn.Linear(48, 10)\n",
|
||||
"\n",
|
||||
" def forward(self, x):\n",
|
||||
" # 32x1x28x28 => 32x32x26x26\n",
|
||||
" x = self.conv1(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
" x = self.conv2(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
"\n",
|
||||
" # flatten => 32 x (32*26*26)\n",
|
||||
" x = x.flatten(start_dim = 1)\n",
|
||||
"\n",
|
||||
" # 32 x (32*26*26) => 32x128\n",
|
||||
" x = self.d1(x)\n",
|
||||
" x = self.relu(x)\n",
|
||||
"\n",
|
||||
" # logits => 32x10\n",
|
||||
" logits = self.d2(x)\n",
|
||||
"\n",
|
||||
" return logits\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"circuit = MyModel()\n",
|
||||
"\n",
|
||||
"# Train the model as you like here (skipped for brevity)\n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b37637c4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_path = os.path.join('network.onnx')\n",
|
||||
"compiled_model_path = os.path.join('network.compiled')\n",
|
||||
"pk_path = os.path.join('test.pk')\n",
|
||||
"vk_path = os.path.join('test.vk')\n",
|
||||
"proof_path = os.path.join('test.pf')\n",
|
||||
"settings_path = os.path.join('settings.json')\n",
|
||||
"srs_path = os.path.join('kzg.srs')\n",
|
||||
"witness_path = os.path.join('witness.json')\n",
|
||||
"data_path = os.path.join('input.json')\n",
|
||||
"aggregate_proof_path = os.path.join('aggr.pf')\n",
|
||||
"aggregate_vk_path = os.path.join('aggr.vk')\n",
|
||||
"aggregate_pk_path = os.path.join('aggr.pk')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "82db373a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"shape = [1, 28, 28]\n",
|
||||
"# After training, export to onnx (network.onnx) and create a data file (input.json)\n",
|
||||
"x = 0.1*torch.rand(1,*shape, requires_grad=True)\n",
|
||||
"\n",
|
||||
"# Flips the neural net into inference mode\n",
|
||||
"circuit.eval()\n",
|
||||
"\n",
|
||||
" # Export the model\n",
|
||||
"torch.onnx.export(circuit, # model being run\n",
|
||||
" x, # model input (or a tuple for multiple inputs)\n",
|
||||
" model_path, # where to save the model (can be a file or file-like object)\n",
|
||||
" export_params=True, # store the trained parameter weights inside the model file\n",
|
||||
" opset_version=10, # the ONNX version to export the model to\n",
|
||||
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
|
||||
" input_names = ['input'], # the model's input names\n",
|
||||
" output_names = ['output'], # the model's output names\n",
|
||||
" dynamic_axes={'input' : {0 : 'batch_size'}, # variable length axes\n",
|
||||
" 'output' : {0 : 'batch_size'}})\n",
|
||||
"\n",
|
||||
"data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data = dict(input_data = [data_array])\n",
|
||||
"\n",
|
||||
" # Serialize data into file:\n",
|
||||
"json.dump( data, open(data_path, 'w' ))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d5e374a2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!RUST_LOG=trace\n",
|
||||
"# TODO: Dictionary outputs\n",
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"cal_path = os.path.join(\"calibration.json\")\n",
|
||||
"\n",
|
||||
"data_array = (torch.rand(20, *shape, requires_grad=True).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data = dict(input_data = [data_array])\n",
|
||||
"\n",
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3aa4f090",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8b74dcee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# srs path\n",
|
||||
"res = await ezkl.get_srs( settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "18c8b7c7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b1c561a8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"# HERE WE SETUP THE CIRCUIT PARAMS\n",
|
||||
"# WE GOT KEYS\n",
|
||||
"# WE GOT CIRCUIT PARAMETERS\n",
|
||||
"# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = ezkl.setup(\n",
|
||||
" compiled_model_path,\n",
|
||||
" vk_path,\n",
|
||||
" pk_path,\n",
|
||||
" \n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"assert os.path.isfile(vk_path)\n",
|
||||
"assert os.path.isfile(pk_path)\n",
|
||||
"assert os.path.isfile(settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c384cbc8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# GENERATE A PROOF\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"proof_path = os.path.join('test.pf')\n",
|
||||
"\n",
|
||||
"res = ezkl.prove(\n",
|
||||
" witness_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" \"for-aggr\", # IMPORTANT NOTE: To produce an aggregated EVM proof you will want to use poseidon for the smaller proofs\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
"assert os.path.isfile(proof_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "76f00d41",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# VERIFY IT\n",
|
||||
"\n",
|
||||
"res = ezkl.verify(\n",
|
||||
" proof_path,\n",
|
||||
" settings_path,\n",
|
||||
" vk_path,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"print(\"verified\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0832b909",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Generate a larger SRS. This is needed for the aggregated proof\n",
|
||||
"\n",
|
||||
"res = await ezkl.get_srs(settings_path=None, logrows=21, commitment=ezkl.PyCommitments.KZG)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c5a64be6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Run mock aggregate to check whether the proof works\n",
|
||||
"# Use mock to check for validity as it takes a shorter time to check compared to a full aggregated proof\n",
|
||||
"\n",
|
||||
"res = ezkl.mock_aggregate([proof_path], 21)\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fee8acc6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Setup the vk and pk for aggregate\n",
|
||||
"res = ezkl.setup_aggregate(\n",
|
||||
" [proof_path],\n",
|
||||
" aggregate_vk_path,\n",
|
||||
" aggregate_pk_path,\n",
|
||||
" 21\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert os.path.isfile(aggregate_vk_path)\n",
|
||||
"assert os.path.isfile(aggregate_pk_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "171702d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Run aggregate proof\n",
|
||||
"res = ezkl.aggregate(\n",
|
||||
" [proof_path],\n",
|
||||
" aggregate_proof_path,\n",
|
||||
" aggregate_pk_path,\n",
|
||||
" \"evm\",\n",
|
||||
" 21,\n",
|
||||
" \"safe\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert os.path.isfile(aggregate_proof_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "671dfdd5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Check if the proof is valid\n",
|
||||
"res = ezkl.verify_aggr(\n",
|
||||
" aggregate_proof_path,\n",
|
||||
" aggregate_vk_path,\n",
|
||||
" 21,\n",
|
||||
")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "50eba2f4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Create a smart contract verifier for the aggregated proof\n",
|
||||
"\n",
|
||||
"sol_code_path = os.path.join(\"Verifier.sol\")\n",
|
||||
"abi_path = os.path.join(\"Verifier_ABI.json\")\n",
|
||||
"\n",
|
||||
"res = await ezkl.create_evm_verifier_aggr(\n",
|
||||
" [settings_path],\n",
|
||||
" aggregate_vk_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" abi_path,\n",
|
||||
" logrows=21)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
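Once `Verifier.sol` has been generated for the aggregate proof, it can be deployed like the per-model verifiers in the notebook above. A hedged sketch reusing `deploy_evm` with the plain "verifier" target (the target name and the local anvil endpoint are assumptions based on the options listed earlier in this diff):

```python
# Deploy the aggregate-proof verifier to a local anvil node
res = await ezkl.deploy_evm(
    "addr_aggr_verifier.txt",   # file to write the deployed address to
    "http://127.0.0.1:3030",    # local anvil RPC endpoint
    sol_code_path,              # Verifier.sol from create_evm_verifier_aggr
    "verifier",                 # plain single-purpose verifier target
)
assert res == True
```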
|
||||
@@ -171,7 +171,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -205,7 +205,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -255,7 +255,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
@@ -304,4 +304,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
|
||||
@@ -169,7 +169,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -203,7 +203,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -253,7 +253,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
@@ -302,4 +302,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
|
||||
@@ -170,7 +170,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -204,7 +204,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -254,7 +254,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
@@ -303,4 +303,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
|
||||
@@ -149,7 +149,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -183,7 +183,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -233,7 +233,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
|
||||
@@ -298,7 +298,7 @@
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
|
||||
"assert os.path.isfile(witness_path)\n",
|
||||
"\n",
|
||||
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
|
||||
@@ -323,7 +323,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert os.path.isfile(proof_path)\n",
|
||||
@@ -412,7 +412,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path, vk_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path, vk_path)\n",
|
||||
"assert os.path.isfile(witness_path)\n",
|
||||
"\n",
|
||||
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
|
||||
@@ -442,7 +442,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
|
||||
@@ -167,7 +167,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
@@ -187,7 +187,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -221,7 +221,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -271,7 +271,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
|
||||
@@ -152,7 +152,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -186,7 +186,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -236,7 +236,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
@@ -392,7 +392,7 @@
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
}
|
||||
@@ -418,4 +418,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
|
||||
@@ -637,7 +637,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [11])"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [11])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -683,7 +683,7 @@
|
||||
" data = json.load(f)\n",
|
||||
" print(len(data['input_data'][0]))\n",
|
||||
"\n",
|
||||
"ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
"await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -707,7 +707,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
@@ -758,4 +758,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
}
|
||||
|
||||
@@ -525,7 +525,7 @@
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [4])"
|
||||
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [4])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -572,7 +572,7 @@
|
||||
" data = json.load(f)\n",
|
||||
" print(len(data['input_data'][0]))\n",
|
||||
"\n",
|
||||
"ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
"await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -596,7 +596,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
@@ -647,4 +647,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
}
|
||||
|
||||
@@ -458,7 +458,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"ezkl.gen_settings(onnx_filename, settings_filename)\n",
|
||||
"ezkl.calibrate_settings(\n",
|
||||
"await ezkl.calibrate_settings(\n",
|
||||
" input_filename, onnx_filename, settings_filename, \"resources\", scales = [4])\n",
|
||||
"res = await ezkl.get_srs(settings_filename)\n",
|
||||
"ezkl.compile_circuit(onnx_filename, compiled_filename, settings_filename)\n",
|
||||
@@ -527,7 +527,7 @@
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(input_filename, compiled_filename, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(input_filename, compiled_filename, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -580,7 +580,7 @@
|
||||
" compiled_filename,\n",
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
@@ -762,4 +762,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
}
|
||||
|
||||
@@ -629,7 +629,7 @@
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(val_data, model_path, settings_path, \"resources\", scales = [4])\n",
|
||||
"res = await ezkl.calibrate_settings(val_data, model_path, settings_path, \"resources\", scales = [4])\n",
|
||||
"assert res == True\n",
|
||||
"print(\"verified\")\n"
|
||||
]
|
||||
@@ -680,7 +680,7 @@
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -759,7 +759,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
@@ -905,4 +905,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -193,7 +193,7 @@
|
||||
"with open(cal_path, \"w\") as f:\n",
|
||||
" json.dump(cal_data, f)\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -227,7 +227,7 @@
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
@@ -277,7 +277,7 @@
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" ",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
|
||||
Binary file not shown.
File diff suppressed because it is too large.
@@ -1,79 +0,0 @@
|
||||
from torch import nn
|
||||
import torch.nn.init as init
|
||||
import torch
|
||||
import json
|
||||
|
||||
N = 100
|
||||
|
||||
class Model(nn.Module):
|
||||
def __init__(self, inplace=False):
|
||||
super(Model, self).__init__()
|
||||
|
||||
self.aff1 = nn.Linear(N,N)
|
||||
self.aff2 = nn.Linear(N,N)
|
||||
self.aff3 = nn.Linear(N,N)
|
||||
self.aff4 = nn.Linear(N,N)
|
||||
self.aff5 = nn.Linear(N,N)
|
||||
self.aff6 = nn.Linear(N,N)
|
||||
self.aff7 = nn.Linear(N,N)
|
||||
self.aff8 = nn.Linear(N,N)
|
||||
self.aff9 = nn.Linear(N,N)
|
||||
self.relu = nn.ReLU()
|
||||
self._initialize_weights()
|
||||
|
||||
def forward(self, x):
|
||||
# concat 10 x along dim 0
|
||||
x = x.repeat(10, 1)
|
||||
x = self.aff1(x)
|
||||
x = self.relu(x)
|
||||
x = self.aff2(x)
|
||||
x = self.relu(x)
|
||||
x = self.aff3(x)
|
||||
x = self.relu(x)
|
||||
x = self.aff4(x)
|
||||
x = self.relu(x)
|
||||
x = self.aff5(x)
|
||||
x = self.relu(x)
|
||||
x = self.aff6(x)
|
||||
x = self.relu(x)
|
||||
x = self.aff7(x)
|
||||
x = self.relu(x)
|
||||
x = self.aff8(x)
|
||||
x = self.relu(x)
|
||||
x = self.aff9(x)
|
||||
return x
|
||||
|
||||
|
||||
def _initialize_weights(self):
|
||||
init.orthogonal_(self.aff1.weight)
|
||||
|
||||
model = Model()
|
||||
|
||||
# Flips the neural net into inference mode
|
||||
model.eval()
|
||||
model.to('cpu')
|
||||
|
||||
|
||||
x = torch.randn(1, N)
|
||||
# Export the model
|
||||
torch.onnx.export(model, # model being run
|
||||
# model input (or a tuple for multiple inputs)
|
||||
x,
|
||||
# where to save the model (can be a file or file-like object)
|
||||
"network.onnx",
|
||||
export_params=True, # store the trained parameter weights inside the model file
|
||||
opset_version=12, # the ONNX version to export the model to
|
||||
do_constant_folding=True, # whether to execute constant folding for optimization
|
||||
input_names=['input'], # the model's input names
|
||||
output_names=['output'], # the model's output names
|
||||
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
|
||||
'output': {0: 'batch_size'}})
|
||||
|
||||
data_array = ((x).detach().numpy()).reshape([-1]).tolist()
|
||||
|
||||
data_json = dict(input_data=[data_array])
|
||||
|
||||
print(data_json)
|
||||
|
||||
# Serialize data into file:
|
||||
json.dump(data_json, open("input.json", 'w'))
|
||||
@@ -1 +0,0 @@
|
||||
{"input_data": [[0.33088353276252747, -0.8819183707237244, 1.245591163635254, -1.807046890258789, 1.9922369718551636, -0.3360576629638672, 0.4529011845588684, -0.3590165674686432, 0.08356846123933792, 0.5126393437385559, 0.44627535343170166, 1.4916497468948364, 0.49731069803237915, -0.9748706817626953, -0.4923185408115387, 1.3548223972320557, 0.2306872010231018, 1.125955581665039, -1.7063908576965332, 0.3777385354042053, -2.7988760471343994, -1.1846797466278076, 0.7473157048225403, 1.490412950515747, 0.017497723922133446, 2.113945245742798, -1.2141249179840088, -0.16120357811450958, 0.021127669140696526, 0.7207374572753906, -1.369688868522644, -0.7369781732559204, -0.630584180355072, -0.4520200788974762, 0.29123976826667786, 0.6334688067436218, -0.869332492351532, -1.258501648902893, 0.3012596666812897, -0.5507447123527527, 0.669975757598877, 0.15088629722595215, -0.1050339788198471, 0.5505334138870239, -0.1287376880645752, -1.4297826290130615, -0.01703289896249771, -1.2296998500823975, 0.5122153162956238, -0.16924428939819336, -0.415036678314209, -1.1979341506958008, 0.05831022188067436, -0.4411357045173645, 2.0713791847229004, 1.4611141681671143, -0.9357407093048096, -0.333297461271286, -0.676478385925293, 1.390028476715088, -0.05827632546424866, 1.535687804222107, 0.3060210347175598, -0.03171076253056526, -0.614985466003418, 1.2040390968322754, 0.31318482756614685, -1.2134959697723389, 0.13110508024692535, -1.4880926609039307, 1.7007993459701538, 1.5412729978561401, 0.09260450303554535, 0.7649128437042236, -0.5009126663208008, -0.5356241464614868, -0.069572813808918, -0.011717632412910461, 0.21314217150211334, -0.1985170543193817, -0.0223808903247118, 1.2128918170928955, 0.8334696888923645, 1.9029873609542847, -0.11491120606660843, -0.10303237289190292, -0.2467050403356552, 1.557223916053772, -1.1108328104019165, -0.9065343141555786, -0.2271333783864975, 0.6959827542304993, -0.48698121309280396, 0.5689510703086853, 1.115319013595581, -0.8907430768013, -0.24722427129745483, -0.7437837719917297, 0.6742106676101685, -1.7830933332443237]]}
|
||||
Binary file not shown.
@@ -104,5 +104,5 @@ json.dump(data, open("input.json", 'w'))
|
||||
# ezkl.gen_settings("network.onnx", "settings.json")
|
||||
|
||||
# !RUST_LOG = full
|
||||
# res = ezkl.calibrate_settings(
|
||||
# res = await ezkl.calibrate_settings(
|
||||
# "input.json", "network.onnx", "settings.json", "resources")
|
||||
|
||||
@@ -1,23 +1,23 @@
|
||||
## The worm
|
||||
## The worm
|
||||
|
||||
This is an onnx file for a [WormVAE](https://github.com/TuragaLab/wormvae?tab=readme-ov-file) model, which is a VAE / latent-space representation of the C. elegans connectome.
|
||||
|
||||
The model "is a large-scale latent variable model with a very high-dimensional latent space
|
||||
consisting of voltage dynamics of 300 neurons over 5 minutes of time at the simulation frequency
|
||||
of 160 Hz. The generative model for these latent variables is described by stochastic differential
|
||||
equations modeling the nonlinear dynamics of the network activity." (see [here](https://openreview.net/pdf?id=CJzi3dRlJE-)).
|
||||
equations modeling the nonlinear dynamics of the network activity." (see [here](https://openreview.net/pdf?id=CJzi3dRlJE-)).
|
||||
|
||||
In effect this is a generative model for a worm's voltage dynamics, which can be used to generate new worm-like voltage dynamics given previous connectome state.
|
||||
|
||||
Using ezkl you can create a zk circuit equivalent to the wormvae model, allowing you to "prove" execution of the worm model. If you're feeling particularly adventurous, you can also use the zk circuit to generate new worm-state that can be verified on chain.
|
||||
Using ezkl you can create a zk circuit equivalent to the wormvae model, allowing you to "prove" execution of the worm model. If you're feeling particularly adventurous, you can also use the zk circuit to generate new worm-state that can be verified on chain.
|
||||
|
||||
To do so you'll first want to fetch the files using git-lfs (as the onnx file is too large to be stored in git).
|
||||
To do so you'll first want to fetch the files using git-lfs (as the onnx file is too large to be stored in git).
|
||||
|
||||
```bash
|
||||
git lfs fetch --all
|
||||
```
|
||||
|
||||
You'll then want to use the usual ezkl loop to generate the zk circuit. We recommend using fixed visibility for the model parameters, as the model is quite large and this will prune the circuit significantly.
|
||||
You'll then want to use the usual ezkl loop to generate the zk circuit. We recommend using fixed visibility for the model parameters, as the model is quite large and this will prune the circuit significantly.
|
||||
|
||||
```bash
|
||||
ezkl gen-settings --param-visibility=fixed
|
||||
@@ -28,7 +28,17 @@ ezkl gen-witness
|
||||
ezkl prove
|
||||
```
|
||||
|
||||
You might also need to aggregate the proof to get it to fit on chain.
|
||||
|
||||
```bash
|
||||
ezkl aggregate
|
||||
```
|
||||
|
||||
You can then create a smart contract that verifies this aggregate proof
|
||||
|
||||
```bash
|
||||
ezkl create-evm-verifier-aggr
|
||||
```
|
||||
|
||||
This can then be deployed on the chain of your choice.
|
||||
|
||||
|
||||
@@ -1,182 +0,0 @@
use ezkl::circuit::einsum::analysis::analyze_einsum_usage;
use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::*;
use ezkl::tensor::*;
use halo2_proofs::circuit::floor_planner::V1;
use halo2_proofs::dev::MockProver;
use halo2_proofs::{
    arithmetic::Field,
    circuit::{Layouter, Value},
    plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::Fr;
use halo2curves::ff::PrimeField;
use itertools::Itertools;
use rand::rngs::OsRng;
use std::collections::HashMap;
use std::marker::PhantomData;

static mut LEN: usize = 4;
const K: usize = 11;

#[derive(Clone)]
struct MyCircuit<F: PrimeField + TensorType + PartialOrd> {
    inputs: [ValTensor<F>; 2],
    einsum: Einsum<F>,
}

#[derive(Clone, Default)]
struct Einsum<F: PrimeField + TensorType + PartialOrd> {
    equation: String,
    input_axes_to_dims: HashMap<char, usize>,
    _marker: PhantomData<F>,
}

impl<F: PrimeField + TensorType + PartialOrd> Einsum<F> {
    pub fn new(equation: &str, inputs: &[&Tensor<Value<F>>]) -> Result<Self, CircuitError> {
        let mut eq = equation.split("->");
        let inputs_eq = eq.next().ok_or(CircuitError::InvalidEinsum)?;
        let inputs_eq = inputs_eq.split(',').collect::<Vec<_>>();

        // Check that the number of inputs matches the number of inputs in the equation
        if inputs.len() != inputs_eq.len() {
            return Err(TensorError::DimMismatch("einsum".to_string()).into());
        }

        let mut input_axes_to_dims = HashMap::new();
        for (i, input) in inputs.iter().enumerate() {
            for j in 0..inputs_eq[i].len() {
                let c = inputs_eq[i]
                    .chars()
                    .nth(j)
                    .ok_or(CircuitError::InvalidEinsum)?;
                if let std::collections::hash_map::Entry::Vacant(e) = input_axes_to_dims.entry(c) {
                    e.insert(input.dims()[j]);
                } else if input_axes_to_dims[&c] != input.dims()[j] {
                    return Err(TensorError::DimMismatch("einsum".to_string()).into());
                }
            }
        }

        Ok(Self {
            equation: equation.to_owned(),
            input_axes_to_dims,
            _marker: PhantomData,
        })
    }
}

impl Circuit<Fr> for MyCircuit<Fr> {
    type Config = BaseConfig<Fr>;
    type FloorPlanner = V1;
    type Params = Einsum<Fr>;

    fn without_witnesses(&self) -> Self {
        self.clone()
    }

    fn configure_with_params(cs: &mut ConstraintSystem<Fr>, params: Self::Params) -> Self::Config {
        let len = unsafe { LEN };

        let a = VarTensor::new_advice(cs, K, 1, len);
        let b = VarTensor::new_advice(cs, K, 1, len);
        let output = VarTensor::new_advice(cs, K, 1, len);

        let mut config = Self::Config::configure(cs, &[a, b], &output, CheckMode::UNSAFE);

        let mut equations = HashMap::new();
        equations.insert((0, params.equation), params.input_axes_to_dims);
        let analysis = analyze_einsum_usage(&equations).unwrap();
        let num_einsum_inner_cols = 2;
        config
            .configure_einsums(cs, &analysis, num_einsum_inner_cols, K)
            .unwrap();
        let _constant = VarTensor::constant_cols(cs, K, 2, false);

        config
    }

    fn params(&self) -> Self::Params {
        Einsum::<Fr>::new(
            &self.einsum.equation,
            &[
                &self.inputs[0].get_inner().unwrap(),
                &self.inputs[1].get_inner().unwrap(),
            ],
        )
        .unwrap()
    }

    fn configure(_cs: &mut ConstraintSystem<Fr>) -> Self::Config {
        unimplemented!("call configure_with_params instead")
    }

    fn synthesize(
        &self,
        mut config: Self::Config,
        mut layouter: impl Layouter<Fr>,
    ) -> Result<(), Error> {
        let challenges = config
            .einsums
            .as_ref()
            .ok_or(Error::Synthesis)?
            .challenges()
            .unwrap()
            .iter()
            .map(|c| layouter.get_challenge(*c))
            .collect_vec();

        layouter.assign_region(
            || "",
            |region| {
                let mut region = region::RegionCtx::new_with_challenges(
                    region,
                    0,
                    1,
                    1024,
                    2,
                    challenges.clone(),
                );
                config
                    .layout(
                        &mut region,
                        &self.inputs.iter().collect_vec(),
                        Box::new(PolyOp::Einsum {
                            equation: self.einsum.equation.clone(),
                        }),
                    )
                    .unwrap();
                Ok(())
            },
        )?;
        Ok(())
    }
}

fn runmatmul() {
    let i = 10;
    let n = 10;
    let j = 40;
    let k = 10;

    let mut a = Tensor::from((0..i * n * j).map(|_| Value::known(Fr::random(OsRng))));
    a.reshape(&[i, n, j]).unwrap();

    // parameters
    let mut b = Tensor::from((0..j * k).map(|_| Value::known(Fr::random(OsRng))));
    b.reshape(&[j, k]).unwrap();

    let einsum = Einsum::<Fr>::new("inj,jk->ik", &[&a, &b]).unwrap();

    let circuit = MyCircuit {
        inputs: [ValTensor::from(a), ValTensor::from(b)],
        einsum,
    };

    let mock_prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
    mock_prover.assert_satisfied();
}

pub fn main() {
    runmatmul()
}
573 ezkl.pyi
@@ -10,27 +10,31 @@ class PyG1:
    r"""
    pyclass containing the struct used for G1, this is mostly a helper class
    """

    ...

class PyG1Affine:
    r"""
    pyclass containing the struct used for G1
    """

    ...

class PyRunArgs:
    r"""
    Python class containing the struct used for run_args

    Returns
    -------
    PyRunArgs
    """

    ...

class PyCommitments(Enum):
    r"""
    pyclass representing an enum, denoting the type of commitment
    """
    KZG = auto()
    IPA = auto()

class PyInputType(Enum):
    Bool = auto()
    F16 = auto()
@@ -43,19 +47,57 @@ class PyTestDataSource(Enum):
    r"""
    pyclass representing an enum
    """

    File = auto()
    OnChain = auto()

def buffer_to_felts(buffer: typing.Sequence[int]) -> list[str]:
def aggregate(aggregation_snarks:typing.Sequence[str | os.PathLike | pathlib.Path],proof_path:str | os.PathLike | pathlib.Path,vk_path:str | os.PathLike | pathlib.Path,transcript:str,logrows:int,check_mode:str,split_proofs:bool,srs_path:typing.Optional[str | os.PathLike | pathlib.Path],commitment:PyCommitments) -> bool:
    r"""
    Creates an aggregated proof

    Arguments
    ---------
    aggregation_snarks: list[str]
        List of paths to the various proofs

    proof_path: str
        Path to output the aggregated proof

    vk_path: str
        Path to the VK file

    transcript: str
        Proof transcript type to be used. `evm` used by default. `poseidon` is also supported

    logrows: int
        Logrows used for aggregation circuit

    check_mode: str
        Run sanity checks during calculations. Accepts `safe` or `unsafe`

    split_proofs: bool
        Whether the accumulated proofs are segments of a larger circuit

    srs_path: str
        Path to the SRS used

    commitment: str
        Accepts "kzg" or "ipa"

    Returns
    -------
    bool
    """
    ...

def buffer_to_felts(buffer:typing.Sequence[int]) -> list[str]:
    r"""
    Converts a buffer to a vector of field elements

    Arguments
    -------
    buffer: list[int]
        List of integers representing a buffer

    Returns
    -------
    list[str]
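A hedged usage sketch for `aggregate`, using only the parameters documented above (file names are placeholders; the `transcript`, `check_mode`, and `commitment` values are the ones the docstring lists):

```python
import ezkl

ok = ezkl.aggregate(
    ["proof_0.json", "proof_1.json"],  # aggregation_snarks: proofs to fold together
    "aggr_proof.json",                 # proof_path: where to write the aggregate proof
    "aggr_vk.key",                     # vk_path: aggregate verification key
    "evm",                             # transcript: `evm` (default) or `poseidon`
    23,                                # logrows of the aggregation circuit
    "unsafe",                          # check_mode: `safe` or `unsafe`
    False,                             # split_proofs: not segments of a larger circuit
    None,                              # srs_path: use the default SRS lookup
    ezkl.PyCommitments.KZG,            # commitment scheme
)
assert ok
```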
@@ -63,175 +105,173 @@ def buffer_to_felts(buffer: typing.Sequence[int]) -> list[str]:
    """
    ...

def calibrate_settings(
    data: str | os.PathLike | pathlib.Path,
    model: str | os.PathLike | pathlib.Path,
    settings: str | os.PathLike | pathlib.Path,
    target: str,
    lookup_safety_margin: float,
    scales: typing.Optional[typing.Sequence[int]],
    scale_rebase_multiplier: typing.Sequence[int],
    max_logrows: typing.Optional[int],
) -> typing.Any:
def calibrate_settings(data:str | os.PathLike | pathlib.Path,model:str | os.PathLike | pathlib.Path,settings:str | os.PathLike | pathlib.Path,target:str,lookup_safety_margin:float,scales:typing.Optional[typing.Sequence[int]],scale_rebase_multiplier:typing.Sequence[int],max_logrows:typing.Optional[int]) -> typing.Any:
    r"""
    Calibrates the circuit settings

    Arguments
    ---------
    data: str
        Path to the calibration data

    model: str
        Path to the onnx file

    settings: str
        Path to the settings file

    lookup_safety_margin: int
        The lookup safety margin to use for calibration. If the max lookup is 2^k, then the max lookup will be 2^k * lookup_safety_margin. Larger = safer but slower

    scales: list[int]
        Optional scales to specifically try for calibration

    scale_rebase_multiplier: list[int]
        Optional scale rebase multipliers to specifically try for calibration. This is the multiplier at which we divide to return to the input scale.

    max_logrows: int
        Optional max logrows to use for calibration

    Returns
    -------
    bool
    """
    ...

def compile_circuit(
    model: str | os.PathLike | pathlib.Path,
    compiled_circuit: str | os.PathLike | pathlib.Path,
    settings_path: str | os.PathLike | pathlib.Path,
) -> bool:
def compile_circuit(model:str | os.PathLike | pathlib.Path,compiled_circuit:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path) -> bool:
    r"""
    Compiles the circuit for use in other steps

    Arguments
    ---------
    model: str
        Path to the onnx model file

    compiled_circuit: str
        Path to output the compiled circuit

    settings_path: str
        Path to the settings file

    Returns
    -------
    bool
    """
    ...

def create_evm_verifier(
    vk_path: str | os.PathLike | pathlib.Path,
    settings_path: str | os.PathLike | pathlib.Path,
    sol_code_path: str | os.PathLike | pathlib.Path,
    abi_path: str | os.PathLike | pathlib.Path,
    srs_path: typing.Optional[str | os.PathLike | pathlib.Path],
    reusable: bool,
) -> typing.Any:
def create_evm_verifier(vk_path:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,abi_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path],reusable:bool) -> typing.Any:
    r"""
    Creates an EVM compatible verifier; you will need solc installed in your environment to run this

    Arguments
    ---------
    vk_path: str
        The path to the verification key file

    settings_path: str
        The path to the settings file

    sol_code_path: str
        The path at which to create the Solidity verifier

    abi_path: str
        The path at which to create the ABI for the Solidity verifier

    srs_path: str
        The path to the SRS file

    reusable: bool
        Whether the verifier should be rendered as a reusable contract. If so, then you will need to deploy the VK artifact separately, which you can generate using the create_evm_vka command

    Returns
    -------
    bool
    """
    ...

def create_evm_vka(
    vk_path: str | os.PathLike | pathlib.Path,
    settings_path: str | os.PathLike | pathlib.Path,
    vka_path: str | os.PathLike | pathlib.Path,
    srs_path: typing.Optional[str | os.PathLike | pathlib.Path],
) -> typing.Any:
def create_evm_verifier_aggr(aggregation_settings:typing.Sequence[str | os.PathLike | pathlib.Path],vk_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,abi_path:str | os.PathLike | pathlib.Path,logrows:int,srs_path:typing.Optional[str | os.PathLike | pathlib.Path],reusable:bool) -> typing.Any:
    r"""
    Creates an EVM compatible aggregate verifier; you will need solc installed in your environment to run this

    Arguments
    ---------
    aggregation_settings: str
        Path to the settings file

    vk_path: str
        The path to load the desired verification key file

    sol_code_path: str
        The path to the Solidity code

    abi_path: str
        The path to output the Solidity verifier ABI

    logrows: int
        Number of logrows used during aggregated setup

    srs_path: str
        The path to the SRS file

    reusable: bool
        Whether the verifier should be rendered as a reusable contract. If so, then you will need to deploy the VK artifact separately, which you can generate using the create_evm_vka command

    Returns
    -------
    bool
    """
    ...

def create_evm_vka(vk_path:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,vka_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> typing.Any:
    r"""
    Creates an EVM VK artifact. This command generates a VK with circuit-specific metadata encoded in memory for use by the reusable H2 verifier.

    This is useful for deploying verifiers that were otherwise too big to fit on chain and required aggregation.

    Arguments
    ---------
    vk_path: str
        The path to the verification key file

    settings_path: str
        The path to the settings file

    vka_path: str
        The path at which to create the VKA calldata.

    abi_path: str
        The path at which to create the ABI for the Solidity verifier

    srs_path: str
        The path to the SRS file

    Returns
    -------
    bool
    """
    ...

def deploy_evm(
    addr_path: str | os.PathLike | pathlib.Path,
    sol_code_path: str | os.PathLike | pathlib.Path,
    rpc_url: typing.Optional[str],
    contract_type: str,
    optimizer_runs: int,
    private_key: typing.Optional[str],
) -> typing.Any:
def deploy_evm(addr_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,rpc_url:typing.Optional[str],contract_type:str,optimizer_runs:int,private_key:typing.Optional[str]) -> typing.Any:
    r"""
    Deploys the Solidity verifier
    """
    ...

def encode_evm_calldata(
    proof: str | os.PathLike | pathlib.Path,
    calldata: str | os.PathLike | pathlib.Path,
    addr_vk: typing.Optional[str],
) -> list[int]:
def encode_evm_calldata(proof:str | os.PathLike | pathlib.Path,calldata:str | os.PathLike | pathlib.Path,addr_vk:typing.Optional[str]) -> list[int]:
    r"""
    Creates encoded EVM calldata from a proof file

    Arguments
    ---------
    proof: str
        Path to the proof file

    calldata: str
        Path to the calldata file to save

    addr_vk: str
        The address of the verification key contract (if the verifier key is to be rendered as a separate contract)

    Returns
    -------
    vec[u8]
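Taken together, these stubs describe the standard artifact pipeline. A minimal sketch under stated assumptions (the `target` value "resources" is an assumption about accepted calibration targets; all paths are placeholders):

```python
import ezkl

ezkl.gen_settings("model.onnx", "settings.json", None)  # default run args
ezkl.calibrate_settings(
    "cal_data.json", "model.onnx", "settings.json",
    "resources",  # target: assumed calibration objective
    2.0,          # lookup_safety_margin
    None,         # scales: let calibration search
    [1],          # scale_rebase_multiplier
    None,         # max_logrows
)
ezkl.compile_circuit("model.onnx", "model.compiled", "settings.json")
ezkl.create_evm_verifier(
    "vk.key", "settings.json", "Verifier.sol", "Verifier.abi",
    None,   # srs_path
    False,  # reusable: inline the VK rather than deploying a separate VKA contract
)
```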
@@ -239,16 +279,16 @@ def encode_evm_calldata(
    """
    ...

def felt_to_big_endian(felt: str) -> str:
def felt_to_big_endian(felt:str) -> str:
    r"""
    Converts a field element hex string to big endian

    Arguments
    -------
    felt: str
        The field element represented as a string

    Returns
    -------
    str
@@ -256,54 +296,54 @@ def felt_to_big_endian(felt: str) -> str:
    """
    ...

def felt_to_float(felt: str, scale: int) -> float:
def felt_to_float(felt:str,scale:int) -> float:
    r"""
    Converts a field element hex string to a floating point number

    Arguments
    -------
    felt: str
        The field element represented as a string

    scale: int
        The scaling factor used to convert the field element into a floating point representation

    Returns
    -------
    float
    """
    ...

def felt_to_int(felt: str) -> int:
def felt_to_int(felt:str) -> int:
    r"""
    Converts a field element hex string to an integer

    Arguments
    -------
    felt: str
        The field element represented as a string

    Returns
    -------
    int
    """
    ...

def float_to_felt(input: float, scale: int, input_type: PyInputType) -> str:
def float_to_felt(input:float,scale:int,input_type:PyInputType) -> str:
    r"""
    Converts a floating point element to a field element hex string

    Arguments
    -------
    input: float
        The floating point number to convert

    scale: int
        The scaling factor used to quantize the float into a field element

    input_type: PyInputType
        The type of the input

    Returns
    -------
    str
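A round-trip sketch for the felt conversion helpers above (interpreting `scale` as a power-of-two fixed-point factor follows the docstrings; treat the exact rounding behaviour as an assumption):

```python
import ezkl

felt = ezkl.float_to_felt(1.25, 7, ezkl.PyInputType.F16)  # quantize at scale 2^7
approx = ezkl.felt_to_float(felt, 7)   # back to a float at the same scale
as_int = ezkl.felt_to_int(felt)        # the underlying signed integer
print(felt, approx, as_int)
```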
@@ -311,97 +351,101 @@ def float_to_felt(input: float, scale: int, input_type: PyInputType) -> str:
    """
    ...

def gen_settings(
    model: str | os.PathLike | pathlib.Path,
    output: str | os.PathLike | pathlib.Path,
    py_run_args: typing.Optional[PyRunArgs],
) -> bool:
def gen_settings(model:str | os.PathLike | pathlib.Path,output:str | os.PathLike | pathlib.Path,py_run_args:typing.Optional[PyRunArgs]) -> bool:
    r"""
    Generates the circuit settings

    Arguments
    ---------
    model: str
        Path to the onnx file

    output: str
        Path to create the settings file

    py_run_args: PyRunArgs
        PyRunArgs object to initialize the settings

    Returns
    -------
    bool
    """
    ...

def gen_srs(srs_path: str | os.PathLike | pathlib.Path, logrows: int) -> None:
def gen_srs(srs_path:str | os.PathLike | pathlib.Path,logrows:int) -> None:
    r"""
    Generates the Structured Reference String (SRS); use this only for testing purposes

    Arguments
    ---------
    srs_path: str
        Path at which to create the SRS file

    logrows: int
        The number of logrows for the SRS file
    """
    ...

def gen_vk_from_pk_single(
    path_to_pk: str | os.PathLike | pathlib.Path,
    circuit_settings_path: str | os.PathLike | pathlib.Path,
    vk_output_path: str | os.PathLike | pathlib.Path,
) -> bool:
def gen_vk_from_pk_aggr(path_to_pk:str | os.PathLike | pathlib.Path,vk_output_path:str | os.PathLike | pathlib.Path) -> bool:
    r"""
    Generates a vk from a pk for a model circuit and saves it to a file
    Generates a vk from a pk for an aggregate circuit and saves it to a file

    Arguments
    -------
    path_to_pk: str
        Path to the proving key

    circuit_settings_path: str
        Path to the circuit settings file

    vk_output_path: str
        Path to create the vk file

    Returns
    -------
    bool
    """
    ...

def gen_witness(
    data: str | os.PathLike | pathlib.Path,
    model: str | os.PathLike | pathlib.Path,
    output: typing.Optional[str | os.PathLike | pathlib.Path],
    vk_path: typing.Optional[str | os.PathLike | pathlib.Path],
    srs_path: typing.Optional[str | os.PathLike | pathlib.Path],
) -> typing.Any:
def gen_vk_from_pk_single(path_to_pk:str | os.PathLike | pathlib.Path,circuit_settings_path:str | os.PathLike | pathlib.Path,vk_output_path:str | os.PathLike | pathlib.Path) -> bool:
    r"""
    Generates a vk from a pk for a model circuit and saves it to a file

    Arguments
    -------
    path_to_pk: str
        Path to the proving key

    circuit_settings_path: str
        Path to the circuit settings file

    vk_output_path: str
        Path to create the vk file

    Returns
    -------
    bool
    """
    ...

def gen_witness(data:str | os.PathLike | pathlib.Path,model:str | os.PathLike | pathlib.Path,output:typing.Optional[str | os.PathLike | pathlib.Path],vk_path:typing.Optional[str | os.PathLike | pathlib.Path],srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> typing.Any:
    r"""
    Runs the forward pass operation to generate a witness

    Arguments
    ---------
    data: str
        Path to the data file

    model: str
        Path to the compiled model file

    output: str
        Path to create the witness file

    vk_path: str
        Path to the verification key

    srs_path: str
        Path to the SRS file

    Returns
    -------
    dict
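A witness-generation sketch using the `gen_witness` signature above (placeholder paths; the optional `vk_path`/`srs_path` are only needed when the witness must embed commitments):

```python
import ezkl

witness = ezkl.gen_witness(
    "input.json",      # data: model inputs
    "model.compiled",  # model: output of compile_circuit
    "witness.json",    # output: where to write the witness
    None,              # vk_path: not needed without committed outputs
    None,              # srs_path
)
```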
@@ -409,89 +453,126 @@ def gen_witness(
    """
    ...

def get_srs(
    settings_path: typing.Optional[str | os.PathLike | pathlib.Path],
    logrows: typing.Optional[int],
    srs_path: typing.Optional[str | os.PathLike | pathlib.Path],
) -> typing.Any:
def get_srs(settings_path:typing.Optional[str | os.PathLike | pathlib.Path],logrows:typing.Optional[int],srs_path:typing.Optional[str | os.PathLike | pathlib.Path],commitment:typing.Optional[PyCommitments]) -> typing.Any:
    r"""
    Gets a public srs

    Arguments
    ---------
    settings_path: str
        Path to the settings file

    logrows: int
        The number of logrows for the SRS file

    srs_path: str
        Path at which to create the SRS file

    commitment: str
        Specify the commitment used ("kzg", "ipa")

    Returns
    -------
    bool
    """
    ...

def kzg_commit(
    message: typing.Sequence[str],
    vk_path: str | os.PathLike | pathlib.Path,
    settings_path: str | os.PathLike | pathlib.Path,
    srs_path: typing.Optional[str | os.PathLike | pathlib.Path],
) -> list[PyG1Affine]:
def ipa_commit(message:typing.Sequence[str],vk_path:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> list[PyG1Affine]:
    r"""
    Generate a kzg commitment.
    Generate an ipa commitment.

    Arguments
    -------
    message: list[str]
        List of field elements represented as strings

    vk_path: str
        Path to the verification key

    settings_path: str
        Path to the settings file

    srs_path: str
        Path to the Structured Reference String (SRS) file

    Returns
    -------
    list[PyG1Affine]
    """
    ...

def mock(
    witness: str | os.PathLike | pathlib.Path, model: str | os.PathLike | pathlib.Path
) -> bool:
def kzg_commit(message:typing.Sequence[str],vk_path:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> list[PyG1Affine]:
    r"""
    Generate a kzg commitment.

    Arguments
    -------
    message: list[str]
        List of field elements represented as strings

    vk_path: str
        Path to the verification key

    settings_path: str
        Path to the settings file

    srs_path: str
        Path to the Structured Reference String (SRS) file

    Returns
    -------
    list[PyG1Affine]
    """
    ...

def mock(witness:str | os.PathLike | pathlib.Path,model:str | os.PathLike | pathlib.Path) -> bool:
    r"""
    Mocks the prover

    Arguments
    ---------
    witness: str
        Path to the witness file

    model: str
        Path to the compiled model file

    Returns
    -------
    bool
    """
    ...

def poseidon_hash(message: typing.Sequence[str]) -> list[str]:
def mock_aggregate(aggregation_snarks:typing.Sequence[str | os.PathLike | pathlib.Path],logrows:int,split_proofs:bool) -> bool:
    r"""
    Mocks the aggregate prover

    Arguments
    ---------
    aggregation_snarks: list[str]
        List of paths to the relevant proof files

    logrows: int
        Number of logrows to use for the aggregation circuit

    split_proofs: bool
        Indicates whether the accumulated proofs are segments of a larger proof

    Returns
    -------
    bool
    """
    ...

def poseidon_hash(message:typing.Sequence[str]) -> list[str]:
    r"""
    Generate a poseidon hash.

    Arguments
    -------
    message: list[str]
        List of field elements represented as strings

    Returns
    -------
    list[str]
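A small sketch of the hashing and commitment helpers above (placeholder paths; whether `kzg_commit` can run without a full setup depends on your artifacts, so that call is shown commented out):

```python
import ezkl

felts = ezkl.buffer_to_felts([1, 2, 3, 4])  # integers -> field element strings
digest = ezkl.poseidon_hash(felts)          # poseidon hash of the message
# commitments = ezkl.kzg_commit(felts, "vk.key", "settings.json", None)
print(digest)
```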
@@ -499,104 +580,126 @@ def poseidon_hash(message: typing.Sequence[str]) -> list[str]:
    """
    ...

def prove(
    witness: str | os.PathLike | pathlib.Path,
    model: str | os.PathLike | pathlib.Path,
    pk_path: str | os.PathLike | pathlib.Path,
    proof_path: typing.Optional[str | os.PathLike | pathlib.Path],
    srs_path: typing.Optional[str | os.PathLike | pathlib.Path],
) -> typing.Any:
def prove(witness:str | os.PathLike | pathlib.Path,model:str | os.PathLike | pathlib.Path,pk_path:str | os.PathLike | pathlib.Path,proof_path:typing.Optional[str | os.PathLike | pathlib.Path],proof_type:str,srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> typing.Any:
    r"""
    Runs the prover on a set of inputs

    Arguments
    ---------
    witness: str
        Path to the witness file

    model: str
        Path to the compiled model file

    pk_path: str
        Path to the proving key file

    proof_path: str
        Path to create the proof file

    proof_type: str
        Accepts `single`, `for-aggr`

    srs_path: str
        Path to the SRS file

    Returns
    -------
    bool
    """
    ...

def setup(
    model: str | os.PathLike | pathlib.Path,
    vk_path: str | os.PathLike | pathlib.Path,
    pk_path: str | os.PathLike | pathlib.Path,
    srs_path: typing.Optional[str | os.PathLike | pathlib.Path],
    witness_path: typing.Optional[str | os.PathLike | pathlib.Path],
    disable_selector_compression: bool,
) -> bool:
def setup(model:str | os.PathLike | pathlib.Path,vk_path:str | os.PathLike | pathlib.Path,pk_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path],witness_path:typing.Optional[str | os.PathLike | pathlib.Path],disable_selector_compression:bool) -> bool:
    r"""
    Runs the setup process

    Arguments
    ---------
    model: str
        Path to the compiled model file

    vk_path: str
        Path to create the verification key file

    pk_path: str
        Path to create the proving key file

    srs_path: str
        Path to the SRS file

    witness_path: str
        Path to the witness file

    disable_selector_compression: bool
        Whether to compress the selectors or not

    Returns
    -------
    bool
    """
    ...

def swap_proof_commitments(
    proof_path: str | os.PathLike | pathlib.Path,
    witness_path: str | os.PathLike | pathlib.Path,
) -> None:
def setup_aggregate(sample_snarks:typing.Sequence[str | os.PathLike | pathlib.Path],vk_path:str | os.PathLike | pathlib.Path,pk_path:str | os.PathLike | pathlib.Path,logrows:int,split_proofs:bool,srs_path:typing.Optional[str | os.PathLike | pathlib.Path],disable_selector_compression:bool,commitment:PyCommitments) -> bool:
    r"""
    Runs the setup process for an aggregate setup

    Arguments
    ---------
    sample_snarks: list[str]
        List of paths to the various proofs

    vk_path: str
        Path to create the aggregated VK

    pk_path: str
        Path to create the aggregated PK

    logrows: int
        Number of logrows to use

    split_proofs: bool
        Whether the accumulated proofs are segments of a larger proof

    srs_path: str
        Path to the SRS file

    disable_selector_compression: bool
        Whether to compress selectors

    commitment: str
        Accepts `kzg`, `ipa`

    Returns
    -------
    bool
    """
    ...

def swap_proof_commitments(proof_path:str | os.PathLike | pathlib.Path,witness_path:str | os.PathLike | pathlib.Path) -> None:
    r"""
    Swap the commitments in a proof

    Arguments
    -------
    proof_path: str
        Path to the proof file

    witness_path: str
        Path to the witness file
    """
    ...

def table(
    model: str | os.PathLike | pathlib.Path, py_run_args: typing.Optional[PyRunArgs]
) -> str:
def table(model:str | os.PathLike | pathlib.Path,py_run_args:typing.Optional[PyRunArgs]) -> str:
    r"""
    Displays the table as a string in python

    Arguments
    ---------
    model: str
        Path to the onnx file

    Returns
    ---------
    str
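A setup-and-prove sketch based on the `setup`/`prove` signatures above (placeholder paths; the `proof_type` values come from the docstring):

```python
import ezkl

ezkl.setup(
    "model.compiled", "vk.key", "pk.key",
    None,   # srs_path
    None,   # witness_path
    False,  # disable_selector_compression
)
ezkl.prove(
    "witness.json", "model.compiled", "pk.key",
    "proof.json",
    "single",  # proof_type: `single` or `for-aggr`
    None,      # srs_path
)
```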
@@ -604,59 +707,78 @@ def table(
    """
    ...

def verify(
    proof_path: str | os.PathLike | pathlib.Path,
    settings_path: str | os.PathLike | pathlib.Path,
    vk_path: str | os.PathLike | pathlib.Path,
    srs_path: typing.Optional[str | os.PathLike | pathlib.Path],
    reduced_srs: bool,
) -> bool:
def verify(proof_path:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,vk_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path],reduced_srs:bool) -> bool:
    r"""
    Verifies a given proof

    Arguments
    ---------
    proof_path: str
        Path to the proof file

    settings_path: str
        Path to the settings file

    vk_path: str
        Path to the verification key file

    srs_path: str
        Path to the SRS file

    reduced_srs: bool
        Whether to reduce the number of SRS logrows to the number of instances rather than the number of logrows used for proofs (only works if the srs were generated in the same ceremony)

    Returns
    -------
    bool
    """
    ...

def verify_evm(
    addr_verifier: str,
    proof_path: str | os.PathLike | pathlib.Path,
    rpc_url: typing.Optional[str],
    vka_path: typing.Optional[str],
) -> typing.Any:
def verify_aggr(proof_path:str | os.PathLike | pathlib.Path,vk_path:str | os.PathLike | pathlib.Path,logrows:int,commitment:PyCommitments,reduced_srs:bool,srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> bool:
    r"""
    Verifies an aggregate proof

    Arguments
    ---------
    proof_path: str
        The path to the proof file

    vk_path: str
        The path to the verification key file

    logrows: int
        Logrows used for the aggregation circuit

    commitment: str
        Accepts "kzg" or "ipa"

    reduced_srs: bool
        Whether to reduce the number of SRS logrows to the number of instances rather than the number of logrows used for proofs (only works if the srs were generated in the same ceremony)

    srs_path: str
        The path to the SRS file

    Returns
    -------
    bool
    """
    ...

def verify_evm(addr_verifier:str,proof_path:str | os.PathLike | pathlib.Path,rpc_url:typing.Optional[str],vka_path:typing.Optional[str]) -> typing.Any:
    r"""
    Verifies an EVM compatible proof; you will need solc installed in your environment to run this

    Arguments
    ---------
    addr_verifier: str
        The verifier contract's address as a hex string

    proof_path: str
        The path to the proof file (generated using the prove command)

    rpc_url: str
        RPC URL for an Ethereum node; if None, will use Anvil but WON'T persist state

    vka_path: str
        The path to the VKA calldata bytes file (generated using the create_evm_vka command)

    Returns
@@ -664,3 +786,4 @@ def verify_evm(
    bool
    """
    ...
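Closing the loop, a verification sketch against these stubs (the on-chain call is commented out since it needs a deployed verifier; the address and RPC URL are placeholders):

```python
import ezkl

assert ezkl.verify("proof.json", "settings.json", "vk.key", None, False)

# Against a deployed verifier contract (placeholder address / RPC URL):
# ezkl.verify_evm("0xYourVerifierAddress", "proof.json", "http://127.0.0.1:8545", None)
```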
60 in-browser-evm-verifier/README.md (new file)
@@ -0,0 +1,60 @@
# inbrowser-evm-verify

We would like the Solidity verifier to be canonical and usually all you ever need. For this, we need to be able to run that verifier in the browser.

## How to use (Node.js)

```ts
import localEVMVerify from '@ezkljs/verify';

// Load in the proof file as a buffer
const proofFileBuffer = fs.readFileSync(`${path}/${example}/proof.pf`)

// Stringified EZKL evm verifier bytecode (this is just an example, don't use in production)
const bytecode = '0x608060405234801561001057600080fd5b5060d38061001f6000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c8063cfae321714610046575b600080fd5b6100496100f1565b60405161005691906100f1565b60405180910390f35b'

const result = await localEVMVerify(proofFileBuffer, bytecode)

console.log('result', result)
```

**Note**: Run `ezkl create-evm-verifier` to get the Solidity verifier, from which you can retrieve the bytecode once compiled. We recommend compiling to the Shanghai hardfork target; otherwise you will have to pass an additional parameter specifying the EVM version to the `localEVMVerify` function, like so (for the Paris hardfork, passed in the fourth position since the third is reserved for the VK bytecode):

```ts
import localEVMVerify, { hardfork } from '@ezkljs/verify';

const result = await localEVMVerify(proofFileBuffer, bytecode, undefined, hardfork['Paris'])
```

**Note**: You can also verify separated-vk verifiers using the `localEVMVerify` function. Just pass the vk verifier bytecode as the third parameter, like so:

```ts
import localEVMVerify from '@ezkljs/verify';

const result = await localEVMVerify(proofFileBuffer, verifierBytecode, VKBytecode)
```

## How to use (Browser)

```ts
import localEVMVerify from '@ezkljs/verify';

// Load in the proof file as a buffer using the web APIs (fetch, FileReader, etc.)
// We use fetch in this example to load the proof file as a buffer
const proofFileBuffer = await fetch(`${path}/${example}/proof.pf`).then(res => res.arrayBuffer())

// Stringified EZKL evm verifier bytecode (this is just an example, don't use in production)
const bytecode = '0x608060405234801561001057600080fd5b5060d38061001f6000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c8063cfae321714610046575b600080fd5b6100496100f1565b60405161005691906100f1565b60405180910390f35b'

const result = await localEVMVerify(proofFileBuffer, bytecode)

console.log('result', result)
```

Output:

```ts
result: true
```
42 in-browser-evm-verifier/package.json (new file)
@@ -0,0 +1,42 @@
{
  "name": "@ezkljs/verify",
  "version": "v10.4.2",
  "publishConfig": {
    "access": "public"
  },
  "description": "Evm verify EZKL proofs in the browser.",
  "main": "dist/commonjs/index.js",
  "module": "dist/esm/index.js",
  "types": "dist/commonjs/index.d.ts",
  "files": [
    "dist",
    "LICENSE",
    "README.md"
  ],
  "scripts": {
    "clean": "rm -r dist || true",
    "build:commonjs": "tsc --project tsconfig.commonjs.json && resolve-tspaths -p tsconfig.commonjs.json",
    "build:esm": "tsc --project tsconfig.esm.json && resolve-tspaths -p tsconfig.esm.json",
    "build": "npm run clean && npm run build:commonjs && npm run build:esm"
  },
  "dependencies": {
    "@ethereumjs/common": "4.0.0",
    "@ethereumjs/evm": "2.0.0",
    "@ethereumjs/statemanager": "2.0.0",
    "@ethereumjs/tx": "5.0.0",
    "@ethereumjs/util": "9.0.0",
    "@ethereumjs/vm": "7.0.0",
    "@ethersproject/abi": "5.7.0",
    "@ezkljs/engine": "10.4.2",
    "ethers": "6.7.1",
    "json-bigint": "1.0.0"
  },
  "devDependencies": {
    "@types/node": "^20.8.3",
    "ts-loader": "^9.5.0",
    "ts-node": "^10.9.1",
    "resolve-tspaths": "^0.8.16",
    "tsconfig-paths": "^4.2.0",
    "typescript": "^5.2.2"
  }
}
1479 in-browser-evm-verifier/pnpm-lock.yaml (generated, new file)
File diff suppressed because it is too large.
144 in-browser-evm-verifier/src/index.ts (new file)
@@ -0,0 +1,144 @@
import { defaultAbiCoder as AbiCoder } from '@ethersproject/abi'
import { Address, hexToBytes } from '@ethereumjs/util'
import { Chain, Common, Hardfork } from '@ethereumjs/common'
import { LegacyTransaction, LegacyTxData } from '@ethereumjs/tx'
// import { DefaultStateManager } from '@ethereumjs/statemanager'
// import { Blockchain } from '@ethereumjs/blockchain'
import { VM } from '@ethereumjs/vm'
import { EVM } from '@ethereumjs/evm'
import { buildTransaction, encodeDeployment } from './utils/tx-builder'
import { getAccountNonce, insertAccount } from './utils/account-utils'
import { encodeVerifierCalldata } from '../nodejs/ezkl';

async function deployContract(
  vm: VM,
  common: Common,
  senderPrivateKey: Uint8Array,
  deploymentBytecode: string
): Promise<Address> {
  // Contracts are deployed by sending their deployment bytecode to the address 0
  // The contract params should be abi-encoded and appended to the deployment bytecode.
  const data = encodeDeployment(deploymentBytecode)
  const txData = {
    data,
    nonce: await getAccountNonce(vm, senderPrivateKey),
  }

  const tx = LegacyTransaction.fromTxData(
    buildTransaction(txData) as LegacyTxData,
    { common, allowUnlimitedInitCodeSize: true },
  ).sign(senderPrivateKey)

  const deploymentResult = await vm.runTx({
    tx,
    skipBlockGasLimitValidation: true,
    skipNonce: true
  })

  if (deploymentResult.execResult.exceptionError) {
    throw deploymentResult.execResult.exceptionError
  }

  return deploymentResult.createdAddress!
}

async function verify(
  vm: VM,
  contractAddress: Address,
  caller: Address,
  proof: Uint8Array | Uint8ClampedArray,
  vkAddress?: Address | Uint8Array,
): Promise<boolean> {
  if (proof instanceof Uint8Array) {
    proof = new Uint8ClampedArray(proof.buffer)
  }
  if (vkAddress) {
    // Serialize the vk address bytes as a JSON array of integers,
    // then encode that string so the engine can decode it from calldata
    const vkAddressBytes = hexToBytes(vkAddress.toString())
    const vkAddressArray = Array.from(vkAddressBytes)

    let string = JSON.stringify(vkAddressArray)

    const uint8Array = new TextEncoder().encode(string);

    // Convert to a Uint8Array backed by the encoded buffer
    vkAddress = new Uint8Array(uint8Array.buffer);
  }
  const data = encodeVerifierCalldata(proof, vkAddress)

  const verifyResult = await vm.evm.runCall({
    to: contractAddress,
    caller: caller,
    origin: caller, // The tx.origin is also the caller here
    data: data,
  })

  if (verifyResult.execResult.exceptionError) {
    throw verifyResult.execResult.exceptionError
  }

  const results = AbiCoder.decode(['bool'], verifyResult.execResult.returnValue)

  return results[0]
}

/**
 * Spins up an ephemeral EVM instance for executing the bytecode of a solidity verifier
 * @param proof Json serialized proof file
 * @param bytecode_verifier The bytecode of a compiled solidity verifier.
 * @param bytecode_vk The bytecode of a contract that stores the vk. (Optional, only required if the vk is stored in a separate contract)
 * @param evmVersion The evm version to use for the verification. (Default: Shanghai)
 * @returns The result of the evm verification.
 * @throws If the verify transaction reverts
 */
export default async function localEVMVerify(
  proof: Uint8Array | Uint8ClampedArray,
  bytecode_verifier: string,
  bytecode_vk?: string,
  evmVersion?: Hardfork,
): Promise<boolean> {
  try {
    const hardfork = evmVersion ? evmVersion : Hardfork['Shanghai']
    const common = new Common({ chain: Chain.Mainnet, hardfork })
    const accountPk = hexToBytes(
      '0xe331b6d69882b4cb4ea581d88e0b604039a3de5967688d3dcffdd2270c0fd109', // anvil deterministic Pk
    )

    const evm = new EVM({
      allowUnlimitedContractSize: true,
      allowUnlimitedInitCodeSize: true,
    })

    const vm = await VM.create({ common, evm })
    const accountAddress = Address.fromPrivateKey(accountPk)

    await insertAccount(vm, accountAddress)

    const verifierAddress = await deployContract(
      vm,
      common,
      accountPk,
      bytecode_verifier
    )

    if (bytecode_vk) {
      const vkAccountPk = hexToBytes("0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"); // anvil deterministic Pk
      const vkAccountAddress = Address.fromPrivateKey(vkAccountPk)
      await insertAccount(vm, vkAccountAddress)
      const vkContractAddress = await deployContract(vm, common, vkAccountPk, bytecode_vk)
      // Return the actual verification result rather than unconditionally returning true
      return await verify(vm, verifierAddress, vkAccountAddress, proof, vkContractAddress)
    }

    const result = await verify(vm, verifierAddress, accountAddress, proof)

    return result
  } catch (error) {
    // log or re-throw the error, depending on your needs
    console.error('An error occurred:', error)
    throw error
  }
}
32 in-browser-evm-verifier/src/utils/account-utils.ts (new file)
@@ -0,0 +1,32 @@
import { VM } from '@ethereumjs/vm'
import { Account, Address } from '@ethereumjs/util'

export const keyPair = {
  secretKey:
    '0x3cd7232cd6f3fc66a57a6bedc1a8ed6c228fff0a327e169c2bcc5e869ed49511',
  publicKey:
    '0x0406cc661590d48ee972944b35ad13ff03c7876eae3fd191e8a2f77311b0a3c6613407b5005e63d7d8d76b89d5f900cde691497688bb281e07a5052ff61edebdc0',
}

export const insertAccount = async (vm: VM, address: Address) => {
  const acctData = {
    nonce: 0,
    balance: BigInt('1000000000000000000'), // 1 eth
  }
  const account = Account.fromAccountData(acctData)

  await vm.stateManager.putAccount(address, account)
}

export const getAccountNonce = async (
  vm: VM,
  accountPrivateKey: Uint8Array,
) => {
  const address = Address.fromPrivateKey(accountPrivateKey)
  const account = await vm.stateManager.getAccount(address)
  if (account) {
    return account.nonce
  } else {
    return BigInt(0)
  }
}
59 in-browser-evm-verifier/src/utils/tx-builder.ts (new file)
@@ -0,0 +1,59 @@
import { Interface, defaultAbiCoder as AbiCoder } from '@ethersproject/abi'
import {
  AccessListEIP2930TxData,
  FeeMarketEIP1559TxData,
  TxData,
} from '@ethereumjs/tx'

type TransactionsData =
  | TxData
  | AccessListEIP2930TxData
  | FeeMarketEIP1559TxData

export const encodeFunction = (
  method: string,
  params?: {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    types: any[]
    values: unknown[]
  },
): string => {
  const parameters = params?.types ?? []
  const methodWithParameters = `function ${method}(${parameters.join(',')})`
  const signatureHash = new Interface([methodWithParameters]).getSighash(method)
  const encodedArgs = AbiCoder.encode(parameters, params?.values ?? [])

  return signatureHash + encodedArgs.slice(2)
}

export const encodeDeployment = (
  bytecode: string,
  params?: {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    types: any[]
    values: unknown[]
  },
) => {
  const deploymentData = '0x' + bytecode
  if (params) {
    const argumentsEncoded = AbiCoder.encode(params.types, params.values)
    return deploymentData + argumentsEncoded.slice(2)
  }
  return deploymentData
}

export const buildTransaction = (
  data: Partial<TransactionsData>,
): TransactionsData => {
  const defaultData: Partial<TransactionsData> = {
    gasLimit: 3_000_000_000_000_000,
    gasPrice: 7,
    value: 0,
    data: '0x',
  }

  return {
    ...defaultData,
    ...data,
  }
}
7 in-browser-evm-verifier/tsconfig.commonjs.json (new file)
@@ -0,0 +1,7 @@
{
  "extends": "./tsconfig.json",
  "compilerOptions": {
    "module": "CommonJS",
    "outDir": "./dist/commonjs"
  }
}
7 in-browser-evm-verifier/tsconfig.esm.json (new file)
@@ -0,0 +1,7 @@
{
  "extends": "./tsconfig.json",
  "compilerOptions": {
    "module": "ES2020",
    "outDir": "./dist/esm"
  }
}
62 in-browser-evm-verifier/tsconfig.json (new file)
@@ -0,0 +1,62 @@
{
  "compilerOptions": {
    "rootDir": "src",
    "target": "es2017",
    "outDir": "dist",
    "declaration": true,
    "lib": [
      "dom",
      "dom.iterable",
      "esnext"
    ],
    "allowJs": true,
    "checkJs": true,
    "skipLibCheck": true,
    "strict": true,
    "forceConsistentCasingInFileNames": true,
    "noEmit": false,
    "esModuleInterop": true,
    "module": "CommonJS",
    "moduleResolution": "node",
    "resolveJsonModule": true,
    "isolatedModules": true,
    "jsx": "preserve",
    // "incremental": true,
    "noUncheckedIndexedAccess": true,
    "baseUrl": ".",
    "paths": {
      "@/*": [
        "./src/*"
      ]
    }
  },
  "include": [
    "src/**/*.ts",
    "src/**/*.tsx",
    "src/**/*.cjs",
    "src/**/*.mjs"
  ],
  "exclude": [
    "node_modules"
  ],
  // NEW: Options for file/directory watching
  "watchOptions": {
    // Use native file system events for files and directories
    "watchFile": "useFsEvents",
    "watchDirectory": "useFsEvents",
    // Poll files for updates more frequently
    // when they're updated a lot.
    "fallbackPolling": "dynamicPriority",
    // Don't coalesce watch notification
    "synchronousWatchDirectory": true,
    // Finally, two additional settings for reducing the amount of possible
    // files to track work from these directories
    "excludeDirectories": [
      "**/node_modules",
      "_build"
    ],
    "excludeFiles": [
      "build/fileWhichChangesOften.ts"
    ]
  }
}
4 jest.config.js (new file)
@@ -0,0 +1,4 @@
module.exports = {
  preset: 'ts-jest',
  testEnvironment: 'node',
};
30 package.json (new file)
@@ -0,0 +1,30 @@
{
  "name": "ezkljs-tests",
  "version": "0.1.0",
  "author": "Ethan Cemer",
  "private": true,
  "scripts": {
    "test": "jest"
  },
  "devDependencies": {
    "@ezkljs/engine": "^9.4.4",
    "@ezkljs/verify": "^0.0.6",
    "@jest/types": "^29.6.3",
    "@types/file-saver": "^2.0.5",
    "@types/jest": "^29.5.3",
    "@types/json-bigint": "^1.0.1",
    "@types/node": "20.4.5",
    "buffer": "^6.0.3",
    "env": "^0.0.2",
    "fs": "0.0.1-security",
    "jest": "^29.6.3",
    "json-bigint": "^1.0.0",
    "minimist": "^1.2.8",
    "solc": "^0.8.21",
    "ts-jest": "^29.1.1",
    "ts-loader": "^9.4.4",
    "ts-node": "^10.9.1",
    "tsconfig-paths": "^4.2.0",
    "typescript": "5.1.6"
  }
}
3596 pnpm-lock.yaml (generated, new file)
File diff suppressed because it is too large.
@@ -9,6 +9,6 @@ pytest==8.1.1
tomli==2.0.1
typing-extensions==4.10.0
zipp==3.18.1
onnx==1.17.0
onnx==1.15.0
onnxruntime==1.17.1
numpy==1.26.4
@@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2025-05-01"
channel = "nightly-2025-02-17"
components = ["rustfmt", "clippy"]
229 setup-gpu.sh
@@ -1,229 +0,0 @@
#!/bin/bash

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Default installation directory
DEFAULT_INSTALL_DIR="/opt/icicle/lib/backend/halo2"

# Halo2 repository details
HALO2_REPO="https://github.com/zkonduit/halo2"
HALO2_BRANCH="ac/conditional-compilation-icicle2"

# Parse command line arguments
AUTO_YES=false
for arg in "$@"; do
    case $arg in
        -y|--yes)
            AUTO_YES=true
            shift
            ;;
        -h|--help)
            echo "Usage: $0 [OPTIONS]"
            echo "Options:"
            echo "  -y, --yes     Automatically answer 'yes' to all prompts"
            echo "  -h, --help    Show this help message"
            exit 0
            ;;
        *)
            echo "Unknown option: $arg"
            echo "Use -h or --help for usage information"
            exit 1
            ;;
    esac
done

echo -e "${GREEN}EZKL GPU Setup Script${NC}"
echo -e "${GREEN}=====================${NC}"
echo ""

# Parse commit hash from Cargo.lock
echo "Parsing halo2 commit hash from Cargo.lock..."
if [ ! -f "Cargo.lock" ]; then
    echo -e "${RED}Error: Cargo.lock not found. Please run this script from the project root.${NC}"
    exit 1
fi

HALO2_COMMIT=$(grep "github\.com/zkonduit/halo2?" Cargo.lock | grep -v "halo2wrong" | head -1 | grep -o "#[a-f0-9]\{40\}" | cut -c2-)

if [ -z "$HALO2_COMMIT" ]; then
    echo -e "${RED}Error: Could not parse halo2 commit hash from Cargo.lock${NC}"
    exit 1
fi

echo -e "${GREEN}Found halo2 commit: $HALO2_COMMIT${NC}"
echo ""
echo "This script will:"
echo "1. Sparse checkout the halo2 repository at commit $HALO2_COMMIT"
echo "2. Extract only the icicle/backend/cuda/ directory"
echo "3. Set the ICICLE_BACKEND_INSTALL_DIR environment variable"
echo ""

# Check if user wants to override the default directory
if [ "$AUTO_YES" = true ]; then
    INSTALL_DIR="$DEFAULT_INSTALL_DIR"
    echo -e "${GREEN}Using default installation directory: ${INSTALL_DIR}${NC}"
else
    echo -e "${YELLOW}Default installation directory: ${DEFAULT_INSTALL_DIR}${NC}"
    read -p "Do you want to use a different directory? [y/N]: " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Yy]$ ]]; then
        read -p "Enter the installation directory: " INSTALL_DIR
        INSTALL_DIR="${INSTALL_DIR/#\~/$HOME}" # Expand ~ to $HOME
    else
        INSTALL_DIR="$DEFAULT_INSTALL_DIR"
    fi

    # Confirm the installation directory
    echo ""
    echo -e "${YELLOW}Installation directory: ${INSTALL_DIR}${NC}"
    read -p "Continue with this directory? [y/N]: " -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        echo -e "${RED}Setup cancelled by user.${NC}"
        exit 1
    fi
fi

# Check if ICICLE_BACKEND_INSTALL_DIR is already set
if [ ! -z "$ICICLE_BACKEND_INSTALL_DIR" ] && [ "$AUTO_YES" = false ]; then
    echo ""
    echo -e "${YELLOW}Warning: ICICLE_BACKEND_INSTALL_DIR is already set to: $ICICLE_BACKEND_INSTALL_DIR${NC}"
    read -p "Do you want to override it? [y/N]: " -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        echo -e "${RED}Setup cancelled by user.${NC}"
        exit 1
    fi
elif [ ! -z "$ICICLE_BACKEND_INSTALL_DIR" ] && [ "$AUTO_YES" = true ]; then
    echo -e "${GREEN}Overriding existing ICICLE_BACKEND_INSTALL_DIR (was: $ICICLE_BACKEND_INSTALL_DIR)${NC}"
fi

echo ""
echo -e "${GREEN}Starting GPU setup...${NC}"

# Create installation directory
echo "Creating installation directory..."
mkdir -p "$INSTALL_DIR"

# Create temporary directory for sparse checkout
TEMP_DIR=$(mktemp -d)
echo "Using temporary directory: $TEMP_DIR"

# Clone with sparse checkout
echo "Cloning halo2 repository with sparse checkout..."
cd "$TEMP_DIR"
git clone --filter=blob:none --sparse "$HALO2_REPO" halo2
cd halo2

# Checkout the specific branch and commit
echo "Checking out branch $HALO2_BRANCH at commit $HALO2_COMMIT..."
git checkout "$HALO2_BRANCH"
git checkout "$HALO2_COMMIT"

# Configure sparse checkout
echo "Configuring sparse checkout for icicle/backend/cuda/..."
git sparse-checkout init --cone
git sparse-checkout set icicle/backend/cuda/

# Copy the icicle directory to the installation location
if [ -d "icicle/backend/cuda" ]; then
    echo "Copying icicle/backend/cuda/ to $INSTALL_DIR..."
    cp -r icicle/backend/cuda/* "$INSTALL_DIR/"
    echo -e "${GREEN}Files copied successfully!${NC}"
else
    echo -e "${RED}Error: icicle/backend/cuda directory not found in the repository${NC}"
    exit 1
fi

# Clean up temporary directory
echo "Cleaning up temporary files..."
rm -rf "$TEMP_DIR"

# Ask user about setting environment variable permanently
SETUP_PERMANENT_ENV=false
if [ "$AUTO_YES" = true ]; then
    SETUP_PERMANENT_ENV=true
    echo ""
    echo -e "${GREEN}Setting ICICLE_BACKEND_INSTALL_DIR environment variable permanently...${NC}"
else
    echo ""
    echo -e "${YELLOW}Do you want to set ICICLE_BACKEND_INSTALL_DIR environment variable permanently?${NC}"
    echo "This will add 'export ICICLE_BACKEND_INSTALL_DIR=\"$INSTALL_DIR\"' to your shell configuration file."
    read -p "Set environment variable permanently? [y/N]: " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Yy]$ ]]; then
        SETUP_PERMANENT_ENV=true
    fi
fi

if [ "$SETUP_PERMANENT_ENV" = true ]; then
    echo "Setting ICICLE_BACKEND_INSTALL_DIR environment variable..."

    # Detect shell and set environment variable accordingly
    if [ -n "$ZSH_VERSION" ]; then
        SHELL_RC="$HOME/.zshrc"
    elif [ -n "$BASH_VERSION" ]; then
        SHELL_RC="$HOME/.bashrc"
    else
        # Try to detect based on $SHELL
        case "$SHELL" in
            */zsh)
                SHELL_RC="$HOME/.zshrc"
                ;;
            */bash)
                SHELL_RC="$HOME/.bashrc"
                ;;
            *)
                SHELL_RC="$HOME/.profile"
                ;;
        esac
    fi

    # Add environment variable to shell configuration
    ENV_EXPORT="export ICICLE_BACKEND_INSTALL_DIR=\"$INSTALL_DIR\""

    # Check if the variable is already set in the file
    if [ -f "$SHELL_RC" ] && grep -q "ICICLE_BACKEND_INSTALL_DIR" "$SHELL_RC"; then
        # Replace existing line
        if [[ "$OSTYPE" == "darwin"* ]]; then
            # macOS
            sed -i '' "s|export ICICLE_BACKEND_INSTALL_DIR=.*|$ENV_EXPORT|" "$SHELL_RC"
        else
            # Linux
            sed -i "s|export ICICLE_BACKEND_INSTALL_DIR=.*|$ENV_EXPORT|" "$SHELL_RC"
        fi
        echo "Updated existing ICICLE_BACKEND_INSTALL_DIR in $SHELL_RC"
    else
        # Add new line
        echo "$ENV_EXPORT" >> "$SHELL_RC"
        echo "Added ICICLE_BACKEND_INSTALL_DIR to $SHELL_RC"
    fi

    echo -e "${GREEN}Environment variable set permanently.${NC}"
else
    echo "Skipping permanent environment variable setup."
fi

# Export for current session regardless
export ICICLE_BACKEND_INSTALL_DIR="$INSTALL_DIR"
echo "Environment variable set for current session."

echo ""
echo -e "${GREEN}GPU setup completed successfully!${NC}"
echo ""
echo -e "${YELLOW}Important:${NC}"
echo "1. The ICICLE_BACKEND_INSTALL_DIR environment variable has been set to: $INSTALL_DIR"
if [ "$SETUP_PERMANENT_ENV" = true ]; then
    echo "2. Please restart your terminal or run: source $SHELL_RC"
else
    echo "2. To use GPU features, set: export ICICLE_BACKEND_INSTALL_DIR=\"$INSTALL_DIR\""
fi
echo "3. You can now build with GPU support using: cargo build --features gpu-accelerated"
echo ""
echo -e "${GREEN}Setup complete!${NC}"
@@ -1,5 +1,12 @@
// ignore file if compiling for wasm

#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use mimalloc::MiMalloc;

#[global_allocator]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
static GLOBAL: MiMalloc = MiMalloc;

#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use clap::{CommandFactory, Parser};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
@@ -15,6 +22,9 @@ use log::{error, info};
#[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]
use rand::prelude::SliceRandom;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
#[cfg(feature = "icicle")]
use std::env;

#[tokio::main(flavor = "current_thread")]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub async fn main() {
@@ -28,7 +38,12 @@ pub async fn main() {
    init_logger();
    #[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]
    banner();

    #[cfg(feature = "icicle")]
    if env::var("ENABLE_ICICLE_GPU").is_ok() {
        info!("Running with ICICLE GPU");
    } else {
        info!("Running with CPU");
    }
    debug!(
        "command: \n {}",
        &command.as_json().to_colored_json_auto().unwrap()
Some files were not shown because too many files have changed in this diff.