mirror of
https://github.com/zkonduit/ezkl.git
synced 2026-01-13 08:17:57 -05:00
Compare commits
51 Commits
ac/fix-eag
...
v22.0.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0f6199d546 | ||
|
|
bf9cf14ab7 | ||
|
|
6818962ac2 | ||
|
|
70469e3bf9 | ||
|
|
52ff187e55 | ||
|
|
4e57a5a486 | ||
|
|
fe978caa85 | ||
|
|
1bef92407c | ||
|
|
5ff1c48ede | ||
|
|
ab4997d0c2 | ||
|
|
701e69dd2f | ||
|
|
f631445e26 | ||
|
|
fcbb27677f | ||
|
|
bc26691bd5 | ||
|
|
73c813a81d | ||
|
|
ae076aef09 | ||
|
|
a7544f4060 | ||
|
|
c19fa5218a | ||
|
|
eb205d0c73 | ||
|
|
db498f8d7c | ||
|
|
a363c91160 | ||
|
|
f7f04415fa | ||
|
|
de8d419e5d | ||
|
|
a38d318923 | ||
|
|
864990fe2d | ||
|
|
29c3e4f977 | ||
|
|
0689115828 | ||
|
|
99f741304a | ||
|
|
20ac99fdbf | ||
|
|
532fa65e93 | ||
|
|
cfe5db545c | ||
|
|
21ad56aea1 | ||
|
|
4ed7e0fd29 | ||
|
|
05d1f10615 | ||
|
|
9a8c754e45 | ||
|
|
d82766d413 | ||
|
|
820a80122b | ||
|
|
9c64e42bd3 | ||
|
|
27b5e5dde3 | ||
|
|
83c4afce3b | ||
|
|
50740a22df | ||
|
|
a2624f6303 | ||
|
|
fc5be4f949 | ||
|
|
d0ba505baa | ||
|
|
f35688917d | ||
|
|
7ae541ed35 | ||
|
|
675628cd08 | ||
|
|
4fe7290240 | ||
|
|
3e027db9b6 | ||
|
|
e566acc22a | ||
|
|
75ea99e81d |
88
.github/workflows/benchmarks.yml
vendored
88
.github/workflows/benchmarks.yml
vendored
@@ -8,10 +8,14 @@ on:
|
||||
jobs:
|
||||
|
||||
bench_poseidon:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2023-06-27
|
||||
override: true
|
||||
@@ -20,11 +24,15 @@ jobs:
|
||||
run: cargo bench --verbose --bench poseidon
|
||||
|
||||
bench_einsum_accum_matmul:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: self-hosted
|
||||
needs: [bench_poseidon]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2023-06-27
|
||||
override: true
|
||||
@@ -33,11 +41,15 @@ jobs:
|
||||
run: cargo bench --verbose --bench accum_einsum_matmul
|
||||
|
||||
bench_accum_matmul_relu:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: self-hosted
|
||||
needs: [bench_poseidon]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2023-06-27
|
||||
override: true
|
||||
@@ -46,11 +58,15 @@ jobs:
|
||||
run: cargo bench --verbose --bench accum_matmul_relu
|
||||
|
||||
bench_accum_matmul_relu_overflow:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: self-hosted
|
||||
needs: [bench_poseidon]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2023-06-27
|
||||
override: true
|
||||
@@ -59,11 +75,15 @@ jobs:
|
||||
run: cargo bench --verbose --bench accum_matmul_relu_overflow
|
||||
|
||||
bench_relu:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: self-hosted
|
||||
needs: [bench_poseidon]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2023-06-27
|
||||
override: true
|
||||
@@ -72,11 +92,15 @@ jobs:
|
||||
run: cargo bench --verbose --bench relu
|
||||
|
||||
bench_accum_dot:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: self-hosted
|
||||
needs: [bench_poseidon]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2023-06-27
|
||||
override: true
|
||||
@@ -85,11 +109,15 @@ jobs:
|
||||
run: cargo bench --verbose --bench accum_dot
|
||||
|
||||
bench_accum_conv:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: self-hosted
|
||||
needs: [bench_poseidon]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2023-06-27
|
||||
override: true
|
||||
@@ -98,11 +126,15 @@ jobs:
|
||||
run: cargo bench --verbose --bench accum_conv
|
||||
|
||||
bench_accum_sumpool:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: self-hosted
|
||||
needs: [bench_poseidon]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2023-06-27
|
||||
override: true
|
||||
@@ -111,11 +143,15 @@ jobs:
|
||||
run: cargo bench --verbose --bench accum_sumpool
|
||||
|
||||
bench_pairwise_add:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: self-hosted
|
||||
needs: [bench_poseidon]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2023-06-27
|
||||
override: true
|
||||
@@ -124,11 +160,15 @@ jobs:
|
||||
run: cargo bench --verbose --bench pairwise_add
|
||||
|
||||
bench_accum_sum:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: self-hosted
|
||||
needs: [bench_poseidon]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2023-06-27
|
||||
override: true
|
||||
@@ -137,11 +177,15 @@ jobs:
|
||||
run: cargo bench --verbose --bench accum_sum
|
||||
|
||||
bench_pairwise_pow:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: self-hosted
|
||||
needs: [bench_poseidon]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2023-06-27
|
||||
override: true
|
||||
|
||||
106
.github/workflows/engine.yml
vendored
106
.github/workflows/engine.yml
vendored
@@ -15,17 +15,24 @@ defaults:
|
||||
working-directory: .
|
||||
jobs:
|
||||
publish-wasm-bindings:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
name: publish-wasm-bindings
|
||||
env:
|
||||
RELEASE_TAG: ${{ github.ref_name }}
|
||||
runs-on: ubuntu-latest
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
toolchain: nightly-2024-07-18
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2025-02-17
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
- uses: jetli/wasm-pack-action@v0.4.0
|
||||
- uses: jetli/wasm-pack-action@0d096b08b4e5a7de8c28de67e11e945404e9eefa #v0.4.0
|
||||
with:
|
||||
# Pin to version 0.12.1
|
||||
version: 'v0.12.1'
|
||||
@@ -33,7 +40,7 @@ jobs:
|
||||
run: rustup target add wasm32-unknown-unknown
|
||||
|
||||
- name: Add rust-src
|
||||
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
|
||||
run: rustup component add rust-src --toolchain nightly-2025-02-17-x86_64-unknown-linux-gnu
|
||||
- name: Install binaryen
|
||||
run: |
|
||||
set -e
|
||||
@@ -42,41 +49,41 @@ jobs:
|
||||
wasm-opt --version
|
||||
- name: Build wasm files for both web and nodejs compilation targets
|
||||
run: |
|
||||
wasm-pack build --release --target nodejs --out-dir ./pkg/nodejs . -- -Z build-std="panic_abort,std"
|
||||
wasm-pack build --release --target web --out-dir ./pkg/web . -- -Z build-std="panic_abort,std" --features web
|
||||
wasm-pack build --release --target nodejs --out-dir ./pkg/nodejs . -- -Z build-std="panic_abort,std"
|
||||
wasm-pack build --release --target web --out-dir ./pkg/web . -- -Z build-std="panic_abort,std" --features web
|
||||
- name: Create package.json in pkg folder
|
||||
shell: bash
|
||||
env:
|
||||
RELEASE_TAG: ${{ github.ref_name }}
|
||||
run: |
|
||||
echo '{
|
||||
"name": "@ezkljs/engine",
|
||||
"version": "${{ github.ref_name }}",
|
||||
"dependencies": {
|
||||
"@types/json-bigint": "^1.0.1",
|
||||
"json-bigint": "^1.0.0"
|
||||
},
|
||||
"files": [
|
||||
"nodejs/ezkl_bg.wasm",
|
||||
"nodejs/ezkl.js",
|
||||
"nodejs/ezkl.d.ts",
|
||||
"nodejs/package.json",
|
||||
"nodejs/utils.js",
|
||||
"web/ezkl_bg.wasm",
|
||||
"web/ezkl.js",
|
||||
"web/ezkl.d.ts",
|
||||
"web/snippets/**/*",
|
||||
"web/package.json",
|
||||
"web/utils.js",
|
||||
"ezkl.d.ts"
|
||||
],
|
||||
"main": "nodejs/ezkl.js",
|
||||
"module": "web/ezkl.js",
|
||||
"types": "nodejs/ezkl.d.ts",
|
||||
"sideEffects": [
|
||||
"web/snippets/*"
|
||||
]
|
||||
}' > pkg/package.json
|
||||
cat > pkg/package.json << EOF
|
||||
{
|
||||
"name": "@ezkljs/engine",
|
||||
"version": "$RELEASE_TAG",
|
||||
"dependencies": {
|
||||
"@types/json-bigint": "^1.0.1",
|
||||
"json-bigint": "^1.0.0"
|
||||
},
|
||||
"files": [
|
||||
"nodejs/ezkl_bg.wasm",
|
||||
"nodejs/ezkl.js",
|
||||
"nodejs/ezkl.d.ts",
|
||||
"nodejs/package.json",
|
||||
"nodejs/utils.js",
|
||||
"web/ezkl_bg.wasm",
|
||||
"web/ezkl.js",
|
||||
"web/ezkl.d.ts",
|
||||
"web/snippets/**/*",
|
||||
"web/package.json",
|
||||
"web/utils.js",
|
||||
"ezkl.d.ts"
|
||||
],
|
||||
"main": "nodejs/ezkl.js",
|
||||
"module": "web/ezkl.js",
|
||||
"types": "nodejs/ezkl.d.ts",
|
||||
"sideEffects": [
|
||||
"web/snippets/*"
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
- name: Replace memory definition in nodejs
|
||||
run: |
|
||||
@@ -169,7 +176,7 @@ jobs:
|
||||
curl -s "https://raw.githubusercontent.com/zkonduit/ezkljs-engine/main/README.md" > ./pkg/README.md
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v3
|
||||
uses: actions/setup-node@1a4442cacd436585916779262731d5b162bc6ec7 #v3.8.2
|
||||
with:
|
||||
node-version: "18.12.1"
|
||||
registry-url: "https://registry.npmjs.org"
|
||||
@@ -184,21 +191,26 @@ jobs:
|
||||
|
||||
|
||||
in-browser-evm-ver-publish:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
name: publish-in-browser-evm-verifier-package
|
||||
needs: [publish-wasm-bindings]
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
RELEASE_TAG: ${{ github.ref_name }}
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Update version in package.json
|
||||
shell: bash
|
||||
env:
|
||||
RELEASE_TAG: ${{ github.ref_name }}
|
||||
run: |
|
||||
sed -i "s|\"version\": \".*\"|\"version\": \"${{ github.ref_name }}\"|" in-browser-evm-verifier/package.json
|
||||
sed -i "s|\"version\": \".*\"|\"version\": \"$RELEASE_TAG\"|" in-browser-evm-verifier/package.json
|
||||
- name: Prepare tag and fetch package integrity
|
||||
run: |
|
||||
CLEANED_TAG=${{ github.ref_name }} # Get the tag from ref_name
|
||||
CLEANED_TAG=${RELEASE_TAG} # Get the tag from ref_name
|
||||
CLEANED_TAG="${CLEANED_TAG#v}" # Remove leading 'v'
|
||||
echo "CLEANED_TAG=${CLEANED_TAG}" >> $GITHUB_ENV # Set it as an environment variable for later steps
|
||||
ENGINE_INTEGRITY=$(npm view @ezkljs/engine@$CLEANED_TAG dist.integrity)
|
||||
@@ -218,13 +230,13 @@ jobs:
|
||||
NR==30{$0=" specifier: \"" tag "\""}
|
||||
NR==31{$0=" version: \"" tag "\""}
|
||||
NR==400{$0=" /@ezkljs/engine@" tag ":"}
|
||||
NR==401{$0=" resolution: {integrity: \"" integrity "\"}"} 1' in-browser-evm-verifier/pnpm-lock.yaml > temp.yaml && mv temp.yaml in-browser-evm-verifier/pnpm-lock.yaml
|
||||
NR==401{$0=" resolution: {integrity: \"" integrity "\"}"} 1' in-browser-evm-verifier/pnpm-lock.yaml > temp.yaml && mv temp.yaml in-browser-evm-verifier/pnpm-lock.yaml
|
||||
- name: Use pnpm 8
|
||||
uses: pnpm/action-setup@v2
|
||||
uses: pnpm/action-setup@eae0cfeb286e66ffb5155f1a79b90583a127a68b #v2.4.1
|
||||
with:
|
||||
version: 8
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v3
|
||||
uses: actions/setup-node@1a4442cacd436585916779262731d5b162bc6ec7 #v3.8.2
|
||||
with:
|
||||
node-version: "18.12.1"
|
||||
registry-url: "https://registry.npmjs.org"
|
||||
@@ -235,4 +247,4 @@ jobs:
|
||||
pnpm run build
|
||||
pnpm publish --no-git-checks
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
|
||||
10
.github/workflows/large-tests.yml
vendored
10
.github/workflows/large-tests.yml
vendored
@@ -6,12 +6,16 @@ on:
|
||||
description: "Test scenario tags"
|
||||
jobs:
|
||||
large-tests:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: kaiju
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
toolchain: nightly-2024-07-18
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2025-02-17
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
- name: nanoGPT Mock
|
||||
|
||||
43
.github/workflows/pypi-gpu.yml
vendored
43
.github/workflows/pypi-gpu.yml
vendored
@@ -18,39 +18,46 @@ defaults:
|
||||
jobs:
|
||||
|
||||
linux:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
runs-on: GPU
|
||||
strategy:
|
||||
matrix:
|
||||
target: [x86_64]
|
||||
env:
|
||||
RELEASE_TAG: ${{ github.ref_name }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
|
||||
with:
|
||||
python-version: 3.12
|
||||
architecture: x64
|
||||
|
||||
- name: Set pyproject.toml version to match github tag
|
||||
- name: Set pyproject.toml version to match github tag and rename ezkl to ezkl-gpu
|
||||
shell: bash
|
||||
run: |
|
||||
mv pyproject.toml pyproject.toml.orig
|
||||
sed "s/ezkl/ezkl-gpu/" pyproject.toml.orig >pyproject.toml
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.orig >pyproject.toml
|
||||
sed "s/ezkl/ezkl-gpu/" pyproject.toml.orig > pyproject.toml.tmp
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.tmp > pyproject.toml
|
||||
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2023-06-27
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
|
||||
- name: Set Cargo.toml version to match github tag
|
||||
- name: Set Cargo.toml version to match github tag and rename ezkl to ezkl-gpu
|
||||
shell: bash
|
||||
env:
|
||||
RELEASE_TAG: ${{ github.ref_name }}
|
||||
# the ezkl substitution here looks for the first instance of name = "ezkl" and changes it to "ezkl-gpu"
|
||||
run: |
|
||||
mv Cargo.toml Cargo.toml.orig
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.toml.orig >Cargo.toml
|
||||
sed "0,/name = \"ezkl\"/s/name = \"ezkl\"/name = \"ezkl-gpu\"/" Cargo.toml.orig > Cargo.toml.tmp
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.toml.tmp > Cargo.toml
|
||||
mv Cargo.lock Cargo.lock.orig
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig > Cargo.lock
|
||||
|
||||
- name: Install required libraries
|
||||
shell: bash
|
||||
@@ -58,7 +65,7 @@ jobs:
|
||||
sudo apt-get update && sudo apt-get install -y openssl pkg-config libssl-dev
|
||||
|
||||
- name: Build wheels
|
||||
uses: PyO3/maturin-action@v1
|
||||
uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
manylinux: auto
|
||||
@@ -71,7 +78,7 @@ jobs:
|
||||
pip install ezkl-gpu --no-index --find-links dist --force-reinstall
|
||||
|
||||
- name: Upload wheels
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0
|
||||
with:
|
||||
name: wheels
|
||||
path: dist
|
||||
@@ -87,7 +94,7 @@ jobs:
|
||||
# needs: [ macos, windows, linux, linux-cross, musllinux, musllinux-cross ]
|
||||
needs: [linux]
|
||||
steps:
|
||||
- uses: actions/download-artifact@v3
|
||||
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 #v4.1.8
|
||||
with:
|
||||
name: wheels
|
||||
- name: List Files
|
||||
@@ -99,14 +106,14 @@ jobs:
|
||||
# publishes to PyPI
|
||||
- name: Publish package distributions to PyPI
|
||||
continue-on-error: true
|
||||
uses: pypa/gh-action-pypi-publish@unstable/v1
|
||||
uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc #v1.12.4
|
||||
with:
|
||||
packages-dir: ./
|
||||
packages-dir: ./wheels
|
||||
|
||||
# publishes to TestPyPI
|
||||
- name: Publish package distribution to TestPyPI
|
||||
continue-on-error: true
|
||||
uses: pypa/gh-action-pypi-publish@unstable/v1
|
||||
uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc #v1.12.4
|
||||
with:
|
||||
repository-url: https://test.pypi.org/legacy/
|
||||
packages-dir: ./
|
||||
packages-dir: ./wheels
|
||||
|
||||
213
.github/workflows/pypi.yml
vendored
213
.github/workflows/pypi.yml
vendored
@@ -16,36 +16,53 @@ defaults:
|
||||
|
||||
jobs:
|
||||
macos:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: macos-latest
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
strategy:
|
||||
matrix:
|
||||
target: [x86_64, universal2-apple-darwin]
|
||||
env:
|
||||
RELEASE_TAG: ${{ github.ref_name }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
|
||||
with:
|
||||
python-version: 3.12
|
||||
architecture: x64
|
||||
|
||||
- name: Set pyproject.toml version to match github tag
|
||||
shell: bash
|
||||
run: |
|
||||
mv pyproject.toml pyproject.toml.orig
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.orig >pyproject.toml
|
||||
|
||||
- name: Set Cargo.toml version to match github tag
|
||||
shell: bash
|
||||
env:
|
||||
RELEASE_TAG: ${{ github.ref_name }}
|
||||
run: |
|
||||
mv Cargo.toml Cargo.toml.orig
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.toml.orig >Cargo.toml
|
||||
mv Cargo.lock Cargo.lock.orig
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock
|
||||
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2024-07-18
|
||||
toolchain: nightly-2025-02-17
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
|
||||
- name: Build wheels
|
||||
uses: PyO3/maturin-action@v1
|
||||
if: matrix.target == 'universal2-apple-darwin'
|
||||
uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
args: --release --out dist --features python-bindings
|
||||
- name: Build wheels
|
||||
if: matrix.target == 'x86_64'
|
||||
uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
args: --release --out dist --features python-bindings
|
||||
@@ -56,24 +73,36 @@ jobs:
|
||||
python -c "import ezkl"
|
||||
|
||||
- name: Upload wheels
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0
|
||||
with:
|
||||
name: wheels
|
||||
name: dist-macos-${{ matrix.target }}
|
||||
path: dist
|
||||
|
||||
windows:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: windows-latest
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
strategy:
|
||||
matrix:
|
||||
target: [x64, x86]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
|
||||
with:
|
||||
python-version: 3.12
|
||||
architecture: ${{ matrix.target }}
|
||||
|
||||
- name: Set pyproject.toml version to match github tag
|
||||
shell: bash
|
||||
env:
|
||||
RELEASE_TAG: ${{ github.ref_name }}
|
||||
run: |
|
||||
mv pyproject.toml pyproject.toml.orig
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.orig >pyproject.toml
|
||||
|
||||
- name: Set Cargo.toml version to match github tag
|
||||
shell: bash
|
||||
env:
|
||||
@@ -84,14 +113,14 @@ jobs:
|
||||
mv Cargo.lock Cargo.lock.orig
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock
|
||||
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2024-07-18
|
||||
toolchain: nightly-2025-02-17
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
|
||||
- name: Build wheels
|
||||
uses: PyO3/maturin-action@v1
|
||||
uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
args: --release --out dist --features python-bindings
|
||||
@@ -101,24 +130,36 @@ jobs:
|
||||
python -c "import ezkl"
|
||||
|
||||
- name: Upload wheels
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0 #v4.6.0
|
||||
with:
|
||||
name: wheels
|
||||
name: dist-windows-${{ matrix.target }}
|
||||
path: dist
|
||||
|
||||
linux:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: ubuntu-latest
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
strategy:
|
||||
matrix:
|
||||
target: [x86_64]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
|
||||
with:
|
||||
python-version: 3.12
|
||||
architecture: x64
|
||||
|
||||
- name: Set pyproject.toml version to match github tag
|
||||
shell: bash
|
||||
env:
|
||||
RELEASE_TAG: ${{ github.ref_name }}
|
||||
run: |
|
||||
mv pyproject.toml pyproject.toml.orig
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.orig >pyproject.toml
|
||||
|
||||
- name: Set Cargo.toml version to match github tag
|
||||
shell: bash
|
||||
env:
|
||||
@@ -129,14 +170,13 @@ jobs:
|
||||
mv Cargo.lock Cargo.lock.orig
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock
|
||||
|
||||
|
||||
- name: Install required libraries
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get update && sudo apt-get install -y openssl pkg-config libssl-dev
|
||||
|
||||
- name: Build wheels
|
||||
uses: PyO3/maturin-action@v1
|
||||
uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
manylinux: auto
|
||||
@@ -163,63 +203,14 @@ jobs:
|
||||
python -c "import ezkl"
|
||||
|
||||
- name: Upload wheels
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0
|
||||
with:
|
||||
name: wheels
|
||||
name: dist-linux-${{ matrix.target }}
|
||||
path: dist
|
||||
|
||||
# There's a problem with the maturin-action toolchain for arm arch leading to failed builds
|
||||
# linux-cross:
|
||||
# runs-on: ubuntu-latest
|
||||
# strategy:
|
||||
# matrix:
|
||||
# target: [aarch64, armv7]
|
||||
# steps:
|
||||
# - uses: actions/checkout@v4
|
||||
# - uses: actions/setup-python@v4
|
||||
# with:
|
||||
# python-version: 3.12
|
||||
|
||||
# - name: Install cross-compilation tools for aarch64
|
||||
# if: matrix.target == 'aarch64'
|
||||
# run: |
|
||||
# sudo apt-get update
|
||||
# sudo apt-get install -y gcc make gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu libc6-dev-arm64-cross libusb-1.0-0-dev libatomic1-arm64-cross
|
||||
|
||||
# - name: Install cross-compilation tools for armv7
|
||||
# if: matrix.target == 'armv7'
|
||||
# run: |
|
||||
# sudo apt-get update
|
||||
# sudo apt-get install -y gcc make gcc-arm-linux-gnueabihf binutils-arm-linux-gnueabihf libc6-dev-armhf-cross libusb-1.0-0-dev libatomic1-armhf-cross
|
||||
|
||||
# - name: Build wheels
|
||||
# uses: PyO3/maturin-action@v1
|
||||
# with:
|
||||
# target: ${{ matrix.target }}
|
||||
# manylinux: auto
|
||||
# args: --release --out dist --features python-bindings
|
||||
|
||||
# - uses: uraimo/run-on-arch-action@v2.5.0
|
||||
# name: Install built wheel
|
||||
# with:
|
||||
# arch: ${{ matrix.target }}
|
||||
# distro: ubuntu20.04
|
||||
# githubToken: ${{ github.token }}
|
||||
# install: |
|
||||
# apt-get update
|
||||
# apt-get install -y --no-install-recommends python3 python3-pip
|
||||
# pip3 install -U pip
|
||||
# run: |
|
||||
# pip3 install ezkl --no-index --find-links dist/ --force-reinstall
|
||||
# python3 -c "import ezkl"
|
||||
|
||||
# - name: Upload wheels
|
||||
# uses: actions/upload-artifact@v3
|
||||
# with:
|
||||
# name: wheels
|
||||
# path: dist
|
||||
|
||||
musllinux:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: ubuntu-latest
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
strategy:
|
||||
@@ -227,8 +218,10 @@ jobs:
|
||||
target:
|
||||
- x86_64-unknown-linux-musl
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
|
||||
with:
|
||||
python-version: 3.12
|
||||
architecture: x64
|
||||
@@ -250,13 +243,14 @@ jobs:
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.toml.orig >Cargo.toml
|
||||
mv Cargo.lock Cargo.lock.orig
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock
|
||||
|
||||
- name: Install required libraries
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get update && sudo apt-get install -y pkg-config libssl-dev
|
||||
|
||||
- name: Build wheels
|
||||
uses: PyO3/maturin-action@v1
|
||||
uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
manylinux: musllinux_1_2
|
||||
@@ -264,7 +258,7 @@ jobs:
|
||||
|
||||
- name: Install built wheel
|
||||
if: matrix.target == 'x86_64-unknown-linux-musl'
|
||||
uses: addnab/docker-run-action@v3
|
||||
uses: addnab/docker-run-action@3e77f186b7a929ef010f183a9e24c0f9955ea609
|
||||
with:
|
||||
image: alpine:latest
|
||||
options: -v ${{ github.workspace }}:/io -w /io
|
||||
@@ -277,12 +271,14 @@ jobs:
|
||||
python3 -c "import ezkl"
|
||||
|
||||
- name: Upload wheels
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0
|
||||
with:
|
||||
name: wheels
|
||||
name: dist-musllinux-${{ matrix.target }}
|
||||
path: dist
|
||||
|
||||
musllinux-cross:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: ubuntu-latest
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
strategy:
|
||||
@@ -291,11 +287,21 @@ jobs:
|
||||
- target: aarch64-unknown-linux-musl
|
||||
arch: aarch64
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions/setup-python@b64ffcaf5b410884ad320a9cfac8866006a109aa #v4.8.0
|
||||
with:
|
||||
python-version: 3.12
|
||||
|
||||
- name: Set pyproject.toml version to match github tag
|
||||
shell: bash
|
||||
env:
|
||||
RELEASE_TAG: ${{ github.ref_name }}
|
||||
run: |
|
||||
mv pyproject.toml pyproject.toml.orig
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" pyproject.toml.orig >pyproject.toml
|
||||
|
||||
- name: Set Cargo.toml version to match github tag
|
||||
shell: bash
|
||||
env:
|
||||
@@ -307,13 +313,13 @@ jobs:
|
||||
sed "s/0\\.0\\.0/${RELEASE_TAG//v}/" Cargo.lock.orig >Cargo.lock
|
||||
|
||||
- name: Build wheels
|
||||
uses: PyO3/maturin-action@v1
|
||||
uses: PyO3/maturin-action@5f8a1b3b0aad13193f46c9131f9b9e663def8ce5 #v1.46.0
|
||||
with:
|
||||
target: ${{ matrix.platform.target }}
|
||||
manylinux: musllinux_1_2
|
||||
args: --release --out dist --features python-bindings
|
||||
|
||||
- uses: uraimo/run-on-arch-action@v2.8.1
|
||||
- uses: uraimo/run-on-arch-action@5397f9e30a9b62422f302092631c99ae1effcd9e #v2.8.1
|
||||
name: Install built wheel
|
||||
with:
|
||||
arch: ${{ matrix.platform.arch }}
|
||||
@@ -328,9 +334,9 @@ jobs:
|
||||
python3 -c "import ezkl"
|
||||
|
||||
- name: Upload wheels
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 #v4.6.0
|
||||
with:
|
||||
name: wheels
|
||||
name: dist-musllinux-${{ matrix.platform.target }}
|
||||
path: dist
|
||||
|
||||
pypi-publish:
|
||||
@@ -339,44 +345,43 @@ jobs:
|
||||
permissions:
|
||||
id-token: write
|
||||
if: "startsWith(github.ref, 'refs/tags/')"
|
||||
# TODO: Uncomment if linux-cross is working
|
||||
# needs: [ macos, windows, linux, linux-cross, musllinux, musllinux-cross ]
|
||||
needs: [macos, windows, linux, musllinux, musllinux-cross]
|
||||
steps:
|
||||
- uses: actions/download-artifact@v3
|
||||
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 #v4.1.8
|
||||
with:
|
||||
name: wheels
|
||||
pattern: dist-*
|
||||
merge-multiple: true
|
||||
path: wheels
|
||||
- name: List Files
|
||||
run: ls -R
|
||||
|
||||
# Both publish steps will fail if there is no trusted publisher setup
|
||||
# On failure the publish step will then simply continue to the next one
|
||||
# # publishes to TestPyPI
|
||||
# - name: Publish package distribution to TestPyPI
|
||||
# uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc #v1.12.4
|
||||
# with:
|
||||
# repository-url: https://test.pypi.org/legacy/
|
||||
# packages-dir: ./
|
||||
|
||||
# publishes to PyPI
|
||||
- name: Publish package distributions to PyPI
|
||||
continue-on-error: true
|
||||
uses: pypa/gh-action-pypi-publish@unstable/v1
|
||||
uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc #v1.12.4
|
||||
with:
|
||||
packages-dir: ./
|
||||
packages-dir: ./wheels
|
||||
|
||||
# publishes to TestPyPI
|
||||
- name: Publish package distribution to TestPyPI
|
||||
continue-on-error: true
|
||||
uses: pypa/gh-action-pypi-publish@unstable/v1
|
||||
with:
|
||||
repository-url: https://test.pypi.org/legacy/
|
||||
packages-dir: ./
|
||||
|
||||
doc-publish:
|
||||
permissions:
|
||||
contents: read
|
||||
name: Trigger ReadTheDocs Build
|
||||
runs-on: ubuntu-latest
|
||||
needs: pypi-publish
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Trigger RTDs build
|
||||
uses: dfm/rtds-action@v1
|
||||
uses: dfm/rtds-action@618148c547f4b56cdf4fa4dcf3a94c91ce025f2d
|
||||
with:
|
||||
webhook_url: ${{ secrets.RTDS_WEBHOOK_URL }}
|
||||
webhook_token: ${{ secrets.RTDS_WEBHOOK_TOKEN }}
|
||||
commit_ref: ${{ github.ref_name }}
|
||||
commit_ref: ${{ github.ref_name }}
|
||||
51
.github/workflows/release.yml
vendored
51
.github/workflows/release.yml
vendored
@@ -10,6 +10,9 @@ on:
|
||||
- "*"
|
||||
jobs:
|
||||
create-release:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
name: create-release
|
||||
runs-on: ubuntu-22.04
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
@@ -27,12 +30,15 @@ jobs:
|
||||
|
||||
- name: Create Github Release
|
||||
id: create-release
|
||||
uses: softprops/action-gh-release@v1
|
||||
uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda #v2.2.1
|
||||
with:
|
||||
token: ${{ secrets.RELEASE_TOKEN }}
|
||||
tag_name: ${{ env.EZKL_VERSION }}
|
||||
|
||||
build-release-gpu:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
name: build-release-gpu
|
||||
needs: ["create-release"]
|
||||
runs-on: GPU
|
||||
@@ -43,13 +49,16 @@ jobs:
|
||||
RUST_BACKTRACE: 1
|
||||
PCRE2_SYS_STATIC: 1
|
||||
steps:
|
||||
- uses: actions-rs/toolchain@v1
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2024-07-18
|
||||
toolchain: nightly-2025-02-17
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
- name: Get release version from tag
|
||||
shell: bash
|
||||
@@ -81,7 +90,7 @@ jobs:
|
||||
echo "ASSET=build-artifacts/ezkl-linux-gpu.tar.gz" >> $GITHUB_ENV
|
||||
|
||||
- name: Upload release archive
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }}
|
||||
with:
|
||||
@@ -91,6 +100,10 @@ jobs:
|
||||
asset_content_type: application/octet-stream
|
||||
|
||||
build-release:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
issues: write
|
||||
name: build-release
|
||||
needs: ["create-release"]
|
||||
runs-on: ${{ matrix.os }}
|
||||
@@ -106,32 +119,34 @@ jobs:
|
||||
include:
|
||||
- build: windows-msvc
|
||||
os: windows-latest
|
||||
rust: nightly-2024-07-18
|
||||
rust: nightly-2025-02-17
|
||||
target: x86_64-pc-windows-msvc
|
||||
- build: macos
|
||||
os: macos-13
|
||||
rust: nightly-2024-07-18
|
||||
rust: nightly-2025-02-17
|
||||
target: x86_64-apple-darwin
|
||||
- build: macos-aarch64
|
||||
os: macos-13
|
||||
rust: nightly-2024-07-18
|
||||
rust: nightly-2025-02-17
|
||||
target: aarch64-apple-darwin
|
||||
- build: linux-musl
|
||||
os: ubuntu-22.04
|
||||
rust: nightly-2024-07-18
|
||||
rust: nightly-2025-02-17
|
||||
target: x86_64-unknown-linux-musl
|
||||
- build: linux-gnu
|
||||
os: ubuntu-22.04
|
||||
rust: nightly-2024-07-18
|
||||
rust: nightly-2025-02-17
|
||||
target: x86_64-unknown-linux-gnu
|
||||
- build: linux-aarch64
|
||||
os: ubuntu-22.04
|
||||
rust: nightly-2024-07-18
|
||||
rust: nightly-2025-02-17
|
||||
target: aarch64-unknown-linux-gnu
|
||||
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Get release version from tag
|
||||
shell: bash
|
||||
@@ -155,7 +170,7 @@ jobs:
|
||||
fi
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@nightly
|
||||
uses: dtolnay/rust-toolchain@4f94fbe7e03939b0e674bcc9ca609a16088f63ff #nightly branch, TODO: update when required
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
|
||||
@@ -181,14 +196,18 @@ jobs:
|
||||
echo "target flag is: ${{ env.TARGET_FLAGS }}"
|
||||
echo "target dir is: ${{ env.TARGET_DIR }}"
|
||||
|
||||
- name: Build release binary (no asm)
|
||||
if: matrix.build != 'linux-gnu'
|
||||
- name: Build release binary (no asm or metal)
|
||||
if: matrix.build != 'linux-gnu' && matrix.build != 'macos-aarch64'
|
||||
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry
|
||||
|
||||
- name: Build release binary (asm)
|
||||
if: matrix.build == 'linux-gnu'
|
||||
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features asm
|
||||
|
||||
- name: Build release binary (metal)
|
||||
if: matrix.build == 'macos-aarch64'
|
||||
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features macos-metal
|
||||
|
||||
- name: Strip release binary
|
||||
if: matrix.build != 'windows-msvc' && matrix.build != 'linux-aarch64'
|
||||
run: strip "target/${{ matrix.target }}/release/ezkl"
|
||||
@@ -214,7 +233,7 @@ jobs:
|
||||
echo "ASSET=build-artifacts/ezkl-win.zip" >> $GITHUB_ENV
|
||||
|
||||
- name: Upload release archive
|
||||
uses: actions/upload-release-asset@v1.0.2
|
||||
uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }}
|
||||
with:
|
||||
|
||||
701
.github/workflows/rust.yml
vendored
701
.github/workflows/rust.yml
vendored
File diff suppressed because it is too large
Load Diff
32
.github/workflows/static-analysis.yml
vendored
Normal file
32
.github/workflows/static-analysis.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
name: Static Analysis
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
permissions:
|
||||
contents: read
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly-2025-02-17
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
|
||||
# Run Zizmor static analysis
|
||||
|
||||
- name: Install Zizmor
|
||||
run: cargo install --locked zizmor
|
||||
|
||||
- name: Run Zizmor Analysis
|
||||
run: zizmor .
|
||||
|
||||
|
||||
17
.github/workflows/swift-pm.yml
vendored
17
.github/workflows/swift-pm.yml
vendored
@@ -9,18 +9,24 @@ on:
|
||||
|
||||
jobs:
|
||||
build-and-update:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
runs-on: macos-latest
|
||||
env:
|
||||
EZKL_SWIFT_PACKAGE_REPO: github.com/zkonduit/ezkl-swift-package.git
|
||||
RELEASE_TAG: ${{ github.ref_name }}
|
||||
|
||||
steps:
|
||||
- name: Checkout EZKL
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Extract TAG from github.ref_name
|
||||
run: |
|
||||
# github.ref_name is provided by GitHub Actions and contains the tag name directly.
|
||||
TAG="${{ github.ref_name }}"
|
||||
TAG="${RELEASE_TAG}"
|
||||
echo "Original TAG: $TAG"
|
||||
# Remove leading 'v' if present to match the Swift Package Manager version format.
|
||||
NEW_TAG=${TAG#v}
|
||||
@@ -28,7 +34,7 @@ jobs:
|
||||
echo "TAG=$NEW_TAG" >> $GITHUB_ENV
|
||||
|
||||
- name: Install Rust (nightly)
|
||||
uses: actions-rs/toolchain@v1
|
||||
uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
|
||||
with:
|
||||
toolchain: nightly
|
||||
override: true
|
||||
@@ -47,7 +53,8 @@ jobs:
|
||||
|
||||
- name: Copy Test Files
|
||||
run: |
|
||||
rm -rf ezkl-swift-package/Tests/EzklAssets/*
|
||||
rm -rf ezkl-swift-package/Tests/EzklAssets/
|
||||
mkdir -p ezkl-swift-package/Tests/EzklAssets/
|
||||
cp tests/assets/kzg ezkl-swift-package/Tests/EzklAssets/kzg.srs
|
||||
cp tests/assets/input.json ezkl-swift-package/Tests/EzklAssets/input.json
|
||||
cp tests/assets/model.compiled ezkl-swift-package/Tests/EzklAssets/network.ezkl
|
||||
@@ -105,7 +112,6 @@ jobs:
|
||||
cd ezkl-swift-package
|
||||
git add Sources/EzklCoreBindings Tests/EzklAssets
|
||||
git commit -m "Automatically updated EzklCoreBindings for EZKL"
|
||||
|
||||
if ! git push origin; then
|
||||
echo "::error::Failed to push changes to ${{ env.EZKL_SWIFT_PACKAGE_REPO }}. Please ensure that EZKL_PORTER_TOKEN has the correct permissions."
|
||||
exit 1
|
||||
@@ -115,7 +121,6 @@ jobs:
|
||||
run: |
|
||||
cd ezkl-swift-package
|
||||
source $GITHUB_ENV
|
||||
|
||||
# Tag the latest commit on the current branch
|
||||
if git rev-parse "$TAG" >/dev/null 2>&1; then
|
||||
echo "Tag $TAG already exists locally. Skipping tag creation."
|
||||
|
||||
8
.github/workflows/tagging.yml
vendored
8
.github/workflows/tagging.yml
vendored
@@ -11,10 +11,12 @@ jobs:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Bump version and push tag
|
||||
id: tag_version
|
||||
uses: mathieudutour/github-tag-action@v6.2
|
||||
uses: mathieudutour/github-tag-action@a22cf08638b34d5badda920f9daf6e72c477b07b #v6.2
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
@@ -44,7 +46,7 @@ jobs:
|
||||
git tag $RELEASE_TAG
|
||||
|
||||
- name: Push changes
|
||||
uses: ad-m/github-push-action@master
|
||||
uses: ad-m/github-push-action@77c5b412c50b723d2a4fbc6d71fb5723bcd439aa #master
|
||||
env:
|
||||
RELEASE_TAG: ${{ steps.tag_version.outputs.new_tag }}
|
||||
with:
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -9,6 +9,7 @@ pkg
|
||||
!AttestData.sol
|
||||
!VerifierBase.sol
|
||||
!LoadInstances.sol
|
||||
!AttestData.t.sol
|
||||
*.pf
|
||||
*.vk
|
||||
*.pk
|
||||
@@ -49,3 +50,5 @@ timingData.json
|
||||
!tests/assets/vk.key
|
||||
docs/python/build
|
||||
!tests/assets/vk_aggr.key
|
||||
cache
|
||||
out
|
||||
|
||||
132
Cargo.lock
generated
132
Cargo.lock
generated
@@ -1,6 +1,6 @@
|
||||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
version = 3
|
||||
version = 4
|
||||
|
||||
[[package]]
|
||||
name = "addr2line"
|
||||
@@ -944,7 +944,7 @@ dependencies = [
|
||||
"bitflags 2.5.0",
|
||||
"cexpr",
|
||||
"clang-sys",
|
||||
"itertools 0.12.1",
|
||||
"itertools 0.11.0",
|
||||
"lazy_static",
|
||||
"lazycell",
|
||||
"log",
|
||||
@@ -1760,7 +1760,7 @@ checksum = "a650a461c6a8ff1ef205ed9a2ad56579309853fecefc2423f73dced342f92258"
|
||||
[[package]]
|
||||
name = "ecc"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac/chunked-mv-lookup#b43ebe30e84825d0d004fa27803d99c4187d419f"
|
||||
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac%2Fchunked-mv-lookup#b43ebe30e84825d0d004fa27803d99c4187d419f"
|
||||
dependencies = [
|
||||
"integer",
|
||||
"num-bigint",
|
||||
@@ -1835,6 +1835,16 @@ dependencies = [
|
||||
"syn 2.0.90",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "env_filter"
|
||||
version = "0.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0"
|
||||
dependencies = [
|
||||
"log",
|
||||
"regex",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "env_logger"
|
||||
version = "0.10.2"
|
||||
@@ -1848,6 +1858,19 @@ dependencies = [
|
||||
"termcolor",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "env_logger"
|
||||
version = "0.11.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dcaee3d8e3cfc3fd92428d477bc97fc29ec8716d180c0d74c643bb26166660e0"
|
||||
dependencies = [
|
||||
"anstream",
|
||||
"anstyle",
|
||||
"env_filter",
|
||||
"humantime",
|
||||
"log",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "equivalent"
|
||||
version = "1.0.1"
|
||||
@@ -1923,7 +1946,7 @@ dependencies = [
|
||||
"console_error_panic_hook",
|
||||
"criterion 0.5.1",
|
||||
"ecc",
|
||||
"env_logger",
|
||||
"env_logger 0.10.2",
|
||||
"ethabi",
|
||||
"foundry-compilers",
|
||||
"gag",
|
||||
@@ -1931,7 +1954,7 @@ dependencies = [
|
||||
"halo2_gadgets",
|
||||
"halo2_proofs",
|
||||
"halo2_solidity_verifier",
|
||||
"halo2curves 0.7.0",
|
||||
"halo2curves 0.7.0 (git+https://github.com/privacy-scaling-explorations/halo2curves?rev=b753a832e92d5c86c5c997327a9cf9de86a18851)",
|
||||
"hex",
|
||||
"indicatif",
|
||||
"instant",
|
||||
@@ -1939,20 +1962,17 @@ dependencies = [
|
||||
"lazy_static",
|
||||
"log",
|
||||
"maybe-rayon",
|
||||
"metal",
|
||||
"mimalloc",
|
||||
"mnist",
|
||||
"num",
|
||||
"objc",
|
||||
"openssl",
|
||||
"pg_bigdecimal",
|
||||
"portable-atomic",
|
||||
"pyo3",
|
||||
"pyo3-async-runtimes",
|
||||
"pyo3-log",
|
||||
"pyo3-stub-gen",
|
||||
"rand 0.8.5",
|
||||
"regex",
|
||||
"reqwest",
|
||||
"semver 1.0.22",
|
||||
"seq-macro",
|
||||
@@ -2377,7 +2397,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "halo2_gadgets"
|
||||
version = "0.2.0"
|
||||
source = "git+https://github.com/zkonduit/halo2#d7ecad83c7439fa1cb450ee4a89c2d0b45604ceb"
|
||||
source = "git+https://github.com/zkonduit/halo2#f441c920be45f8f05d2c06a173d82e8885a5ed4d"
|
||||
dependencies = [
|
||||
"arrayvec 0.7.4",
|
||||
"bitvec",
|
||||
@@ -2394,14 +2414,14 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "halo2_proofs"
|
||||
version = "0.3.0"
|
||||
source = "git+https://github.com/zkonduit/halo2#ee4e1a09ebdb1f79f797685b78951c6034c430a6#ee4e1a09ebdb1f79f797685b78951c6034c430a6"
|
||||
source = "git+https://github.com/zkonduit/halo2#f441c920be45f8f05d2c06a173d82e8885a5ed4d#f441c920be45f8f05d2c06a173d82e8885a5ed4d"
|
||||
dependencies = [
|
||||
"bincode",
|
||||
"blake2b_simd",
|
||||
"env_logger",
|
||||
"env_logger 0.10.2",
|
||||
"ff",
|
||||
"group",
|
||||
"halo2curves 0.7.0",
|
||||
"halo2curves 0.7.0 (git+https://github.com/privacy-scaling-explorations/halo2curves?rev=b753a832e92d5c86c5c997327a9cf9de86a18851)",
|
||||
"icicle-bn254",
|
||||
"icicle-core",
|
||||
"icicle-cuda-runtime",
|
||||
@@ -2409,6 +2429,7 @@ dependencies = [
|
||||
"lazy_static",
|
||||
"log",
|
||||
"maybe-rayon",
|
||||
"mopro-msm",
|
||||
"rand_chacha",
|
||||
"rand_core 0.6.4",
|
||||
"rustc-hash 2.0.0",
|
||||
@@ -2420,7 +2441,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "halo2_solidity_verifier"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/alexander-camuto/halo2-solidity-verifier#7def6101d32331182f91483832e4fd293d75f33e"
|
||||
source = "git+https://github.com/alexander-camuto/halo2-solidity-verifier#80c20d6ab57d3b28b2a28df4b63c30923bde17e1"
|
||||
dependencies = [
|
||||
"askama",
|
||||
"blake2b_simd",
|
||||
@@ -2494,6 +2515,36 @@ dependencies = [
|
||||
"subtle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "halo2curves"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d380afeef3f1d4d3245b76895172018cfb087d9976a7cabcd5597775b2933e07"
|
||||
dependencies = [
|
||||
"blake2",
|
||||
"digest 0.10.7",
|
||||
"ff",
|
||||
"group",
|
||||
"halo2derive 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"hex",
|
||||
"lazy_static",
|
||||
"num-bigint",
|
||||
"num-integer",
|
||||
"num-traits",
|
||||
"pairing",
|
||||
"pasta_curves",
|
||||
"paste",
|
||||
"rand 0.8.5",
|
||||
"rand_core 0.6.4",
|
||||
"rayon",
|
||||
"serde",
|
||||
"serde_arrays",
|
||||
"sha2",
|
||||
"static_assertions",
|
||||
"subtle",
|
||||
"unroll",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "halo2curves"
|
||||
version = "0.7.0"
|
||||
@@ -2503,7 +2554,7 @@ dependencies = [
|
||||
"digest 0.10.7",
|
||||
"ff",
|
||||
"group",
|
||||
"halo2derive",
|
||||
"halo2derive 0.1.0 (git+https://github.com/privacy-scaling-explorations/halo2curves?rev=b753a832e92d5c86c5c997327a9cf9de86a18851)",
|
||||
"hex",
|
||||
"lazy_static",
|
||||
"num-bigint",
|
||||
@@ -2523,6 +2574,20 @@ dependencies = [
|
||||
"unroll",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "halo2derive"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bdb99e7492b4f5ff469d238db464131b86c2eaac814a78715acba369f64d2c76"
|
||||
dependencies = [
|
||||
"num-bigint",
|
||||
"num-integer",
|
||||
"num-traits",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 1.0.109",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "halo2derive"
|
||||
version = "0.1.0"
|
||||
@@ -2539,7 +2604,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "halo2wrong"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac/chunked-mv-lookup#b43ebe30e84825d0d004fa27803d99c4187d419f"
|
||||
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac%2Fchunked-mv-lookup#b43ebe30e84825d0d004fa27803d99c4187d419f"
|
||||
dependencies = [
|
||||
"halo2_proofs",
|
||||
"num-bigint",
|
||||
@@ -2890,7 +2955,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "integer"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac/chunked-mv-lookup#b43ebe30e84825d0d004fa27803d99c4187d419f"
|
||||
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac%2Fchunked-mv-lookup#b43ebe30e84825d0d004fa27803d99c4187d419f"
|
||||
dependencies = [
|
||||
"maingate",
|
||||
"num-bigint",
|
||||
@@ -3201,7 +3266,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "maingate"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac/chunked-mv-lookup#b43ebe30e84825d0d004fa27803d99c4187d419f"
|
||||
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac%2Fchunked-mv-lookup#b43ebe30e84825d0d004fa27803d99c4187d419f"
|
||||
dependencies = [
|
||||
"halo2wrong",
|
||||
"num-bigint",
|
||||
@@ -3283,7 +3348,8 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "metal"
|
||||
version = "0.29.0"
|
||||
source = "git+https://github.com/gfx-rs/metal-rs#0e1918b34689c4b8cd13a43372f9898680547ee9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7ecfd3296f8c56b7c1f6fbac3c71cefa9d78ce009850c45000015f206dc7fa21"
|
||||
dependencies = [
|
||||
"bitflags 2.5.0",
|
||||
"block",
|
||||
@@ -3354,6 +3420,28 @@ dependencies = [
|
||||
"byteorder",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mopro-msm"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/zkonduit/metal-msm-gpu-acceleration.git#be5f647b1a6c1a6ea9024390744a2b4d87f5d002"
|
||||
dependencies = [
|
||||
"bincode",
|
||||
"env_logger 0.11.6",
|
||||
"halo2curves 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"instant",
|
||||
"itertools 0.13.0",
|
||||
"lazy_static",
|
||||
"log",
|
||||
"metal",
|
||||
"objc",
|
||||
"once_cell",
|
||||
"rand 0.8.5",
|
||||
"rayon",
|
||||
"serde",
|
||||
"thiserror",
|
||||
"walkdir",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "native-tls"
|
||||
version = "0.2.11"
|
||||
@@ -3587,9 +3675,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
|
||||
|
||||
[[package]]
|
||||
name = "openssl-src"
|
||||
version = "300.2.3+3.2.1"
|
||||
version = "300.4.1+3.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5cff92b6f71555b61bb9315f7c64da3ca43d87531622120fea0195fc761b4843"
|
||||
checksum = "faa4eac4138c62414b5622d1b31c5c304f34b406b013c079c2bbc652fdd6678c"
|
||||
dependencies = [
|
||||
"cc",
|
||||
]
|
||||
@@ -5142,7 +5230,7 @@ checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c"
|
||||
[[package]]
|
||||
name = "snark-verifier"
|
||||
version = "0.1.1"
|
||||
source = "git+https://github.com/zkonduit/snark-verifier?branch=ac/chunked-mv-lookup#8762701ab8fa04e7d243a346030afd85633ec970"
|
||||
source = "git+https://github.com/zkonduit/snark-verifier?branch=ac%2Fchunked-mv-lookup#8762701ab8fa04e7d243a346030afd85633ec970"
|
||||
dependencies = [
|
||||
"ecc",
|
||||
"halo2_proofs",
|
||||
@@ -6146,7 +6234,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "uniffi_testing"
|
||||
version = "0.28.0"
|
||||
source = "git+https://github.com/ElusAegis/uniffi-rs?branch=feat/testing-feature-build-fix#4684b9e7da2d9c964c2b3a73883947aab7370a06"
|
||||
source = "git+https://github.com/ElusAegis/uniffi-rs?branch=feat%2Ftesting-feature-build-fix#4684b9e7da2d9c964c2b3a73883947aab7370a06"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"camino",
|
||||
|
||||
29
Cargo.toml
29
Cargo.toml
@@ -3,7 +3,7 @@ cargo-features = ["profile-rustflags"]
|
||||
[package]
|
||||
name = "ezkl"
|
||||
version = "0.0.0"
|
||||
edition = "2021"
|
||||
edition = "2024"
|
||||
default-run = "ezkl"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
@@ -40,7 +40,6 @@ maybe-rayon = { version = "0.1.1", default-features = false }
|
||||
bincode = { version = "1.3.3", default-features = false }
|
||||
unzip-n = "0.1.2"
|
||||
num = "0.4.1"
|
||||
portable-atomic = { version = "1.6.0", optional = true }
|
||||
tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package = "tosubcommand", optional = true }
|
||||
semver = { version = "1.0.22", optional = true }
|
||||
|
||||
@@ -74,7 +73,6 @@ tokio-postgres = { version = "0.7.10", optional = true }
|
||||
pg_bigdecimal = { version = "0.1.5", optional = true }
|
||||
lazy_static = { version = "1.4.0", optional = true }
|
||||
colored_json = { version = "3.0.1", default-features = false, optional = true }
|
||||
regex = { version = "1", default-features = false, optional = true }
|
||||
tokio = { version = "1.35.0", default-features = false, features = [
|
||||
"macros",
|
||||
"rt-multi-thread",
|
||||
@@ -91,7 +89,6 @@ pyo3-async-runtimes = { git = "https://github.com/PyO3/pyo3-async-runtimes", ver
|
||||
pyo3-log = { version = "0.12.0", default-features = false, optional = true }
|
||||
tract-onnx = { git = "https://github.com/sonos/tract/", rev = "37132e0397d0a73e5bd3a8615d932dabe44f6736", default-features = false, optional = true }
|
||||
tabled = { version = "0.12.0", optional = true }
|
||||
metal = { git = "https://github.com/gfx-rs/metal-rs", optional = true }
|
||||
objc = { version = "0.2.4", optional = true }
|
||||
mimalloc = { version = "0.1", optional = true }
|
||||
pyo3-stub-gen = { version = "0.6.0", optional = true }
|
||||
@@ -245,16 +242,14 @@ ezkl = [
|
||||
"dep:indicatif",
|
||||
"dep:gag",
|
||||
"dep:reqwest",
|
||||
"dep:openssl",
|
||||
"dep:tokio-postgres",
|
||||
"dep:pg_bigdecimal",
|
||||
"dep:lazy_static",
|
||||
"dep:regex",
|
||||
"dep:tokio",
|
||||
"dep:openssl",
|
||||
"dep:mimalloc",
|
||||
"dep:chrono",
|
||||
"dep:sha256",
|
||||
"dep:portable-atomic",
|
||||
"dep:clap_complete",
|
||||
"dep:halo2_solidity_verifier",
|
||||
"dep:semver",
|
||||
@@ -277,13 +272,15 @@ icicle = ["halo2_proofs/icicle_gpu"]
|
||||
empty-cmd = []
|
||||
no-banner = []
|
||||
no-update = []
|
||||
|
||||
macos-metal = ["halo2_proofs/macos"]
|
||||
ios-metal = ["halo2_proofs/ios"]
|
||||
|
||||
[patch.'https://github.com/zkonduit/halo2']
|
||||
halo2_proofs = { git = "https://github.com/zkonduit/halo2#ee4e1a09ebdb1f79f797685b78951c6034c430a6", package = "halo2_proofs" }
|
||||
halo2_proofs = { git = "https://github.com/zkonduit/halo2#f441c920be45f8f05d2c06a173d82e8885a5ed4d", package = "halo2_proofs" }
|
||||
|
||||
[patch.'https://github.com/zkonduit/halo2#0654e92bdf725fd44d849bfef3643870a8c7d50b']
|
||||
halo2_proofs = { git = "https://github.com/zkonduit/halo2#ee4e1a09ebdb1f79f797685b78951c6034c430a6", package = "halo2_proofs" }
|
||||
halo2_proofs = { git = "https://github.com/zkonduit/halo2#f441c920be45f8f05d2c06a173d82e8885a5ed4d", package = "halo2_proofs" }
|
||||
|
||||
|
||||
[patch.crates-io]
|
||||
uniffi_testing = { git = "https://github.com/ElusAegis/uniffi-rs", branch = "feat/testing-feature-build-fix" }
|
||||
@@ -292,12 +289,12 @@ uniffi_testing = { git = "https://github.com/ElusAegis/uniffi-rs", branch = "fea
|
||||
rustflags = ["-C", "relocation-model=pic"]
|
||||
lto = "fat"
|
||||
codegen-units = 1
|
||||
# panic = "abort"
|
||||
#panic = "abort"
|
||||
|
||||
|
||||
[profile.test-runs]
|
||||
inherits = "dev"
|
||||
opt-level = 3
|
||||
|
||||
[package.metadata.wasm-pack.profile.release]
|
||||
wasm-opt = [
|
||||
"-O4",
|
||||
"--flexible-inline-max-function-size",
|
||||
"4294967295",
|
||||
]
|
||||
wasm-opt = ["-O4", "--flexible-inline-max-function-size", "4294967295"]
|
||||
|
||||
28
README.md
28
README.md
@@ -43,7 +43,7 @@ The generated proofs can then be verified with much less computational resources
|
||||
|
||||
----------------------
|
||||
|
||||
### getting started ⚙️
|
||||
### Getting Started ⚙️
|
||||
|
||||
The easiest way to get started is to try out a notebook.
|
||||
|
||||
@@ -76,12 +76,12 @@ For more details visit the [docs](https://docs.ezkl.xyz). The CLI is faster than
|
||||
|
||||
Build the auto-generated rust documentation and open the docs in your browser locally. `cargo doc --open`
|
||||
|
||||
#### In-browser EVM verifier
|
||||
#### In-browser EVM Verifier
|
||||
|
||||
As an alternative to running the native Halo2 verifier as a WASM binding in the browser, you can use the in-browser EVM verifier. The source code of which you can find in the `in-browser-evm-verifier` directory and a README with instructions on how to use it.
|
||||
|
||||
|
||||
### building the project 🔨
|
||||
### Building the Project 🔨
|
||||
|
||||
#### Rust CLI
|
||||
|
||||
@@ -96,7 +96,7 @@ cargo install --locked --path .
|
||||
|
||||
|
||||
|
||||
#### building python bindings
|
||||
#### Building Python Bindings
|
||||
Python bindings exists and can be built using `maturin`. You will need `rust` and `cargo` to be installed.
|
||||
|
||||
```bash
|
||||
@@ -126,7 +126,7 @@ unset ENABLE_ICICLE_GPU
|
||||
|
||||
**NOTE:** Even with the above environment variable set, icicle is disabled for circuits where k <= 8. To change the value of `k` where icicle is enabled, you can set the environment variable `ICICLE_SMALL_K`.
|
||||
|
||||
### contributing 🌎
|
||||
### Contributing 🌎
|
||||
|
||||
If you're interested in contributing and are unsure where to start, reach out to one of the maintainers:
|
||||
|
||||
@@ -144,13 +144,21 @@ More broadly:
|
||||
|
||||
Any contribution intentionally submitted for inclusion in the work by you shall be licensed to Zkonduit Inc. under the terms and conditions specified in the [CLA](https://github.com/zkonduit/ezkl/blob/main/cla.md), which you agree to by intentionally submitting a contribution. In particular, you have the right to submit the contribution and we can distribute it, among other terms and conditions.
|
||||
|
||||
### no security guarantees
|
||||
|
||||
Ezkl is unaudited, beta software undergoing rapid development. There may be bugs. No guarantees of security are made and it should not be relied on in production.
|
||||
### Audits & Security
|
||||
|
||||
> NOTE: Because operations are quantized when they are converted from an onnx file to a zk-circuit, outputs in python and ezkl may differ slightly.
|
||||
[v21.0.0](https://github.com/zkonduit/ezkl/releases/tag/v21.0.0) has been audited by Trail of Bits, the report can be found [here](https://github.com/trailofbits/publications/blob/master/reviews/2025-03-zkonduit-ezkl-securityreview.pdf).
|
||||
|
||||
### no warranty
|
||||
> NOTE: Because operations are quantized when they are converted from an onnx file to a zk-circuit, outputs in python and ezkl may differ slightly.
|
||||
|
||||
Copyright (c) 2024 Zkonduit Inc. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
|
||||
Check out `docs/advanced_security` for more advanced information on potential threat vectors that are specific to zero-knowledge inference, quantization, and to machine learning models generally.
|
||||
|
||||
|
||||
### No Warranty
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
Copyright (c) 2025 Zkonduit Inc.
|
||||
|
||||
|
||||
312
abis/DataAttestation.json
Normal file
312
abis/DataAttestation.json
Normal file
@@ -0,0 +1,312 @@
|
||||
[
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "_contractAddresses",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "bytes",
|
||||
"name": "_callData",
|
||||
"type": "bytes"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256[]",
|
||||
"name": "_decimals",
|
||||
"type": "uint256[]"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256[]",
|
||||
"name": "_bits",
|
||||
"type": "uint256[]"
|
||||
},
|
||||
{
|
||||
"internalType": "uint8",
|
||||
"name": "_instanceOffset",
|
||||
"type": "uint8"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "constructor"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "HALF_ORDER",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "ORDER",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "uint256[]",
|
||||
"name": "instances",
|
||||
"type": "uint256[]"
|
||||
}
|
||||
],
|
||||
"name": "attestData",
|
||||
"outputs": [],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "callData",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bytes",
|
||||
"name": "",
|
||||
"type": "bytes"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "contractAddress",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "bytes",
|
||||
"name": "encoded",
|
||||
"type": "bytes"
|
||||
}
|
||||
],
|
||||
"name": "getInstancesCalldata",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256[]",
|
||||
"name": "instances",
|
||||
"type": "uint256[]"
|
||||
}
|
||||
],
|
||||
"stateMutability": "pure",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "bytes",
|
||||
"name": "encoded",
|
||||
"type": "bytes"
|
||||
}
|
||||
],
|
||||
"name": "getInstancesMemory",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256[]",
|
||||
"name": "instances",
|
||||
"type": "uint256[]"
|
||||
}
|
||||
],
|
||||
"stateMutability": "pure",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "index",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "getScalars",
|
||||
"outputs": [
|
||||
{
|
||||
"components": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "decimals",
|
||||
"type": "uint256"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "bits",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"internalType": "struct DataAttestation.Scalars",
|
||||
"name": "",
|
||||
"type": "tuple"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "instanceOffset",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint8",
|
||||
"name": "",
|
||||
"type": "uint8"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "x",
|
||||
"type": "uint256"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "y",
|
||||
"type": "uint256"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "denominator",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "mulDiv",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "result",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "pure",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "int256",
|
||||
"name": "x",
|
||||
"type": "int256"
|
||||
},
|
||||
{
|
||||
"components": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "decimals",
|
||||
"type": "uint256"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "bits",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"internalType": "struct DataAttestation.Scalars",
|
||||
"name": "_scalars",
|
||||
"type": "tuple"
|
||||
}
|
||||
],
|
||||
"name": "quantizeData",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "int256",
|
||||
"name": "quantized_data",
|
||||
"type": "int256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "pure",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "target",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "bytes",
|
||||
"name": "data",
|
||||
"type": "bytes"
|
||||
}
|
||||
],
|
||||
"name": "staticCall",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bytes",
|
||||
"name": "",
|
||||
"type": "bytes"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "int256",
|
||||
"name": "x",
|
||||
"type": "int256"
|
||||
}
|
||||
],
|
||||
"name": "toFieldElement",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "field_element",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "pure",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "verifier",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "bytes",
|
||||
"name": "encoded",
|
||||
"type": "bytes"
|
||||
}
|
||||
],
|
||||
"name": "verifyWithDataAttestation",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
}
|
||||
]
|
||||
@@ -1,167 +0,0 @@
|
||||
[
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address[]",
|
||||
"name": "_contractAddresses",
|
||||
"type": "address[]"
|
||||
},
|
||||
{
|
||||
"internalType": "bytes[][]",
|
||||
"name": "_callData",
|
||||
"type": "bytes[][]"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256[][]",
|
||||
"name": "_decimals",
|
||||
"type": "uint256[][]"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256[]",
|
||||
"name": "_scales",
|
||||
"type": "uint256[]"
|
||||
},
|
||||
{
|
||||
"internalType": "uint8",
|
||||
"name": "_instanceOffset",
|
||||
"type": "uint8"
|
||||
},
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "_admin",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "constructor"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "accountCalls",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "contractAddress",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "callCount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "admin",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "instanceOffset",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint8",
|
||||
"name": "",
|
||||
"type": "uint8"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "scales",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address[]",
|
||||
"name": "_contractAddresses",
|
||||
"type": "address[]"
|
||||
},
|
||||
{
|
||||
"internalType": "bytes[][]",
|
||||
"name": "_callData",
|
||||
"type": "bytes[][]"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256[][]",
|
||||
"name": "_decimals",
|
||||
"type": "uint256[][]"
|
||||
}
|
||||
],
|
||||
"name": "updateAccountCalls",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "_admin",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"name": "updateAdmin",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "verifier",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "bytes",
|
||||
"name": "encoded",
|
||||
"type": "bytes"
|
||||
}
|
||||
],
|
||||
"name": "verifyWithDataAttestation",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
}
|
||||
]
|
||||
@@ -1,147 +0,0 @@
|
||||
[
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "_contractAddresses",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "bytes",
|
||||
"name": "_callData",
|
||||
"type": "bytes"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "_decimals",
|
||||
"type": "uint256"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256[]",
|
||||
"name": "_scales",
|
||||
"type": "uint256[]"
|
||||
},
|
||||
{
|
||||
"internalType": "uint8",
|
||||
"name": "_instanceOffset",
|
||||
"type": "uint8"
|
||||
},
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "_admin",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "constructor"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "accountCall",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "contractAddress",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "bytes",
|
||||
"name": "callData",
|
||||
"type": "bytes"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "decimals",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "admin",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "instanceOffset",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint8",
|
||||
"name": "",
|
||||
"type": "uint8"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "_contractAddresses",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "bytes",
|
||||
"name": "_callData",
|
||||
"type": "bytes"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "_decimals",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "updateAccountCalls",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "_admin",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"name": "updateAdmin",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "verifier",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "bytes",
|
||||
"name": "encoded",
|
||||
"type": "bytes"
|
||||
}
|
||||
],
|
||||
"name": "verifyWithDataAttestation",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
}
|
||||
]
|
||||
@@ -73,6 +73,8 @@ impl Circuit<Fr> for MyCircuit {
|
||||
padding: vec![(0, 0)],
|
||||
stride: vec![1; 2],
|
||||
group: 1,
|
||||
data_format: DataFormat::NCHW,
|
||||
kernel_format: KernelFormat::OIHW,
|
||||
}),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
@@ -69,6 +69,7 @@ impl Circuit<Fr> for MyCircuit {
|
||||
stride: vec![1, 1],
|
||||
kernel_shape: vec![2, 2],
|
||||
normalized: false,
|
||||
data_format: DataFormat::NCHW,
|
||||
}),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
@@ -23,8 +23,6 @@ use halo2curves::bn256::{Bn256, Fr};
|
||||
use rand::rngs::OsRng;
|
||||
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
|
||||
|
||||
const L: usize = 10;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct MyCircuit {
|
||||
image: ValTensor<Fr>,
|
||||
@@ -40,7 +38,7 @@ impl Circuit<Fr> for MyCircuit {
|
||||
}
|
||||
|
||||
fn configure(cs: &mut ConstraintSystem<Fr>) -> Self::Config {
|
||||
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, 10>::configure(cs, ())
|
||||
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::configure(cs, ())
|
||||
}
|
||||
|
||||
fn synthesize(
|
||||
@@ -48,7 +46,7 @@ impl Circuit<Fr> for MyCircuit {
|
||||
config: Self::Config,
|
||||
mut layouter: impl Layouter<Fr>,
|
||||
) -> Result<(), Error> {
|
||||
let chip: PoseidonChip<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, L> =
|
||||
let chip: PoseidonChip<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE> =
|
||||
PoseidonChip::new(config);
|
||||
chip.layout(&mut layouter, &[self.image.clone()], 0, &mut HashMap::new())?;
|
||||
Ok(())
|
||||
@@ -59,7 +57,7 @@ fn runposeidon(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("poseidon");
|
||||
|
||||
for size in [64, 784, 2352, 12288].iter() {
|
||||
let k = (PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, L>::num_rows(*size)
|
||||
let k = (PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::num_rows(*size)
|
||||
as f32)
|
||||
.log2()
|
||||
.ceil() as u32;
|
||||
@@ -67,7 +65,7 @@ fn runposeidon(c: &mut Criterion) {
|
||||
|
||||
let message = (0..*size).map(|_| Fr::random(OsRng)).collect::<Vec<_>>();
|
||||
let _output =
|
||||
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, L>::run(message.to_vec())
|
||||
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::run(message.to_vec())
|
||||
.unwrap();
|
||||
|
||||
let mut image = Tensor::from(message.into_iter().map(Value::known));
|
||||
|
||||
@@ -10,6 +10,7 @@ use rand::Rng;
|
||||
|
||||
// Assuming these are your types
|
||||
#[derive(Clone)]
|
||||
#[allow(dead_code)]
|
||||
enum ValType {
|
||||
Constant(F),
|
||||
AssignedConstant(usize, F),
|
||||
@@ -21,7 +22,7 @@ fn generate_test_data(size: usize, zero_probability: f64) -> Vec<ValType> {
|
||||
let mut rng = rand::thread_rng();
|
||||
(0..size)
|
||||
.map(|_i| {
|
||||
if rng.gen::<f64>() < zero_probability {
|
||||
if rng.r#gen::<f64>() < zero_probability {
|
||||
ValType::Constant(F::ZERO)
|
||||
} else {
|
||||
ValType::Constant(F::ONE) // Or some other non-zero value
|
||||
|
||||
@@ -8,21 +8,27 @@ contract LoadInstances {
|
||||
*/
|
||||
function getInstancesMemory(
|
||||
bytes memory encoded
|
||||
) internal pure returns (uint256[] memory instances) {
|
||||
) public pure returns (uint256[] memory instances) {
|
||||
bytes4 funcSig;
|
||||
uint256 instances_offset;
|
||||
uint256 instances_length;
|
||||
assembly {
|
||||
// fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
|
||||
funcSig := mload(add(encoded, 0x20))
|
||||
|
||||
}
|
||||
if (funcSig == 0xaf83a18d) {
|
||||
instances_offset = 0x64;
|
||||
} else if (funcSig == 0x1e8e1e13) {
|
||||
instances_offset = 0x44;
|
||||
} else {
|
||||
revert("Invalid function signature");
|
||||
}
|
||||
assembly {
|
||||
// Fetch instances offset which is 4 + 32 + 32 bytes away from
|
||||
// start of encoded for `verifyProof(bytes,uint256[])`,
|
||||
// and 4 + 32 + 32 +32 away for `verifyProof(address,bytes,uint256[])`
|
||||
|
||||
instances_offset := mload(
|
||||
add(encoded, add(0x44, mul(0x20, eq(funcSig, 0xaf83a18d))))
|
||||
)
|
||||
instances_offset := mload(add(encoded, instances_offset))
|
||||
|
||||
instances_length := mload(add(add(encoded, 0x24), instances_offset))
|
||||
}
|
||||
@@ -41,6 +47,10 @@ contract LoadInstances {
|
||||
)
|
||||
}
|
||||
}
|
||||
require(
|
||||
funcSig == 0xaf83a18d || funcSig == 0x1e8e1e13,
|
||||
"Invalid function signature"
|
||||
);
|
||||
}
|
||||
/**
|
||||
* @dev Parse the instances array from the Halo2Verifier encoded calldata.
|
||||
@@ -49,23 +59,31 @@ contract LoadInstances {
|
||||
*/
|
||||
function getInstancesCalldata(
|
||||
bytes calldata encoded
|
||||
) internal pure returns (uint256[] memory instances) {
|
||||
) public pure returns (uint256[] memory instances) {
|
||||
bytes4 funcSig;
|
||||
uint256 instances_offset;
|
||||
uint256 instances_length;
|
||||
assembly {
|
||||
// fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
|
||||
funcSig := calldataload(encoded.offset)
|
||||
|
||||
}
|
||||
if (funcSig == 0xaf83a18d) {
|
||||
instances_offset = 0x44;
|
||||
} else if (funcSig == 0x1e8e1e13) {
|
||||
instances_offset = 0x24;
|
||||
} else {
|
||||
revert("Invalid function signature");
|
||||
}
|
||||
// We need to create a new assembly block in order for solidity
|
||||
// to cast the funcSig to a bytes4 type. Otherwise it will load the entire first 32 bytes of the calldata
|
||||
// within the block
|
||||
assembly {
|
||||
// Fetch instances offset which is 4 + 32 + 32 bytes away from
|
||||
// start of encoded for `verifyProof(bytes,uint256[])`,
|
||||
// and 4 + 32 + 32 +32 away for `verifyProof(address,bytes,uint256[])`
|
||||
|
||||
instances_offset := calldataload(
|
||||
add(
|
||||
encoded.offset,
|
||||
add(0x24, mul(0x20, eq(funcSig, 0xaf83a18d)))
|
||||
)
|
||||
add(encoded.offset, instances_offset)
|
||||
)
|
||||
|
||||
instances_length := calldataload(
|
||||
@@ -96,7 +114,7 @@ contract LoadInstances {
|
||||
// The kzg commitments of a given model, all aggregated into a single bytes array.
|
||||
// At solidity generation time, the commitments are hardcoded into the contract via the COMMITMENT_KZG constant.
|
||||
// It will be used to check that the proof commitments match the expected commitments.
|
||||
bytes constant COMMITMENT_KZG = hex"";
|
||||
bytes constant COMMITMENT_KZG = hex"1234";
|
||||
|
||||
contract SwapProofCommitments {
|
||||
/**
|
||||
@@ -113,17 +131,20 @@ contract SwapProofCommitments {
|
||||
assembly {
|
||||
// fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
|
||||
funcSig := calldataload(encoded.offset)
|
||||
|
||||
}
|
||||
if (funcSig == 0xaf83a18d) {
|
||||
proof_offset = 0x24;
|
||||
} else if (funcSig == 0x1e8e1e13) {
|
||||
proof_offset = 0x04;
|
||||
} else {
|
||||
revert("Invalid function signature");
|
||||
}
|
||||
assembly {
|
||||
// Fetch proof offset which is 4 + 32 bytes away from
|
||||
// start of encoded for `verifyProof(bytes,uint256[])`,
|
||||
// and 4 + 32 + 32 away for `verifyProof(address,bytes,uint256[])`
|
||||
|
||||
proof_offset := calldataload(
|
||||
add(
|
||||
encoded.offset,
|
||||
add(0x04, mul(0x20, eq(funcSig, 0xaf83a18d)))
|
||||
)
|
||||
)
|
||||
proof_offset := calldataload(add(encoded.offset, proof_offset))
|
||||
|
||||
proof_length := calldataload(
|
||||
add(add(encoded.offset, 0x04), proof_offset)
|
||||
@@ -154,7 +175,7 @@ contract SwapProofCommitments {
|
||||
let wordCommitment := mload(add(commitment, i))
|
||||
equal := eq(wordProof, wordCommitment)
|
||||
if eq(equal, 0) {
|
||||
return(0, 0)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -163,36 +184,38 @@ contract SwapProofCommitments {
|
||||
} /// end checkKzgCommits
|
||||
}
|
||||
|
||||
contract DataAttestationSingle is LoadInstances, SwapProofCommitments {
|
||||
/**
|
||||
* @notice Struct used to make view only call to account to fetch the data that EZKL reads from.
|
||||
* @param the address of the account to make calls to
|
||||
* @param the abi encoded function calls to make to the `contractAddress`
|
||||
*/
|
||||
struct AccountCall {
|
||||
address contractAddress;
|
||||
bytes callData;
|
||||
contract DataAttestation is LoadInstances, SwapProofCommitments {
|
||||
// the address of the account to make calls to
|
||||
address public immutable contractAddress;
|
||||
|
||||
// the abi encoded function calls to make to the `contractAddress` that returns the attested to data
|
||||
bytes public callData;
|
||||
|
||||
struct Scalars {
|
||||
// The number of base 10 decimals to scale the data by.
|
||||
// For most ERC20 tokens this is 1e18
|
||||
uint256 decimals;
|
||||
// The number of fractional bits of the fixed point EZKL data points.
|
||||
uint256 bits;
|
||||
}
|
||||
AccountCall public accountCall;
|
||||
|
||||
uint[] scales;
|
||||
Scalars[] private scalars;
|
||||
|
||||
address public admin;
|
||||
function getScalars(uint256 index) public view returns (Scalars memory) {
|
||||
return scalars[index];
|
||||
}
|
||||
|
||||
/**
|
||||
* @notice EZKL P value
|
||||
* @dev In order to prevent the verifier from accepting two version of the same pubInput, n and the quantity (n + P), where n + P <= 2^256, we require that all instances are stricly less than P. a
|
||||
* @dev The reason for this is that the assmebly code of the verifier performs all arithmetic operations modulo P and as a consequence can't distinguish between n and n + P.
|
||||
*/
|
||||
uint256 constant ORDER =
|
||||
uint256 public constant ORDER =
|
||||
uint256(
|
||||
0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
|
||||
);
|
||||
|
||||
uint256 constant INPUT_LEN = 0;
|
||||
|
||||
uint256 constant OUTPUT_LEN = 0;
|
||||
uint256 public constant HALF_ORDER = ORDER >> 1;
|
||||
|
||||
uint8 public instanceOffset;
|
||||
|
||||
@@ -204,53 +227,27 @@ contract DataAttestationSingle is LoadInstances, SwapProofCommitments {
|
||||
constructor(
|
||||
address _contractAddresses,
|
||||
bytes memory _callData,
|
||||
uint256 _decimals,
|
||||
uint[] memory _scales,
|
||||
uint8 _instanceOffset,
|
||||
address _admin
|
||||
uint256[] memory _decimals,
|
||||
uint[] memory _bits,
|
||||
uint8 _instanceOffset
|
||||
) {
|
||||
admin = _admin;
|
||||
for (uint i; i < _scales.length; i++) {
|
||||
scales.push(1 << _scales[i]);
|
||||
require(
|
||||
_bits.length == _decimals.length,
|
||||
"Invalid scalar array lengths"
|
||||
);
|
||||
for (uint i; i < _bits.length; i++) {
|
||||
scalars.push(Scalars(10 ** _decimals[i], 1 << _bits[i]));
|
||||
}
|
||||
populateAccountCalls(_contractAddresses, _callData, _decimals);
|
||||
contractAddress = _contractAddresses;
|
||||
callData = _callData;
|
||||
instanceOffset = _instanceOffset;
|
||||
}
|
||||
|
||||
function updateAdmin(address _admin) external {
|
||||
require(msg.sender == admin, "Only admin can update admin");
|
||||
if (_admin == address(0)) {
|
||||
revert();
|
||||
}
|
||||
admin = _admin;
|
||||
}
|
||||
|
||||
function updateAccountCalls(
|
||||
address _contractAddresses,
|
||||
bytes memory _callData,
|
||||
uint256 _decimals
|
||||
) external {
|
||||
require(msg.sender == admin, "Only admin can update account calls");
|
||||
populateAccountCalls(_contractAddresses, _callData, _decimals);
|
||||
}
|
||||
|
||||
function populateAccountCalls(
|
||||
address _contractAddresses,
|
||||
bytes memory _callData,
|
||||
uint256 _decimals
|
||||
) internal {
|
||||
AccountCall memory _accountCall = accountCall;
|
||||
_accountCall.contractAddress = _contractAddresses;
|
||||
_accountCall.callData = _callData;
|
||||
_accountCall.decimals = 10 ** _decimals;
|
||||
accountCall = _accountCall;
|
||||
}
|
||||
|
||||
function mulDiv(
|
||||
uint256 x,
|
||||
uint256 y,
|
||||
uint256 denominator
|
||||
) internal pure returns (uint256 result) {
|
||||
) public pure returns (uint256 result) {
|
||||
unchecked {
|
||||
uint256 prod0;
|
||||
uint256 prod1;
|
||||
@@ -298,21 +295,28 @@ contract DataAttestationSingle is LoadInstances, SwapProofCommitments {
|
||||
/**
|
||||
* @dev Quantize the data returned from the account calls to the scale used by the EZKL model.
|
||||
* @param x - One of the elements of the data returned from the account calls
|
||||
* @param _decimals - Number of base 10 decimals to scale the data by.
|
||||
* @param _scale - The base 2 scale used to convert the floating point value into a fixed point value.
|
||||
* @param _scalars - The scaling factors for the data returned from the account calls.
|
||||
*
|
||||
*/
|
||||
function quantizeData(
|
||||
int x,
|
||||
uint256 _decimals,
|
||||
uint256 _scale
|
||||
) internal pure returns (int256 quantized_data) {
|
||||
Scalars memory _scalars
|
||||
) public pure returns (int256 quantized_data) {
|
||||
if (_scalars.bits == 1 && _scalars.decimals == 1) {
|
||||
return x;
|
||||
}
|
||||
bool neg = x < 0;
|
||||
if (neg) x = -x;
|
||||
uint output = mulDiv(uint256(x), _scale, _decimals);
|
||||
if (mulmod(uint256(x), _scale, _decimals) * 2 >= _decimals) {
|
||||
uint output = mulDiv(uint256(x), _scalars.bits, _scalars.decimals);
|
||||
if (
|
||||
mulmod(uint256(x), _scalars.bits, _scalars.decimals) * 2 >=
|
||||
_scalars.decimals
|
||||
) {
|
||||
output += 1;
|
||||
}
|
||||
if (output > HALF_ORDER) {
|
||||
revert("Overflow field modulus");
|
||||
}
|
||||
quantized_data = neg ? -int256(output) : int256(output);
|
||||
}
|
||||
/**
|
||||
@@ -324,7 +328,7 @@ contract DataAttestationSingle is LoadInstances, SwapProofCommitments {
|
||||
function staticCall(
|
||||
address target,
|
||||
bytes memory data
|
||||
) internal view returns (bytes memory) {
|
||||
) public view returns (bytes memory) {
|
||||
(bool success, bytes memory returndata) = target.staticcall(data);
|
||||
if (success) {
|
||||
if (returndata.length == 0) {
|
||||
@@ -345,7 +349,7 @@ contract DataAttestationSingle is LoadInstances, SwapProofCommitments {
|
||||
*/
|
||||
function toFieldElement(
|
||||
int256 x
|
||||
) internal pure returns (uint256 field_element) {
|
||||
) public pure returns (uint256 field_element) {
|
||||
// The casting down to uint256 is safe because the order is about 2^254, and the value
|
||||
// of x ranges of -2^127 to 2^127, so x + int(ORDER) is always positive.
|
||||
return uint256(x + int(ORDER)) % ORDER;
|
||||
@@ -355,315 +359,16 @@ contract DataAttestationSingle is LoadInstances, SwapProofCommitments {
|
||||
* @dev Make the account calls to fetch the data that EZKL reads from and attest to the data.
|
||||
* @param instances - The public instances to the proof (the data in the proof that publicly accessible to the verifier).
|
||||
*/
|
||||
function attestData(uint256[] memory instances) internal view {
|
||||
require(
|
||||
instances.length >= INPUT_LEN + OUTPUT_LEN,
|
||||
"Invalid public inputs length"
|
||||
);
|
||||
AccountCall memory _accountCall = accountCall;
|
||||
uint[] memory _scales = scales;
|
||||
bytes memory returnData = staticCall(
|
||||
_accountCall.contractAddress,
|
||||
_accountCall.callData
|
||||
);
|
||||
function attestData(uint256[] memory instances) public view {
|
||||
bytes memory returnData = staticCall(contractAddress, callData);
|
||||
int256[] memory x = abi.decode(returnData, (int256[]));
|
||||
uint _offset;
|
||||
int output = quantizeData(x[0], _accountCall.decimals, _scales[0]);
|
||||
uint field_element = toFieldElement(output);
|
||||
int output;
|
||||
uint fieldElement;
|
||||
for (uint i = 0; i < x.length; i++) {
|
||||
if (field_element != instances[i + instanceOffset]) {
|
||||
_offset += 1;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
uint length = x.length - _offset;
|
||||
for (uint i = 1; i < length; i++) {
|
||||
output = quantizeData(x[i], _accountCall.decimals, _scales[i]);
|
||||
field_element = toFieldElement(output);
|
||||
require(
|
||||
field_element == instances[i + instanceOffset + _offset],
|
||||
"Public input does not match"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev Verify the proof with the data attestation.
|
||||
* @param verifier - The address of the verifier contract.
|
||||
* @param encoded - The verifier calldata.
|
||||
*/
|
||||
function verifyWithDataAttestation(
|
||||
address verifier,
|
||||
bytes calldata encoded
|
||||
) public view returns (bool) {
|
||||
require(verifier.code.length > 0, "Address: call to non-contract");
|
||||
attestData(getInstancesCalldata(encoded));
|
||||
// static call the verifier contract to verify the proof
|
||||
(bool success, bytes memory returndata) = verifier.staticcall(encoded);
|
||||
|
||||
if (success) {
|
||||
return abi.decode(returndata, (bool));
|
||||
} else {
|
||||
revert("low-level call to verifier failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This contract serves as a Data Attestation Verifier for the EZKL model.
|
||||
// It is designed to read and attest to instances of proofs generated from a specified circuit.
|
||||
// It is particularly constructed to read only int256 data from specified on-chain contracts' view functions.
|
||||
|
||||
// Overview of the contract functionality:
|
||||
// 1. Initialization: Through the constructor, it sets up the contract calls that the EZKL model will read from.
|
||||
// 2. Data Quantization: Quantizes the returned data into a scaled fixed-point representation. See the `quantizeData` method for details.
|
||||
// 3. Static Calls: Makes static calls to fetch data from other contracts. See the `staticCall` method.
|
||||
// 4. Field Element Conversion: The fixed-point representation is then converted into a field element modulo P using the `toFieldElement` method.
|
||||
// 5. Data Attestation: The `attestData` method validates that the public instances match the data fetched and processed by the contract.
|
||||
// 6. Proof Verification: The `verifyWithDataAttestationMulti` method parses the instances out of the encoded calldata and calls the `attestData` method to validate the public instances,
|
||||
// 6b. Optional KZG Commitment Verification: It also checks the KZG commitments in the proof against the expected commitments using the `checkKzgCommits` method.
|
||||
// then calls the `verifyProof` method to verify the proof on the verifier.
|
||||
|
||||
contract DataAttestationMulti is LoadInstances, SwapProofCommitments {
|
||||
/**
 * @notice Struct used to make view only calls to accounts to fetch the data that EZKL reads from.
 * @param contractAddress the address of the account to make calls to
 * @param callData the abi encoded function calls to make to the `contractAddress`, keyed by call index
 * @param decimals for each call index, the fixed point divisor (stored as 10 ** decimals by `populateAccountCalls`)
 * @param callCount the number of calls stored for this account
 */
struct AccountCall {
    address contractAddress;
    mapping(uint256 => bytes) callData;
    mapping(uint256 => uint256) decimals;
    uint callCount;
}
|
||||
AccountCall[] public accountCalls;
|
||||
|
||||
uint[] public scales;
|
||||
|
||||
address public admin;
|
||||
|
||||
/**
|
||||
* @notice EZKL P value
|
||||
* @dev In order to prevent the verifier from accepting two versions of the same pubInput, n and the quantity (n + P), where n + P <= 2^256, we require that all instances are strictly less than P.
|
||||
* @dev The reason for this is that the assembly code of the verifier performs all arithmetic operations modulo P and as a consequence can't distinguish between n and n + P.
|
||||
*/
|
||||
uint256 constant ORDER =
|
||||
uint256(
|
||||
0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
|
||||
);
|
||||
|
||||
uint256 constant INPUT_CALLS = 0;
|
||||
|
||||
uint256 constant OUTPUT_CALLS = 0;
|
||||
|
||||
uint8 public instanceOffset;
|
||||
|
||||
/**
 * @dev Initialize the contract with the account calls the EZKL model will read from.
 * @param _contractAddresses - The addresses of all the contracts EZKL reads storage from.
 * @param _callData - The abi encoded function calls to make to each `contractAddress` that EZKL reads storage from.
 * @param _decimals - The decimal counts used to build the fixed point divisors for each call.
 * @param _scales - The log2 scales used to quantize the fetched data.
 * @param _instanceOffset - Offset into the public instances at which the attested data begins.
 * @param _admin - The address permitted to update the stored account calls.
 */
constructor(
    address[] memory _contractAddresses,
    bytes[][] memory _callData,
    uint256[][] memory _decimals,
    uint[] memory _scales,
    uint8 _instanceOffset,
    address _admin
) {
    admin = _admin;
    // Store each scale as a power of two: 2 ** _scales[idx].
    uint numScales = _scales.length;
    for (uint idx = 0; idx < numScales; ++idx) {
        scales.push(1 << _scales[idx]);
    }
    populateAccountCalls(_contractAddresses, _callData, _decimals);
    instanceOffset = _instanceOffset;
}
|
||||
|
||||
/// @notice Transfer admin rights to `_admin`.
/// @dev Callable only by the current admin; reverts (without a reason) on the zero address.
function updateAdmin(address _admin) external {
    bool callerIsAdmin = msg.sender == admin;
    require(callerIsAdmin, "Only admin can update admin");
    if (_admin == address(0)) revert();
    admin = _admin;
}
|
||||
|
||||
/// @notice Replace the stored account calls with a new set.
/// @dev Callable only by the admin; delegates all validation and storage writes
/// to `populateAccountCalls`.
function updateAccountCalls(
    address[] memory _contractAddresses,
    bytes[][] memory _callData,
    uint256[][] memory _decimals
) external {
    bool callerIsAdmin = msg.sender == admin;
    require(callerIsAdmin, "Only admin can update account calls");
    populateAccountCalls(_contractAddresses, _callData, _decimals);
}
|
||||
|
||||
/// @dev Fill the `accountCalls` storage array from the provided addresses,
/// call data and decimals. Shared by the constructor and `updateAccountCalls`.
/// NOTE(review): `accountCalls[i]` is indexed without ever pushing, and the first
/// require demands `accountCalls.length == _contractAddresses.length` — so this
/// only succeeds when the storage array already has the matching length.
/// TODO confirm how `accountCalls` is meant to grow.
function populateAccountCalls(
    address[] memory _contractAddresses,
    bytes[][] memory _callData,
    uint256[][] memory _decimals
) internal {
    // Call data and decimals must line up one-to-one with the addresses.
    require(
        _contractAddresses.length == _callData.length &&
            accountCalls.length == _contractAddresses.length,
        "Invalid input length"
    );
    require(
        _decimals.length == _contractAddresses.length,
        "Invalid number of decimals"
    );
    // fill in the accountCalls storage array
    uint counter = 0;
    for (uint256 i = 0; i < _contractAddresses.length; i++) {
        AccountCall storage accountCall = accountCalls[i];
        accountCall.contractAddress = _contractAddresses[i];
        accountCall.callCount = _callData[i].length;
        for (uint256 j = 0; j < _callData[i].length; j++) {
            accountCall.callData[j] = _callData[i][j];
            // Store the divisor 10 ** decimals rather than the raw decimal count.
            accountCall.decimals[j] = 10 ** _decimals[i][j];
        }
        // count the total number of storage reads across all of the accounts
        counter += _callData[i].length;
    }
    // The total must match the circuit's expected number of attested values.
    require(
        counter == INPUT_CALLS + OUTPUT_CALLS,
        "Invalid number of calls"
    );
}
|
||||
|
||||
/// @dev Computes floor(x * y / denominator) with full 512-bit intermediate
/// precision, so the result is exact even when x * y overflows 256 bits.
/// Reverts with "Math: mulDiv overflow" when the final result does not fit
/// in a uint256 (i.e. denominator <= the high 256 bits of the product).
/// NOTE(review): this matches the well-known OpenZeppelin `Math.mulDiv`
/// implementation — confirm against the vendored version if updating.
function mulDiv(
    uint256 x,
    uint256 y,
    uint256 denominator
) internal pure returns (uint256 result) {
    unchecked {
        // 512-bit product of x and y: prod1 holds the high 256 bits,
        // prod0 the low 256 bits, recovered via the mulmod(-1) trick.
        uint256 prod0;
        uint256 prod1;
        assembly {
            let mm := mulmod(x, y, not(0))
            prod0 := mul(x, y)
            prod1 := sub(sub(mm, prod0), lt(mm, prod0))
        }

        // Fast path: the product fits in 256 bits, so plain division is exact.
        if (prod1 == 0) {
            return prod0 / denominator;
        }

        // Require the quotient to fit in 256 bits (also rejects denominator == 0).
        require(denominator > prod1, "Math: mulDiv overflow");

        // Subtract the 512-bit remainder from the 512-bit product so the
        // remaining value is exactly divisible by denominator.
        uint256 remainder;
        assembly {
            remainder := mulmod(x, y, denominator)
            prod1 := sub(prod1, gt(remainder, prod0))
            prod0 := sub(prod0, remainder)
        }

        // Factor out the largest power of two dividing denominator.
        uint256 twos = denominator & (~denominator + 1);
        assembly {
            denominator := div(denominator, twos)
            prod0 := div(prod0, twos)
            // twos becomes 2^256 / twos, to shift prod1's bits into prod0.
            twos := add(div(sub(0, twos), twos), 1)
        }

        // Merge the high bits shifted down from prod1 into prod0.
        prod0 |= prod1 * twos;

        // Seed the modular inverse of the (now odd) denominator mod 2^4.
        uint256 inverse = (3 * denominator) ^ 2;

        // Newton-Raphson iterations, each doubling the bits of precision,
        // yielding the inverse mod 2^256 after six steps.
        inverse *= 2 - denominator * inverse;
        inverse *= 2 - denominator * inverse;
        inverse *= 2 - denominator * inverse;
        inverse *= 2 - denominator * inverse;
        inverse *= 2 - denominator * inverse;
        inverse *= 2 - denominator * inverse;

        // Multiplying by the inverse performs the exact division mod 2^256.
        result = prod0 * inverse;
        return result;
    }
}
|
||||
/**
 * @dev Quantize the data returned from the account calls to the scale used by the EZKL model.
 * Decodes a single int256 from `data`, computes |value| * scale / decimals with
 * rounding half away from zero, and reapplies the sign.
 * @param data - The abi encoded int256 returned from an account call.
 * @param decimals - The fixed point divisor (10 ** decimal places) of the raw value.
 * @param scale - The scale used to convert the floating point value into a fixed point value.
 */
function quantizeData(
    bytes memory data,
    uint256 decimals,
    uint256 scale
) internal pure returns (int256 quantized_data) {
    int256 decoded = abi.decode(data, (int256));
    bool isNegative = decoded < 0;
    int256 magnitude = isNegative ? -decoded : decoded;
    // Full-precision scaled magnitude, then round half up on the remainder.
    uint256 scaled = mulDiv(uint256(magnitude), scale, decimals);
    if (mulmod(uint256(magnitude), scale, decimals) * 2 >= decimals) {
        scaled += 1;
    }
    if (isNegative) {
        quantized_data = -int256(scaled);
    } else {
        quantized_data = int256(scaled);
    }
}
|
||||
/**
 * @dev Make a static call to an account to fetch the data that EZKL reads from.
 * @param target - The address of the account to call.
 * @param data - The abi encoded view/pure function call to make to `target`.
 * @return The raw return data of the call. Reverts if the call fails or if an
 * empty return came from an address with no deployed code.
 */
function staticCall(
    address target,
    bytes memory data
) internal view returns (bytes memory) {
    (bool ok, bytes memory result) = target.staticcall(data);
    if (!ok) {
        revert("Address: low-level call failed");
    }
    // An empty return from an address without code means the staticcall
    // silently "succeeded" against a non-contract; reject that case.
    if (result.length == 0) {
        require(
            target.code.length > 0,
            "Address: call to non-contract"
        );
    }
    return result;
}
|
||||
/**
 * @dev Convert the fixed point quantized data into a field element.
 * @param x - The quantized data.
 * @return field_element - The value of x reduced into [0, ORDER).
 */
function toFieldElement(
    int256 x
) internal pure returns (uint256 field_element) {
    // Shift into the non-negative range before reducing modulo the field order.
    // Safe: ORDER is about 2^254 while x ranges over -2^127..2^127, so
    // x + int(ORDER) is always positive and the cast to uint256 is sound.
    int256 shifted = x + int(ORDER);
    field_element = uint256(shifted) % ORDER;
}
|
||||
|
||||
/**
 * @dev Make the account calls to fetch the data that EZKL reads from and attest to the data.
 * For each stored call: fetch the on-chain value, quantize it to the model's
 * fixed point scale, map it into the field, and require it to equal the
 * corresponding public instance (offset by `instanceOffset`).
 * @param instances - The public instances to the proof (the data in the proof that is publicly accessible to the verifier).
 */
function attestData(uint256[] memory instances) internal view {
    require(
        instances.length >= INPUT_CALLS + OUTPUT_CALLS,
        "Invalid public inputs length"
    );
    uint256 _accountCount = accountCalls.length;
    // Running index across all calls of all accounts; also indexes `scales`.
    uint counter = 0;
    for (uint8 i = 0; i < _accountCount; ++i) {
        address account = accountCalls[i].contractAddress;
        for (uint8 j = 0; j < accountCalls[i].callCount; j++) {
            // Fetch the raw abi-encoded value for this stored call.
            bytes memory returnData = staticCall(
                account,
                accountCalls[i].callData[j]
            );
            uint256 scale = scales[counter];
            int256 quantized_data = quantizeData(
                returnData,
                accountCalls[i].decimals[j],
                scale
            );
            uint256 field_element = toFieldElement(quantized_data);
            require(
                field_element == instances[counter + instanceOffset],
                "Public input does not match"
            );
            counter++;
            // NOTE: removed corrupted, non-compiling residue here that
            // referenced undeclared identifiers (output, x, scalars,
            // fieldElement) and duplicated the check above.
        }
    }
}
|
||||
|
||||
41
docs/advanced_security/public_commitments.md
Normal file
41
docs/advanced_security/public_commitments.md
Normal file
@@ -0,0 +1,41 @@
|
||||
## EZKL Security Note: Public Commitments and Low-Entropy Data
|
||||
|
||||
> **Disclaimer:** this is a more technical post that requires some prior knowledge of how ZK proving systems like Halo2 operate, and in particular in how these APIs are constructed. For background reading we highly recommend the [Halo2 book](https://zcash.github.io/halo2/) and [Halo2 Club](https://halo2.club/).
|
||||
|
||||
## Overview of commitments in EZKL
|
||||
|
||||
A common design pattern in a zero knowledge (zk) application is thus:
|
||||
- A prover has some data which is used within a circuit.
|
||||
- This data, as it may be high-dimensional or somewhat private, is pre-committed to using some hash function.
|
||||
- The zk-circuit which forms the core of the application then proves (para-phrasing) a statement of the form:
|
||||
>"I know some data D which when hashed corresponds to the pre-committed to value H + whatever else the circuit is proving over D".
|
||||
|
||||
From our own experience, we've implemented such patterns using snark-friendly hash functions like [Poseidon](https://www.poseidon-hash.info/), for which there is a relatively well vetted [implementation](https://docs.rs/halo2_gadgets/latest/halo2_gadgets/poseidon/index.html) in Halo2. Even then these hash functions can introduce lots of overhead and can be very expensive to generate proofs for if the dimensionality of the data D is large.
|
||||
|
||||
You can also implement such a pattern using Halo2's `Fixed` columns _if the privacy preservation of the pre-image is not necessary_. These are Halo2 columns (i.e in reality just polynomials) that are left unblinded (unlike the blinded `Advice` columns), and whose commitments are shared with the verifier by way of the verifying key for the application's zk-circuit. These commitments are much lower cost to generate than implementing a hashing function, such as Poseidon, within a circuit.
|
||||
|
||||
> **Note:** Blinding is the process whereby a certain set of the final elements (i.e rows) of a Halo2 column are set to random field elements. This is the mechanism by which Halo2 achieves its zero knowledge properties for `Advice` columns. By contrast `Fixed` columns aren't zero-knowledge in that they are vulnerable to dictionary attacks in the same manner a hash function is. Given some set of known or popular data D an attacker can attempt to recover the pre-image of a hash by running D through the hash function to see if the outputs match a public commitment. These attacks aren't "possible" on blinded `Advice` columns.
|
||||
|
||||
> **Further Note:** Note that without blinding, with access to `M` proofs, each of which contains an evaluation of the polynomial at a different point, an attacker can more easily recover a non blinded column's pre-image. This is because each proof generates a new query and evaluation of the polynomial represented by the column and as such with repetition a clearer picture can emerge of the column's pre-image. Thus unblinded columns should only be used for privacy preservation, in the manner of a hash, if the number of proofs generated against a fixed set of values is limited. More formally if M independent and _unique_ queries are generated; if M is equal to the degree + 1 of the polynomial represented by the column (i.e the unique lagrange interpolation of the values in the columns), then the column's pre-image can be recovered. As such as the logrows K increases, the more queries are required to recover the pre-image (as 2^K unique queries are required). This assumes that the entries in the column are not structured, as if they are then the number of queries required to recover the pre-image is reduced (eg. if all rows above a certain point are known to be nil).
|
||||
|
||||
The annoyance in using `Fixed` columns comes from the fact that they require generating a new verifying key every time a new set of commitments is generated.
|
||||
|
||||
> **Example:** Say for instance an application leverages a zero-knowledge circuit to prove the correct execution of a neural network. Every week the neural network is finetuned or retrained on new data. If the architecture remains the same then committing to the new network parameters, along with a new proof of performance on a test set, would be an ideal setup. If we leverage `Fixed` columns to commit to the model parameters, each new commitment will require re-generating a verifying key and sharing the new key with the verifier(s). This is not ideal UX and can become expensive if the verifier is deployed on-chain.
|
||||
|
||||
An ideal commitment would thus have the low cost of a `Fixed` column but wouldn't require regenerating a new verifying key for each new commitment.
|
||||
|
||||
### Unblinded Advice Columns
|
||||
|
||||
A first step in designing such a commitment is to allow for optionally unblinded `Advice` columns within the Halo2 API. These won't be included in the verifying key, AND are blinded with a constant factor `1` -- such that if someone knows the pre-image to the commitment, they can recover it by running it through the corresponding polynomial commitment scheme (in ezkl's case [KZG commitments](https://dankradfeist.de/ethereum/2020/06/16/kate-polynomial-commitments.html)).
|
||||
|
||||
This is implemented using the `polycommit` visibility parameter in the ezkl API.
|
||||
|
||||
## The Vulnerability of Public Commitments
|
||||
|
||||
|
||||
Public commitments in EZKL (both Poseidon-hashed inputs and KZG commitments) can be vulnerable to brute-force attacks when input data has low entropy. A malicious actor could reveal committed data by searching through possible input values, compromising privacy in applications like anonymous credentials. This is particularly relevant when input data comes from known finite sets (e.g., names, dates).
|
||||
|
||||
Example Risk: In an anonymous credential system using EZKL for ID verification, an attacker could match hashed outputs against a database of common identifying information to deanonymize users.
|
||||
|
||||
|
||||
|
||||
54
docs/advanced_security/quantization_backdoors.md
Normal file
54
docs/advanced_security/quantization_backdoors.md
Normal file
@@ -0,0 +1,54 @@
|
||||
# EZKL Security Note: Quantization-Activated Model Backdoors
|
||||
|
||||
## Model backdoors and provenance
|
||||
|
||||
Machine learning models inherently suffer from robustness issues, which can lead to various
|
||||
kinds of attacks, from backdoors to evasion attacks. These vulnerabilities are a direct byproduct of how machine learning models learn and cannot be remediated.
|
||||
|
||||
We say a model has a backdoor whenever a specific attacker-chosen trigger in the input leads
|
||||
to the model misbehaving. For instance, if we have an image classifier discriminating cats from dogs, the ability to turn any image of a cat into an image classified as a dog by changing a specific pixel pattern constitutes a backdoor.
|
||||
|
||||
Backdoors can be introduced using many different vectors. An attacker can introduce a
|
||||
backdoor using traditional security vulnerabilities. For instance, they could directly alter the file containing model weights or dynamically hack the Python code of the model. In addition, backdoors can be introduced by the training data through a process known as poisoning. In this case, an attacker adds malicious data points to the dataset before the model is trained so that the model learns to associate the backdoor trigger with the intended misbehavior.
|
||||
|
||||
All these vectors constitute a whole range of provenance challenges, as any component of an
|
||||
AI system can virtually be an entrypoint for a backdoor. Although provenance is already a
|
||||
concern with traditional code, the issue is exacerbated with AI, as retraining a model is
|
||||
cost-prohibitive. It is thus impractical to translate the “recompile it yourself” thinking to AI.
|
||||
|
||||
## Quantization activated backdoors
|
||||
|
||||
Backdoors are a generic concern in AI that is outside the scope of EZKL. However, EZKL may
|
||||
activate a specific subset of backdoors. Several academic papers have demonstrated the
|
||||
possibility, both in theory and in practice, of implanting undetectable and inactive backdoors in a full precision model that can be reactivated by quantization.
|
||||
|
||||
An external attacker may trick the user of an application running EZKL into loading a model
|
||||
containing a quantization backdoor. This backdoor is active in the resulting model and circuit but not in the full-precision model supplied to EZKL, compromising the integrity of the target application and the resulting proof.
|
||||
|
||||
### When is this a concern for me as a user?
|
||||
|
||||
Any untrusted component in your AI stack may be a backdoor vector. In practice, the most
|
||||
sensitive parts include:
|
||||
|
||||
- Datasets downloaded from the web or containing crowdsourced data
|
||||
- Models downloaded from the web even after finetuning
|
||||
- Untrusted software dependencies (well-known frameworks such as PyTorch can typically
|
||||
be considered trusted)
|
||||
- Any component loaded through an unsafe serialization format, such as Pickle.
|
||||
Because backdoors are inherent to ML and cannot be eliminated, reviewing the provenance of
|
||||
these sensitive components is especially important.
|
||||
|
||||
### Responsibilities of the user and EZKL
|
||||
|
||||
As EZKL cannot prevent backdoored models from being used, it is the responsibility of the user to review the provenance of all the components in their AI stack to ensure that no backdoor could have been implanted. EZKL shall not be held responsible for misleading prediction proofs resulting from using a backdoored model or for any harm caused to a system or its users due to a misbehaving model.
|
||||
|
||||
### Limitations:
|
||||
|
||||
- Attack effectiveness depends on calibration settings and internal rescaling operations.
|
||||
- Further research needed on backdoor persistence through witness/proof stages.
|
||||
- Can be mitigated by evaluating the quantized model (using `ezkl gen-witness`), rather than relying on the evaluation of the original model in pytorch or onnx-runtime, as a difference in evaluation could reveal a backdoor.
|
||||
|
||||
References:
|
||||
|
||||
1. [Quantization Backdoors to Deep Learning Commercial Frameworks (Ma et al., 2021)](https://arxiv.org/abs/2108.09187)
|
||||
2. [Planting Undetectable Backdoors in Machine Learning Models (Goldwasser et al., 2022)](https://arxiv.org/abs/2204.06974)
|
||||
@@ -1,7 +1,7 @@
|
||||
import ezkl
|
||||
|
||||
project = 'ezkl'
|
||||
release = '0.0.0'
|
||||
release = '22.0.0'
|
||||
version = release
|
||||
|
||||
|
||||
|
||||
@@ -32,6 +32,7 @@ use mnist::*;
|
||||
use rand::rngs::OsRng;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
|
||||
mod params;
|
||||
|
||||
const K: usize = 20;
|
||||
@@ -208,6 +209,8 @@ where
|
||||
padding: vec![(PADDING, PADDING); 2],
|
||||
stride: vec![STRIDE; 2],
|
||||
group: 1,
|
||||
data_format: DataFormat::NCHW,
|
||||
kernel_format: KernelFormat::OIHW,
|
||||
};
|
||||
let x = config
|
||||
.layer_config
|
||||
|
||||
1145
examples/notebooks/cat_and_dog.ipynb
Normal file
1145
examples/notebooks/cat_and_dog.ipynb
Normal file
File diff suppressed because it is too large
Load Diff
13
examples/notebooks/cat_and_dog_data.sh
Normal file
13
examples/notebooks/cat_and_dog_data.sh
Normal file
@@ -0,0 +1,13 @@
|
||||
# download tess data
|
||||
# check if first argument has been set
|
||||
if [ ! -z "$1" ]; then
|
||||
DATA_DIR=$1
|
||||
else
|
||||
DATA_DIR=data
|
||||
fi
|
||||
|
||||
echo "Downloading data to $DATA_DIR"
|
||||
|
||||
if [ ! -d "$DATA_DIR/CATDOG" ]; then
|
||||
kaggle datasets download tongpython/cat-and-dog -p $DATA_DIR/CATDOG --unzip
|
||||
fi
|
||||
@@ -272,33 +272,21 @@
|
||||
"\n",
|
||||
"- For file data sources, the raw floating point values that eventually get quantized, converted into field elements and stored in `witness.json` to be consumed by the circuit are stored. The output data contains the expected floating point values returned as outputs from running your vanilla pytorch model on the given inputs.\n",
|
||||
"- For on chain data sources, the input_data field contains all the data necessary to read and format the on chain data into something digestable by EZKL (aka field elements :-D). \n",
|
||||
"Here is what the schema for an on-chain data source graph input file should look like:\n",
|
||||
"Here is what the schema for an on-chain data source graph input file should look like for a single call data source:\n",
|
||||
" \n",
|
||||
"```json\n",
|
||||
"{\n",
|
||||
" \"input_data\": {\n",
|
||||
" \"rpc\": \"http://localhost:3030\", // The rpc endpoint of the chain you are deploying your verifier to\n",
|
||||
" \"calls\": [\n",
|
||||
" {\n",
|
||||
" \"call_data\": [\n",
|
||||
" [\n",
|
||||
" \"71e5ee5f0000000000000000000000000000000000000000000000000000000000000000\", // The abi encoded call data to a view function that returns a single on-chain data point (we only support uint256 returns for now)\n",
|
||||
" 7 // The number of decimal places of the large uint256 value. This is our way of representing large wei values as floating points on chain, since the evm only natively supports integer values.\n",
|
||||
" ],\n",
|
||||
" [\n",
|
||||
" \"71e5ee5f0000000000000000000000000000000000000000000000000000000000000001\",\n",
|
||||
" 5\n",
|
||||
" ],\n",
|
||||
" [\n",
|
||||
" \"71e5ee5f0000000000000000000000000000000000000000000000000000000000000002\",\n",
|
||||
" 5\n",
|
||||
" ]\n",
|
||||
" ],\n",
|
||||
" \"address\": \"5fbdb2315678afecb367f032d93f642f64180aa3\" // The address of the contract that we are calling to get the data. \n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
"}"
|
||||
" \"input_data\": {\n",
|
||||
" \"rpc\": \"http://localhost:3030\", // The rpc endpoint of the chain you are deploying your verifier to\n",
|
||||
" \"calls\": {\n",
|
||||
" \"call_data\": \"1f3be514000000000000000000000000c6962004f452be9203591991d15f6b388e09e8d00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000\", // The abi encoded call data to a view function that returns an array of on-chain data points we are attesting to. \n",
|
||||
" \"decimals\": 0, // The number of decimal places of the large uint256 value. This is our way of representing large wei values as floating points on chain, since the evm only natively supports integer values.\n",
|
||||
" \"address\": \"9A213F53334279C128C37DA962E5472eCD90554f\", // The address of the contract that we are calling to get the data. \n",
|
||||
" \"len\": 12 // The number of data points returned by the view function (the length of the array)\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -307,7 +295,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"await ezkl.setup_test_evm_witness(\n",
|
||||
"await ezkl.setup_test_evm_data(\n",
|
||||
" data_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" # we write the call data to the same file as the input data\n",
|
||||
@@ -484,8 +472,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True"
|
||||
@@ -538,9 +526,9 @@
|
||||
"res = await ezkl.deploy_da_evm(\n",
|
||||
" addr_path_da,\n",
|
||||
" input_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" )\n"
|
||||
]
|
||||
},
|
||||
@@ -569,8 +557,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" proof_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" proof_path,\n",
|
||||
" addr_da,\n",
|
||||
")"
|
||||
]
|
||||
@@ -578,7 +566,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "ezkl",
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -592,7 +580,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.5"
|
||||
"version": "3.12.9"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
|
||||
@@ -337,6 +337,7 @@
|
||||
"w3 = Web3(HTTPProvider(RPC_URL))\n",
|
||||
"\n",
|
||||
"def test_on_chain_data(res):\n",
|
||||
" print(f'poseidon_hash: {res[\"processed_outputs\"][\"poseidon_hash\"]}')\n",
|
||||
" # Step 0: Convert the tensor to a flat list\n",
|
||||
" data = [int(ezkl.felt_to_big_endian(res['processed_outputs']['poseidon_hash'][0]), 0)]\n",
|
||||
"\n",
|
||||
@@ -356,6 +357,9 @@
|
||||
" arr.push(_numbers[i]);\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
" function getArr() public view returns (uint[] memory) {\n",
|
||||
" return arr;\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
" '''\n",
|
||||
"\n",
|
||||
@@ -382,31 +386,30 @@
|
||||
" contract = w3.eth.contract(address=tx_receipt['contractAddress'], abi=abi)\n",
|
||||
"\n",
|
||||
" # Step 4: Interact with the contract\n",
|
||||
" calldata = []\n",
|
||||
" for i, _ in enumerate(data):\n",
|
||||
" call = contract.functions.arr(i).build_transaction()\n",
|
||||
" calldata.append((call['data'][2:], 0))\n",
|
||||
" calldata = contract.functions.getArr().build_transaction()['data'][2:]\n",
|
||||
"\n",
|
||||
" # Prepare the calls_to_account object\n",
|
||||
" # If you were calling view functions across multiple contracts,\n",
|
||||
" # you would have multiple entries in the calls_to_account array,\n",
|
||||
" # one for each contract.\n",
|
||||
" calls_to_account = [{\n",
|
||||
" decimals = [0] * len(data)\n",
|
||||
" call_to_account = {\n",
|
||||
" 'call_data': calldata,\n",
|
||||
" 'decimals': decimals,\n",
|
||||
" 'address': contract.address[2:], # remove the '0x' prefix\n",
|
||||
" }]\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" print(f'calls_to_account: {calls_to_account}')\n",
|
||||
" print(f'call_to_account: {call_to_account}')\n",
|
||||
"\n",
|
||||
" return calls_to_account\n",
|
||||
" return call_to_account\n",
|
||||
"\n",
|
||||
"# Now let's start the Anvil process. You don't need to do this if you are deploying to a non-local chain.\n",
|
||||
"start_anvil()\n",
|
||||
"\n",
|
||||
"# Now let's call our function, passing in the same input tensor we used to export the model 2 cells above.\n",
|
||||
"calls_to_account = test_on_chain_data(res)\n",
|
||||
"call_to_account = test_on_chain_data(res)\n",
|
||||
"\n",
|
||||
"data = dict(input_data = [data_array], output_data = {'rpc': RPC_URL, 'calls': calls_to_account })\n",
|
||||
"data = dict(input_data = [data_array], output_data = {'rpc': RPC_URL, 'call': call_to_account })\n",
|
||||
"\n",
|
||||
"# Serialize on-chain data into file:\n",
|
||||
"json.dump(data, open(\"input.json\", 'w'))\n",
|
||||
@@ -540,8 +543,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True"
|
||||
@@ -594,9 +597,9 @@
|
||||
"res = await ezkl.deploy_da_evm(\n",
|
||||
" addr_path_da,\n",
|
||||
" input_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" )\n"
|
||||
]
|
||||
},
|
||||
@@ -625,8 +628,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" proof_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" proof_path,\n",
|
||||
" addr_da,\n",
|
||||
")"
|
||||
]
|
||||
@@ -634,7 +637,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "ezkl",
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -648,7 +651,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.7"
|
||||
"version": "3.12.9"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
|
||||
@@ -276,33 +276,21 @@
|
||||
"\n",
|
||||
"- For file data sources, the raw floating point values that eventually get quantized, converted into field elements and stored in `witness.json` to be consumed by the circuit are stored. The output data contains the expected floating point values returned as outputs from running your vanilla pytorch model on the given inputs.\n",
|
||||
"- For on chain data sources, the input_data field contains all the data necessary to read and format the on chain data into something digestable by EZKL (aka field elements :-D). \n",
|
||||
"Here is what the schema for an on-chain data source graph input file should look like:\n",
|
||||
"Here is what the schema for an on-chain data source graph input file should look like for a single call data source:\n",
|
||||
" \n",
|
||||
"```json\n",
|
||||
"{\n",
|
||||
" \"input_data\": {\n",
|
||||
" \"rpc\": \"http://localhost:3030\", // The rpc endpoint of the chain you are deploying your verifier to\n",
|
||||
" \"calls\": [\n",
|
||||
" {\n",
|
||||
" \"call_data\": [\n",
|
||||
" [\n",
|
||||
" \"71e5ee5f0000000000000000000000000000000000000000000000000000000000000000\", // The abi encoded call data to a view function that returns a single on-chain data point (we only support uint256 returns for now)\n",
|
||||
" 7 // The number of decimal places of the large uint256 value. This is our way of representing large wei values as floating points on chain, since the evm only natively supports integer values.\n",
|
||||
" ],\n",
|
||||
" [\n",
|
||||
" \"71e5ee5f0000000000000000000000000000000000000000000000000000000000000001\",\n",
|
||||
" 5\n",
|
||||
" ],\n",
|
||||
" [\n",
|
||||
" \"71e5ee5f0000000000000000000000000000000000000000000000000000000000000002\",\n",
|
||||
" 5\n",
|
||||
" ]\n",
|
||||
" ],\n",
|
||||
" \"address\": \"5fbdb2315678afecb367f032d93f642f64180aa3\" // The address of the contract that we are calling to get the data. \n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
"}"
|
||||
" \"input_data\": {\n",
|
||||
" \"rpc\": \"http://localhost:3030\", // The rpc endpoint of the chain you are deploying your verifier to\n",
|
||||
" \"calls\": {\n",
|
||||
" \"call_data\": \"1f3be514000000000000000000000000c6962004f452be9203591991d15f6b388e09e8d00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000\", // The abi encoded call data to a view function that returns an array of on-chain data points we are attesting to. \n",
|
||||
" \"decimals\": 0, // The number of decimal places of the large uint256 value. This is our way of representing large wei values as floating points on chain, since the evm only natively supports integer values.\n",
|
||||
" \"address\": \"9A213F53334279C128C37DA962E5472eCD90554f\", // The address of the contract that we are calling to get the data. \n",
|
||||
" \"len\": 3 // The number of data points returned by the view function (the length of the array)\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -311,7 +299,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"await ezkl.setup_test_evm_witness(\n",
|
||||
"await ezkl.setup_test_evm_data(\n",
|
||||
" data_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" # we write the call data to the same file as the input data\n",
|
||||
@@ -337,7 +325,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = await ezkl.get_srs( settings_path)\n"
|
||||
"res = await ezkl.get_srs( settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -348,27 +336,6 @@
|
||||
"We now need to generate the circuit witness. These are the model outputs (and any hashes) that are generated when feeding the previously generated `input.json` through the circuit / model. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!export RUST_BACKTRACE=1\n",
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we setup verifying and proving keys for the circuit. As the name suggests the proving key is needed for ... proving and the verifying key is needed for ... verifying. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -391,6 +358,27 @@
|
||||
"assert os.path.isfile(settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!export RUST_BACKTRACE=1\n",
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here we setup verifying and proving keys for the circuit. As the name suggests the proving key is needed for ... proving and the verifying key is needed for ... verifying. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
@@ -486,8 +474,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True"
|
||||
@@ -541,9 +529,9 @@
|
||||
"res = await ezkl.deploy_da_evm(\n",
|
||||
" addr_path_da,\n",
|
||||
" input_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" )\n"
|
||||
]
|
||||
},
|
||||
@@ -572,8 +560,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" proof_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" proof_path,\n",
|
||||
" addr_da,\n",
|
||||
")"
|
||||
]
|
||||
@@ -581,7 +569,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "ezkl",
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -595,7 +583,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.13"
|
||||
"version": "3.11.5"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
|
||||
@@ -77,6 +77,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"gip_run_args = ezkl.PyRunArgs()\n",
|
||||
"gip_run_args.ignore_range_check_inputs_outputs = True\n",
|
||||
"gip_run_args.input_visibility = \"polycommit\" # matrix and generalized inverse commitments\n",
|
||||
"gip_run_args.output_visibility = \"fixed\" # no parameters used\n",
|
||||
"gip_run_args.param_visibility = \"fixed\" # should be Tensor(True)"
|
||||
@@ -335,9 +336,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.15"
|
||||
"version": "3.9.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
|
||||
@@ -453,8 +453,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" address_path,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
@@ -474,8 +474,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path,\n",
|
||||
" \"http://127.0.0.1:3030\"\n",
|
||||
")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
@@ -510,4 +510,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -462,8 +462,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" address_path,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
@@ -483,8 +483,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path,\n",
|
||||
" \"http://127.0.0.1:3030\"\n",
|
||||
")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
@@ -512,4 +512,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -308,8 +308,11 @@
|
||||
"compiled_filename = os.path.join('lol.compiled')\n",
|
||||
"settings_filename = os.path.join('settings.json')\n",
|
||||
"\n",
|
||||
"run_args = ezkl.PyRunArgs()\n",
|
||||
"run_args.decomp_legs = 4\n",
|
||||
"\n",
|
||||
"# Generate settings using ezkl\n",
|
||||
"res = ezkl.gen_settings(onnx_filename, settings_filename)\n",
|
||||
"res = ezkl.gen_settings(onnx_filename, settings_filename, py_run_args=run_args)\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
|
||||
@@ -504,8 +504,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" address_path,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
@@ -527,8 +527,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" proof_path,\n",
|
||||
" \"http://127.0.0.1:3030\"\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path\n",
|
||||
")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
@@ -558,4 +558,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
}
|
||||
|
||||
@@ -453,18 +453,18 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# now mock aggregate the proofs\n",
|
||||
"proofs = []\n",
|
||||
"for i in range(3):\n",
|
||||
" proof_path = os.path.join('proof_split_'+str(i)+'.json')\n",
|
||||
" proofs.append(proof_path)\n",
|
||||
"# proofs = []\n",
|
||||
"# for i in range(3):\n",
|
||||
"# proof_path = os.path.join('proof_split_'+str(i)+'.json')\n",
|
||||
"# proofs.append(proof_path)\n",
|
||||
"\n",
|
||||
"ezkl.mock_aggregate(proofs, logrows=23, split_proofs = True)"
|
||||
"# ezkl.mock_aggregate(proofs, logrows=26, split_proofs = True)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "ezkl",
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -478,7 +478,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.5"
|
||||
"version": "3.12.7"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
|
||||
@@ -152,9 +152,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!RUST_LOG=trace\n",
|
||||
"# TODO: Dictionary outputs\n",
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"run_args = ezkl.PyRunArgs()\n",
|
||||
"# logrows\n",
|
||||
"run_args.logrows = 20\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
@@ -302,7 +304,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.2"
|
||||
"version": "3.9.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -220,15 +220,6 @@
|
||||
"Check that the generated verifiers are identical for all models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"start_anvil()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -270,8 +261,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" \"verifier/reusable\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
@@ -297,7 +288,7 @@
|
||||
"for name in names:\n",
|
||||
" addr_path_vk = \"addr_vk.txt\"\n",
|
||||
" sol_key_code_path = os.path.join(name, 'test_key.sol')\n",
|
||||
" res = await ezkl.deploy_evm(addr_path_vk, sol_key_code_path, 'http://127.0.0.1:3030', \"vka\")\n",
|
||||
" res = await ezkl.deploy_evm(addr_path_vk, 'http://127.0.0.1:3030', sol_key_code_path, \"vka\")\n",
|
||||
" assert res == True\n",
|
||||
"\n",
|
||||
" with open(addr_path_vk, 'r') as file:\n",
|
||||
@@ -307,8 +298,8 @@
|
||||
" sol_code_path = os.path.join(name, 'vk.sol')\n",
|
||||
" res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" proof_path,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path,\n",
|
||||
" addr_vk = addr_vk\n",
|
||||
" )\n",
|
||||
" assert res == True"
|
||||
|
||||
@@ -167,6 +167,8 @@
|
||||
"run_args = ezkl.PyRunArgs()\n",
|
||||
"# \"hashed/private\" means that the output of the hashing is not visible to the verifier and is instead fed into the computational graph\n",
|
||||
"run_args.input_visibility = \"hashed/private/0\"\n",
|
||||
"# as the inputs are felts we turn off input range checks\n",
|
||||
"run_args.ignore_range_check_inputs_outputs = True\n",
|
||||
"# we set it to fix the set we want to check membership for\n",
|
||||
"run_args.param_visibility = \"fixed\"\n",
|
||||
"# the output is public -- set membership fails if it is not = 0\n",
|
||||
@@ -519,4 +521,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
@@ -204,6 +204,7 @@
|
||||
"run_args = ezkl.PyRunArgs()\n",
|
||||
"# \"polycommit\" means that the output of the hashing is not visible to the verifier and is instead fed into the computational graph\n",
|
||||
"run_args.input_visibility = \"polycommit\"\n",
|
||||
"run_args.ignore_range_check_inputs_outputs = True\n",
|
||||
"# the parameters are public\n",
|
||||
"run_args.param_visibility = \"fixed\"\n",
|
||||
"# the output is public (this is the inequality test)\n",
|
||||
@@ -514,4 +515,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
}
|
||||
@@ -20,7 +20,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -60,7 +60,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -94,7 +94,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -134,7 +134,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 44,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -183,7 +183,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -201,6 +201,7 @@
|
||||
"run_args.input_visibility = \"public\"\n",
|
||||
"run_args.param_visibility = \"private\"\n",
|
||||
"run_args.output_visibility = \"public\"\n",
|
||||
"run_args.decomp_legs=5\n",
|
||||
"run_args.num_inner_cols = 1\n",
|
||||
"run_args.variables = [(\"batch_size\", 1)]"
|
||||
]
|
||||
@@ -269,7 +270,7 @@
|
||||
"{\n",
|
||||
" \"input_data\": {\n",
|
||||
" \"rpc\": \"http://localhost:3030\", // The rpc endpoint of the chain you are deploying your verifier to\n",
|
||||
" \"calls\": {\n",
|
||||
" \"call\": {\n",
|
||||
" \"call_data\": \"1f3be514000000000000000000000000c6962004f452be9203591991d15f6b388e09e8d00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000\", // The abi encoded call data to a view function that returns an array of on-chain data points we are attesting to. \n",
|
||||
" \"decimals\": 0, // The number of decimal places of the large uint256 value. This is our way of representing large wei values as floating points on chain, since the evm only natively supports integer values.\n",
|
||||
" \"address\": \"9A213F53334279C128C37DA962E5472eCD90554f\", // The address of the contract that we are calling to get the data. \n",
|
||||
@@ -294,7 +295,6 @@
|
||||
"import torch\n",
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"# This function counts the decimal places of a floating point number\n",
|
||||
"def count_decimal_places(num):\n",
|
||||
" num_str = str(num)\n",
|
||||
" if '.' in num_str:\n",
|
||||
@@ -302,69 +302,28 @@
|
||||
" else:\n",
|
||||
" return 0\n",
|
||||
"\n",
|
||||
"# setup web3 instance\n",
|
||||
"w3 = Web3(HTTPProvider(RPC_URL)) \n",
|
||||
"\n",
|
||||
"def set_next_block_timestamp(anvil_url, timestamp):\n",
|
||||
" # Send the JSON-RPC request to Anvil\n",
|
||||
" payload = {\n",
|
||||
" \"jsonrpc\": \"2.0\",\n",
|
||||
" \"id\": 1,\n",
|
||||
" \"method\": \"evm_setNextBlockTimestamp\",\n",
|
||||
" \"params\": [timestamp]\n",
|
||||
" }\n",
|
||||
" response = requests.post(anvil_url, json=payload)\n",
|
||||
" if response.status_code == 200:\n",
|
||||
" print(f\"Next block timestamp set to: {timestamp}\")\n",
|
||||
" else:\n",
|
||||
" print(f\"Failed to set next block timestamp: {response.text}\")\n",
|
||||
"\n",
|
||||
"def on_chain_data(tensor):\n",
|
||||
" # Step 0: Convert the tensor to a flat list\n",
|
||||
" data = tensor.view(-1).tolist()\n",
|
||||
"\n",
|
||||
" # Step 1: Prepare the calldata\n",
|
||||
" secondsAgo = [len(data) - 1 - i for i in range(len(data))]\n",
|
||||
"\n",
|
||||
" # Step 2: Prepare and compile the contract UniTickAttestor contract\n",
|
||||
" contract_source_code = '''\n",
|
||||
" // SPDX-License-Identifier: MIT\n",
|
||||
" pragma solidity ^0.8.20;\n",
|
||||
"\n",
|
||||
" /// @title Pool state that is not stored\n",
|
||||
" /// @notice Contains view functions to provide information about the pool that is computed rather than stored on the\n",
|
||||
" /// blockchain. The functions here may have variable gas costs.\n",
|
||||
" interface IUniswapV3PoolDerivedState {\n",
|
||||
" /// @notice Returns the cumulative tick and liquidity as of each timestamp `secondsAgo` from the current block timestamp\n",
|
||||
" /// @dev To get a time weighted average tick or liquidity-in-range, you must call this with two values, one representing\n",
|
||||
" /// the beginning of the period and another for the end of the period. E.g., to get the last hour time-weighted average tick,\n",
|
||||
" /// you must call it with secondsAgos = [3600, 0].\n",
|
||||
" /// log base sqrt(1.0001) of token1 / token0. The TickMath library can be used to go from a tick value to a ratio.\n",
|
||||
" /// @dev The time weighted average tick represents the geometric time weighted average price of the pool, in\n",
|
||||
" /// @param secondsAgos From how long ago each cumulative tick and liquidity value should be returned\n",
|
||||
" /// @return tickCumulatives Cumulative tick values as of each `secondsAgos` from the current block timestamp\n",
|
||||
" /// @return secondsPerLiquidityCumulativeX128s Cumulative seconds per liquidity-in-range value as of each `secondsAgos` from the current block\n",
|
||||
" /// timestamp\n",
|
||||
" function observe(\n",
|
||||
" uint32[] calldata secondsAgos\n",
|
||||
" )\n",
|
||||
" external\n",
|
||||
" view\n",
|
||||
" returns (\n",
|
||||
" int56[] memory tickCumulatives,\n",
|
||||
" uint160[] memory secondsPerLiquidityCumulativeX128s\n",
|
||||
" );\n",
|
||||
" ) external view returns (\n",
|
||||
" int56[] memory tickCumulatives,\n",
|
||||
" uint160[] memory secondsPerLiquidityCumulativeX128s\n",
|
||||
" );\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" /// @title Uniswap Wrapper around `pool.observe` that stores the parameters for fetching and then attesting to historical data\n",
|
||||
" /// @notice Provides functions to integrate with V3 pool oracle\n",
|
||||
" contract UniTickAttestor {\n",
|
||||
" /**\n",
|
||||
" * @notice Calculates time-weighted means of tick and liquidity for a given Uniswap V3 pool\n",
|
||||
" * @param pool Address of the pool that we want to observe\n",
|
||||
" * @param secondsAgo Number of seconds in the past from which to calculate the time-weighted means\n",
|
||||
" * @return tickCumulatives The cumulative tick values as of each `secondsAgo` from the current block timestamp\n",
|
||||
" */\n",
|
||||
" int256[] private cachedTicks;\n",
|
||||
"\n",
|
||||
" function consult(\n",
|
||||
" IUniswapV3PoolDerivedState pool,\n",
|
||||
" uint32[] memory secondsAgo\n",
|
||||
@@ -375,6 +334,21 @@
|
||||
" tickCumulatives[i] = int256(_ticks[i]);\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" function cache_price(\n",
|
||||
" IUniswapV3PoolDerivedState pool,\n",
|
||||
" uint32[] memory secondsAgo\n",
|
||||
" ) public {\n",
|
||||
" (int56[] memory _ticks,) = pool.observe(secondsAgo);\n",
|
||||
" cachedTicks = new int256[](_ticks.length);\n",
|
||||
" for (uint256 i = 0; i < _ticks.length; i++) {\n",
|
||||
" cachedTicks[i] = int256(_ticks[i]);\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" function readPriceCache() public view returns (int256[] memory) {\n",
|
||||
" return cachedTicks;\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
" '''\n",
|
||||
"\n",
|
||||
@@ -384,69 +358,44 @@
|
||||
" \"settings\": {\"outputSelection\": {\"*\": {\"*\": [\"metadata\", \"evm.bytecode\", \"abi\"]}}}\n",
|
||||
" })\n",
|
||||
"\n",
|
||||
" # Get bytecode\n",
|
||||
" bytecode = compiled_sol['contracts']['UniTickAttestor.sol']['UniTickAttestor']['evm']['bytecode']['object']\n",
|
||||
"\n",
|
||||
" # Get ABI\n",
|
||||
" # In production if you are reading from really large contracts you can just use\n",
|
||||
" # a stripped down version of the ABI of the contract you are calling, containing only the view functions you will fetch data from.\n",
|
||||
" abi = json.loads(compiled_sol['contracts']['UniTickAttestor.sol']['UniTickAttestor']['metadata'])['output']['abi']\n",
|
||||
"\n",
|
||||
" # Step 3: Deploy the contract\n",
|
||||
" # Deploy contract\n",
|
||||
" UniTickAttestor = w3.eth.contract(abi=abi, bytecode=bytecode)\n",
|
||||
" tx_hash = UniTickAttestor.constructor().transact()\n",
|
||||
" tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)\n",
|
||||
" # If you are deploying to production you can skip the 3 lines of code above and just instantiate the contract like this,\n",
|
||||
" # passing the address and abi of the contract you are fetching data from.\n",
|
||||
" contract = w3.eth.contract(address=tx_receipt['contractAddress'], abi=abi)\n",
|
||||
"\n",
|
||||
" # Step 4: Interact with the contract\n",
|
||||
" call = contract.functions.consult(\n",
|
||||
" # Address of the UniV3 usdc-weth pool 0.005 fee\n",
|
||||
" # Step 4: Store data via cache_price transaction\n",
|
||||
" tx_hash = contract.functions.cache_price(\n",
|
||||
" \"0xC6962004f452bE9203591991D15f6b388e09E8D0\",\n",
|
||||
" secondsAgo\n",
|
||||
" ).build_transaction()\n",
|
||||
" result = contract.functions.consult(\n",
|
||||
" # Address of the UniV3 usdc-weth pool 0.005 fee\n",
|
||||
" \"0xC6962004f452bE9203591991D15f6b388e09E8D0\",\n",
|
||||
" secondsAgo\n",
|
||||
" ).call()\n",
|
||||
" \n",
|
||||
" print(f'result: {result}')\n",
|
||||
" ).transact()\n",
|
||||
" tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)\n",
|
||||
"\n",
|
||||
" # Step 5: Prepare calldata for readPriceCache\n",
|
||||
" call = contract.functions.readPriceCache().build_transaction()\n",
|
||||
" calldata = call['data'][2:]\n",
|
||||
"\n",
|
||||
" time_stamp = w3.eth.get_block('latest')['timestamp']\n",
|
||||
" # Get stored data\n",
|
||||
" result = contract.functions.readPriceCache().call()\n",
|
||||
" print(f'Cached ticks: {result}')\n",
|
||||
"\n",
|
||||
" print(f'time_stamp: {time_stamp}')\n",
|
||||
" decimals = [0] * len(data)\n",
|
||||
"\n",
|
||||
" # Set the next block timestamp using the fetched time_stamp\n",
|
||||
" set_next_block_timestamp(RPC_URL, time_stamp)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" # Prepare the calls_to_account object\n",
|
||||
" # If you were calling view functions across multiple contracts,\n",
|
||||
" # you would have multiple entries in the calls_to_account array,\n",
|
||||
" # one for each contract.\n",
|
||||
" call_to_account = {\n",
|
||||
" 'call_data': calldata,\n",
|
||||
" 'decimals': 0,\n",
|
||||
" 'address': contract.address[2:], # remove the '0x' prefix\n",
|
||||
" 'len': len(data),\n",
|
||||
" 'decimals': decimals,\n",
|
||||
" 'address': contract.address[2:],\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" print(f'call_to_account: {call_to_account}')\n",
|
||||
"\n",
|
||||
" return call_to_account\n",
|
||||
"\n",
|
||||
"# Now let's start the Anvil process. You don't need to do this if you are deploying to a non-local chain.\n",
|
||||
"start_anvil()\n",
|
||||
"call_to_account = on_chain_data(x)\n",
|
||||
"\n",
|
||||
"# Now let's call our function, passing in the same input tensor we used to export the model 2 cells above.\n",
|
||||
"calls_to_account = on_chain_data(x)\n",
|
||||
"\n",
|
||||
"data = dict(input_data = {'rpc': RPC_URL, 'calls': calls_to_account })\n",
|
||||
"\n",
|
||||
"# Serialize on-chain data into file:\n",
|
||||
"data = dict(input_data = {'rpc': RPC_URL, 'call': call_to_account })\n",
|
||||
"json.dump(data, open(\"input.json\", 'w'))"
|
||||
]
|
||||
},
|
||||
@@ -613,8 +562,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True"
|
||||
@@ -667,9 +616,9 @@
|
||||
"res = await ezkl.deploy_da_evm(\n",
|
||||
" addr_path_da,\n",
|
||||
" input_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" )\n"
|
||||
]
|
||||
},
|
||||
@@ -691,34 +640,7 @@
|
||||
"source": [
|
||||
"# !export RUST_BACKTRACE=1\n",
|
||||
"\n",
|
||||
"calls_to_account = on_chain_data(x)\n",
|
||||
"\n",
|
||||
"data = dict(input_data = {'rpc': RPC_URL, 'calls': calls_to_account })\n",
|
||||
"\n",
|
||||
"# Serialize on-chain data into file:\n",
|
||||
"json.dump(data, open(\"input.json\", 'w'))\n",
|
||||
"\n",
|
||||
"# setup web3 instance\n",
|
||||
"w3 = Web3(HTTPProvider(RPC_URL)) \n",
|
||||
"\n",
|
||||
"time_stamp = w3.eth.get_block('latest')['timestamp']\n",
|
||||
"\n",
|
||||
"print(f'time_stamp: {time_stamp}')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"\n",
|
||||
"res = ezkl.prove(\n",
|
||||
" witness_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
"# print(res)\n",
|
||||
"assert os.path.isfile(proof_path)\n",
|
||||
"# read the verifier address\n",
|
||||
"addr_verifier = None\n",
|
||||
@@ -731,8 +653,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" proof_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" proof_path,\n",
|
||||
" addr_da,\n",
|
||||
")"
|
||||
]
|
||||
|
||||
@@ -666,7 +666,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -689,8 +689,8 @@
|
||||
"# await\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" address_path,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
@@ -701,7 +701,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -722,8 +722,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path,\n",
|
||||
" \"http://127.0.0.1:3030\"\n",
|
||||
")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
@@ -743,7 +743,8 @@
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
@@ -756,7 +757,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.2"
|
||||
"version": "3.12.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -849,8 +849,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" address_path,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
@@ -870,8 +870,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" proof_path,\n",
|
||||
" \"http://127.0.0.1:3030\"\n",
|
||||
" \"http://127.0.0.1:3030\",\n",
|
||||
" proof_path\n",
|
||||
")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
@@ -905,4 +905,4 @@
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -246,7 +246,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ezkl.setup_test_evm_witness(\n",
|
||||
"ezkl.setup_test_evm_data(\n",
|
||||
" data_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" # we write the call data to the same file as the input data\n",
|
||||
@@ -358,8 +358,8 @@
|
||||
"\n",
|
||||
"res = await ezkl.deploy_evm(\n",
|
||||
" addr_path_verifier,\n",
|
||||
" 'http://127.0.0.1:3030',\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True"
|
||||
@@ -374,14 +374,6 @@
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cc888848",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -413,9 +405,9 @@
|
||||
"res = await ezkl.deploy_da_evm(\n",
|
||||
" addr_path_da,\n",
|
||||
" input_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" settings_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
@@ -478,8 +470,8 @@
|
||||
"\n",
|
||||
"res = ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" proof_path,\n",
|
||||
" RPC_URL,\n",
|
||||
" proof_path,\n",
|
||||
" addr_da,\n",
|
||||
")"
|
||||
]
|
||||
@@ -525,7 +517,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": ".env",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -539,7 +531,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.2"
|
||||
"version": "3.12.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
106
examples/onnx/1d_conv/input.json
Normal file
106
examples/onnx/1d_conv/input.json
Normal file
@@ -0,0 +1,106 @@
|
||||
{
|
||||
"input_data": [
|
||||
[
|
||||
8761,
|
||||
7654,
|
||||
8501,
|
||||
2404,
|
||||
6929,
|
||||
8858,
|
||||
5946,
|
||||
3673,
|
||||
4131,
|
||||
3854,
|
||||
8137,
|
||||
8239,
|
||||
9038,
|
||||
6299,
|
||||
1118,
|
||||
9737,
|
||||
208,
|
||||
7954,
|
||||
3691,
|
||||
610,
|
||||
3468,
|
||||
3314,
|
||||
8658,
|
||||
8366,
|
||||
2850,
|
||||
477,
|
||||
6114,
|
||||
232,
|
||||
4601,
|
||||
7420,
|
||||
5713,
|
||||
2936,
|
||||
6061,
|
||||
2870,
|
||||
8421,
|
||||
177,
|
||||
7107,
|
||||
7382,
|
||||
6115,
|
||||
5487,
|
||||
8502,
|
||||
2559,
|
||||
1875,
|
||||
129,
|
||||
8533,
|
||||
8201,
|
||||
8414,
|
||||
4775,
|
||||
9817,
|
||||
3127,
|
||||
8761,
|
||||
7654,
|
||||
8501,
|
||||
2404,
|
||||
6929,
|
||||
8858,
|
||||
5946,
|
||||
3673,
|
||||
4131,
|
||||
3854,
|
||||
8137,
|
||||
8239,
|
||||
9038,
|
||||
6299,
|
||||
1118,
|
||||
9737,
|
||||
208,
|
||||
7954,
|
||||
3691,
|
||||
610,
|
||||
3468,
|
||||
3314,
|
||||
8658,
|
||||
8366,
|
||||
2850,
|
||||
477,
|
||||
6114,
|
||||
232,
|
||||
4601,
|
||||
7420,
|
||||
5713,
|
||||
2936,
|
||||
6061,
|
||||
2870,
|
||||
8421,
|
||||
177,
|
||||
7107,
|
||||
7382,
|
||||
6115,
|
||||
5487,
|
||||
8502,
|
||||
2559,
|
||||
1875,
|
||||
129,
|
||||
8533,
|
||||
8201,
|
||||
8414,
|
||||
4775,
|
||||
9817,
|
||||
3127
|
||||
]
|
||||
]
|
||||
}
|
||||
BIN
examples/onnx/1d_conv/network.onnx
Normal file
BIN
examples/onnx/1d_conv/network.onnx
Normal file
Binary file not shown.
1
examples/onnx/1l_div/settings.json
Normal file
1
examples/onnx/1l_div/settings.json
Normal file
@@ -0,0 +1 @@
|
||||
{"run_args":{"input_scale":7,"param_scale":7,"scale_rebase_multiplier":1,"lookup_range":[-32768,32768],"logrows":17,"num_inner_cols":2,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Private","rebase_frac_zero_constants":false,"check_mode":"UNSAFE","commitment":"KZG","decomp_base":16384,"decomp_legs":2,"bounded_log_lookup":false,"ignore_range_check_inputs_outputs":false},"num_rows":54,"total_assignments":109,"total_const_size":4,"total_dynamic_col_size":0,"max_dynamic_input_len":0,"num_dynamic_lookups":0,"num_shuffles":0,"total_shuffle_col_size":0,"model_instance_shapes":[[1,1]],"model_output_scales":[7],"model_input_scales":[7],"module_sizes":{"polycommit":[],"poseidon":[0,[0]]},"required_lookups":[],"required_range_checks":[[-1,1],[0,16383]],"check_mode":"UNSAFE","version":"0.0.0","num_blinding_factors":null,"timestamp":1739396322131,"input_types":["F32"],"output_types":["F32"]}
|
||||
1
examples/onnx/fr_age/input.json
Normal file
1
examples/onnx/fr_age/input.json
Normal file
File diff suppressed because one or more lines are too long
BIN
examples/onnx/fr_age/network.onnx
Normal file
BIN
examples/onnx/fr_age/network.onnx
Normal file
Binary file not shown.
1
examples/onnx/hierarchical_risk/input.json
Normal file
1
examples/onnx/hierarchical_risk/input.json
Normal file
File diff suppressed because one or more lines are too long
BIN
examples/onnx/hierarchical_risk/network.onnx
Normal file
BIN
examples/onnx/hierarchical_risk/network.onnx
Normal file
Binary file not shown.
42
examples/onnx/integer_div/gen.py
Normal file
42
examples/onnx/integer_div/gen.py
Normal file
@@ -0,0 +1,42 @@
|
||||
from torch import nn
|
||||
import torch
|
||||
import json
|
||||
import numpy as np
|
||||
|
||||
|
||||
class MyModel(nn.Module):
|
||||
def __init__(self):
|
||||
super(MyModel, self).__init__()
|
||||
|
||||
def forward(self, x):
|
||||
return x // 3
|
||||
|
||||
|
||||
circuit = MyModel()
|
||||
|
||||
x = torch.randint(0, 10, (1, 2, 2, 8))
|
||||
|
||||
out = circuit(x)
|
||||
|
||||
print(x)
|
||||
print(out)
|
||||
print(x/3)
|
||||
|
||||
torch.onnx.export(circuit, x, "network.onnx",
|
||||
export_params=True, # store the trained parameter weights inside the model file
|
||||
opset_version=17, # the ONNX version to export the model to
|
||||
do_constant_folding=True, # whether to execute constant folding for optimization
|
||||
input_names=['input'], # the model's input names
|
||||
output_names=['output'], # the model's output names
|
||||
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
|
||||
'output': {0: 'batch_size'}})
|
||||
|
||||
|
||||
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
|
||||
|
||||
data = dict(
|
||||
input_data=[d1],
|
||||
)
|
||||
|
||||
# Serialize data into file:
|
||||
json.dump(data, open("input.json", 'w'))
|
||||
1
examples/onnx/integer_div/input.json
Normal file
1
examples/onnx/integer_div/input.json
Normal file
@@ -0,0 +1 @@
|
||||
{"input_data": [[3, 4, 0, 9, 2, 6, 2, 5, 1, 5, 3, 5, 5, 7, 0, 2, 6, 1, 4, 4, 1, 9, 7, 7, 5, 8, 2, 0, 1, 5, 9, 8]]}
|
||||
BIN
examples/onnx/integer_div/network.onnx
Normal file
BIN
examples/onnx/integer_div/network.onnx
Normal file
Binary file not shown.
4
ezkl.pyi
4
ezkl.pyi
@@ -706,9 +706,9 @@ def setup_aggregate(sample_snarks:typing.Sequence[str | os.PathLike | pathlib.Pa
|
||||
"""
|
||||
...
|
||||
|
||||
def setup_test_evm_witness(data_path:str | os.PathLike | pathlib.Path,compiled_circuit_path:str | os.PathLike | pathlib.Path,test_data:str | os.PathLike | pathlib.Path,input_source:PyTestDataSource,output_source:PyTestDataSource,rpc_url:typing.Optional[str]) -> typing.Any:
|
||||
def setup_test_evm_data(data_path:str | os.PathLike | pathlib.Path,compiled_circuit_path:str | os.PathLike | pathlib.Path,test_data:str | os.PathLike | pathlib.Path,input_source:PyTestDataSource,output_source:PyTestDataSource,rpc_url:typing.Optional[str]) -> typing.Any:
|
||||
r"""
|
||||
Setup test evm witness
|
||||
Setup test evm data
|
||||
|
||||
Arguments
|
||||
---------
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
[toolchain]
|
||||
channel = "nightly-2024-07-18"
|
||||
channel = "nightly-2025-02-17"
|
||||
components = ["rustfmt", "clippy"]
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
// ignore file if compiling for wasm
|
||||
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use mimalloc::MiMalloc;
|
||||
|
||||
#[global_allocator]
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
static GLOBAL: MiMalloc = MiMalloc;
|
||||
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use clap::{CommandFactory, Parser};
|
||||
@@ -24,6 +28,8 @@ use std::env;
|
||||
#[tokio::main(flavor = "current_thread")]
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
pub async fn main() {
|
||||
use log::debug;
|
||||
|
||||
let args = Cli::parse();
|
||||
|
||||
if let Some(generator) = args.generator {
|
||||
@@ -38,7 +44,7 @@ pub async fn main() {
|
||||
} else {
|
||||
info!("Running with CPU");
|
||||
}
|
||||
info!(
|
||||
debug!(
|
||||
"command: \n {}",
|
||||
&command.as_json().to_colored_json_auto().unwrap()
|
||||
);
|
||||
|
||||
@@ -1,35 +1,34 @@
|
||||
use crate::Commitments;
|
||||
use crate::RunArgs;
|
||||
use crate::circuit::CheckMode;
|
||||
use crate::circuit::InputType;
|
||||
use crate::circuit::modules::Module;
|
||||
use crate::circuit::modules::polycommit::PolyCommitChip;
|
||||
use crate::circuit::modules::poseidon::{
|
||||
spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH},
|
||||
PoseidonChip,
|
||||
spec::{POSEIDON_RATE, POSEIDON_WIDTH, PoseidonSpec},
|
||||
};
|
||||
use crate::circuit::modules::Module;
|
||||
use crate::circuit::InputType;
|
||||
use crate::circuit::{CheckMode, Tolerance};
|
||||
use crate::commands::*;
|
||||
use crate::fieldutils::{felt_to_integer_rep, integer_rep_to_felt, IntegerRep};
|
||||
use crate::graph::modules::POSEIDON_LEN_GRAPH;
|
||||
use crate::fieldutils::{IntegerRep, felt_to_integer_rep, integer_rep_to_felt};
|
||||
use crate::graph::TestDataSource;
|
||||
use crate::graph::{
|
||||
quantize_float, scale_to_multiplier, GraphCircuit, GraphSettings, Model, Visibility,
|
||||
GraphCircuit, GraphSettings, Model, Visibility, quantize_float, scale_to_multiplier,
|
||||
};
|
||||
use crate::pfsys::evm::aggregation_kzg::AggregationCircuit;
|
||||
use crate::pfsys::{
|
||||
load_pk, load_vk, save_params, save_vk, srs::gen_srs as ezkl_gen_srs, srs::load_srs_prover,
|
||||
ProofType, TranscriptType,
|
||||
ProofType, TranscriptType, load_pk, load_vk, save_params, save_vk,
|
||||
srs::gen_srs as ezkl_gen_srs, srs::load_srs_prover,
|
||||
};
|
||||
use crate::Commitments;
|
||||
use crate::RunArgs;
|
||||
use halo2_proofs::poly::ipa::commitment::IPACommitmentScheme;
|
||||
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
|
||||
use halo2curves::bn256::{Bn256, Fq, Fr, G1Affine, G1};
|
||||
use halo2curves::bn256::{Bn256, Fq, Fr, G1, G1Affine};
|
||||
use pyo3::exceptions::{PyIOError, PyRuntimeError};
|
||||
use pyo3::prelude::*;
|
||||
use pyo3::wrap_pyfunction;
|
||||
use pyo3_log;
|
||||
use pyo3_stub_gen::{
|
||||
define_stub_info_gatherer, derive::gen_stub_pyclass, derive::gen_stub_pyclass_enum,
|
||||
derive::gen_stub_pyfunction, TypeInfo,
|
||||
TypeInfo, define_stub_info_gatherer, derive::gen_stub_pyclass, derive::gen_stub_pyclass_enum,
|
||||
derive::gen_stub_pyfunction,
|
||||
};
|
||||
use snark_verifier::util::arithmetic::PrimeField;
|
||||
use std::collections::HashSet;
|
||||
@@ -156,9 +155,6 @@ impl pyo3::ToPyObject for PyG1Affine {
|
||||
#[derive(Clone)]
|
||||
#[gen_stub_pyclass]
|
||||
struct PyRunArgs {
|
||||
#[pyo3(get, set)]
|
||||
/// float: The tolerance for error on model outputs
|
||||
pub tolerance: f32,
|
||||
#[pyo3(get, set)]
|
||||
/// int: The denominator in the fixed point representation used when quantizing inputs
|
||||
pub input_scale: crate::Scale,
|
||||
@@ -207,6 +203,9 @@ struct PyRunArgs {
|
||||
/// bool: Should the circuit use unbounded lookups for log
|
||||
#[pyo3(get, set)]
|
||||
pub bounded_log_lookup: bool,
|
||||
/// bool: Should the circuit use range checks for inputs and outputs (set to false if the input is a felt)
|
||||
#[pyo3(get, set)]
|
||||
pub ignore_range_check_inputs_outputs: bool,
|
||||
}
|
||||
|
||||
/// default instantiation of PyRunArgs
|
||||
@@ -223,7 +222,6 @@ impl From<PyRunArgs> for RunArgs {
|
||||
fn from(py_run_args: PyRunArgs) -> Self {
|
||||
RunArgs {
|
||||
bounded_log_lookup: py_run_args.bounded_log_lookup,
|
||||
tolerance: Tolerance::from(py_run_args.tolerance),
|
||||
input_scale: py_run_args.input_scale,
|
||||
param_scale: py_run_args.param_scale,
|
||||
num_inner_cols: py_run_args.num_inner_cols,
|
||||
@@ -239,6 +237,7 @@ impl From<PyRunArgs> for RunArgs {
|
||||
commitment: Some(py_run_args.commitment.into()),
|
||||
decomp_base: py_run_args.decomp_base,
|
||||
decomp_legs: py_run_args.decomp_legs,
|
||||
ignore_range_check_inputs_outputs: py_run_args.ignore_range_check_inputs_outputs,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -247,7 +246,6 @@ impl Into<PyRunArgs> for RunArgs {
|
||||
fn into(self) -> PyRunArgs {
|
||||
PyRunArgs {
|
||||
bounded_log_lookup: self.bounded_log_lookup,
|
||||
tolerance: self.tolerance.val,
|
||||
input_scale: self.input_scale,
|
||||
param_scale: self.param_scale,
|
||||
num_inner_cols: self.num_inner_cols,
|
||||
@@ -263,6 +261,7 @@ impl Into<PyRunArgs> for RunArgs {
|
||||
commitment: self.commitment.into(),
|
||||
decomp_base: self.decomp_base,
|
||||
decomp_legs: self.decomp_legs,
|
||||
ignore_range_check_inputs_outputs: self.ignore_range_check_inputs_outputs,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -333,6 +332,8 @@ enum PyInputType {
|
||||
Int,
|
||||
///
|
||||
TDim,
|
||||
///
|
||||
Unknown,
|
||||
}
|
||||
|
||||
impl From<InputType> for PyInputType {
|
||||
@@ -344,6 +345,7 @@ impl From<InputType> for PyInputType {
|
||||
InputType::F64 => PyInputType::F64,
|
||||
InputType::Int => PyInputType::Int,
|
||||
InputType::TDim => PyInputType::TDim,
|
||||
InputType::Unknown => PyInputType::Unknown,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -357,6 +359,7 @@ impl From<PyInputType> for InputType {
|
||||
PyInputType::F64 => InputType::F64,
|
||||
PyInputType::Int => InputType::Int,
|
||||
PyInputType::TDim => InputType::TDim,
|
||||
PyInputType::Unknown => InputType::Unknown,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -371,6 +374,7 @@ impl FromStr for PyInputType {
|
||||
"f64" => Ok(PyInputType::F64),
|
||||
"int" => Ok(PyInputType::Int),
|
||||
"tdim" => Ok(PyInputType::TDim),
|
||||
"unknown" => Ok(PyInputType::Unknown),
|
||||
_ => Err("Invalid value for InputType".to_string()),
|
||||
}
|
||||
}
|
||||
@@ -573,10 +577,7 @@ fn poseidon_hash(message: Vec<PyFelt>) -> PyResult<Vec<PyFelt>> {
|
||||
.map(crate::pfsys::string_to_field::<Fr>)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let output =
|
||||
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>::run(
|
||||
message.clone(),
|
||||
)
|
||||
let output = PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::run(message.clone())
|
||||
.map_err(|_| PyIOError::new_err("Failed to run poseidon"))?;
|
||||
|
||||
let hash = output[0]
|
||||
@@ -591,7 +592,7 @@ fn poseidon_hash(message: Vec<PyFelt>) -> PyResult<Vec<PyFelt>> {
|
||||
/// Arguments
|
||||
/// -------
|
||||
/// message: list[str]
|
||||
/// List of field elements represnted as strings
|
||||
/// List of field elements represented as strings
|
||||
///
|
||||
/// vk_path: str
|
||||
/// Path to the verification key
|
||||
@@ -650,7 +651,7 @@ fn kzg_commit(
|
||||
/// Arguments
|
||||
/// -------
|
||||
/// message: list[str]
|
||||
/// List of field elements represnted as strings
|
||||
/// List of field elements represented as strings
|
||||
///
|
||||
/// vk_path: str
|
||||
/// Path to the verification key
|
||||
@@ -961,6 +962,8 @@ fn gen_settings(
|
||||
output=PathBuf::from(DEFAULT_SETTINGS),
|
||||
variables=Vec::from([("batch_size".to_string(), 1)]),
|
||||
seed=DEFAULT_SEED.parse().unwrap(),
|
||||
min=None,
|
||||
max=None
|
||||
))]
|
||||
#[gen_stub_pyfunction]
|
||||
fn gen_random_data(
|
||||
@@ -968,8 +971,10 @@ fn gen_random_data(
|
||||
output: PathBuf,
|
||||
variables: Vec<(String, usize)>,
|
||||
seed: u64,
|
||||
min: Option<f32>,
|
||||
max: Option<f32>,
|
||||
) -> Result<bool, PyErr> {
|
||||
crate::execute::gen_random_data(model, output, variables, seed).map_err(|e| {
|
||||
crate::execute::gen_random_data(model, output, variables, seed, min, max).map_err(|e| {
|
||||
let err_str = format!("Failed to generate settings: {}", e);
|
||||
PyRuntimeError::new_err(err_str)
|
||||
})?;
|
||||
@@ -1008,7 +1013,7 @@ fn gen_random_data(
|
||||
/// bool
|
||||
///
|
||||
#[pyfunction(signature = (
|
||||
data = PathBuf::from(DEFAULT_CALIBRATION_FILE),
|
||||
data = String::from(DEFAULT_CALIBRATION_FILE),
|
||||
model = PathBuf::from(DEFAULT_MODEL),
|
||||
settings = PathBuf::from(DEFAULT_SETTINGS),
|
||||
target = CalibrationTarget::default(), // default is "resources
|
||||
@@ -1020,7 +1025,7 @@ fn gen_random_data(
|
||||
#[gen_stub_pyfunction]
|
||||
fn calibrate_settings(
|
||||
py: Python,
|
||||
data: PathBuf,
|
||||
data: String,
|
||||
model: PathBuf,
|
||||
settings: PathBuf,
|
||||
target: CalibrationTarget,
|
||||
@@ -1075,7 +1080,7 @@ fn calibrate_settings(
|
||||
/// Python object containing the witness values
|
||||
///
|
||||
#[pyfunction(signature = (
|
||||
data=PathBuf::from(DEFAULT_DATA),
|
||||
data=String::from(DEFAULT_DATA),
|
||||
model=PathBuf::from(DEFAULT_COMPILED_CIRCUIT),
|
||||
output=PathBuf::from(DEFAULT_WITNESS),
|
||||
vk_path=None,
|
||||
@@ -1084,7 +1089,7 @@ fn calibrate_settings(
|
||||
#[gen_stub_pyfunction]
|
||||
fn gen_witness(
|
||||
py: Python,
|
||||
data: PathBuf,
|
||||
data: String,
|
||||
model: PathBuf,
|
||||
output: Option<PathBuf>,
|
||||
vk_path: Option<PathBuf>,
|
||||
@@ -1753,7 +1758,7 @@ fn create_evm_vka(
|
||||
/// bool
|
||||
///
|
||||
#[pyfunction(signature = (
|
||||
input_data=PathBuf::from(DEFAULT_DATA),
|
||||
input_data=String::from(DEFAULT_DATA),
|
||||
settings_path=PathBuf::from(DEFAULT_SETTINGS),
|
||||
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE_DA),
|
||||
abi_path=PathBuf::from(DEFAULT_VERIFIER_DA_ABI),
|
||||
@@ -1762,7 +1767,7 @@ fn create_evm_vka(
|
||||
#[gen_stub_pyfunction]
|
||||
fn create_evm_data_attestation(
|
||||
py: Python,
|
||||
input_data: PathBuf,
|
||||
input_data: String,
|
||||
settings_path: PathBuf,
|
||||
sol_code_path: PathBuf,
|
||||
abi_path: PathBuf,
|
||||
@@ -1818,20 +1823,20 @@ fn create_evm_data_attestation(
|
||||
test_data,
|
||||
input_source,
|
||||
output_source,
|
||||
rpc_url=None,
|
||||
rpc_url,
|
||||
))]
|
||||
#[gen_stub_pyfunction]
|
||||
fn setup_test_evm_witness(
|
||||
fn setup_test_evm_data(
|
||||
py: Python,
|
||||
data_path: PathBuf,
|
||||
data_path: String,
|
||||
compiled_circuit_path: PathBuf,
|
||||
test_data: PathBuf,
|
||||
input_source: PyTestDataSource,
|
||||
output_source: PyTestDataSource,
|
||||
rpc_url: Option<String>,
|
||||
rpc_url: String,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
pyo3_async_runtimes::tokio::future_into_py(py, async move {
|
||||
crate::execute::setup_test_evm_witness(
|
||||
crate::execute::setup_test_evm_data(
|
||||
data_path,
|
||||
compiled_circuit_path,
|
||||
test_data,
|
||||
@@ -1841,7 +1846,7 @@ fn setup_test_evm_witness(
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let err_str = format!("Failed to run setup_test_evm_witness: {}", e);
|
||||
let err_str = format!("Failed to run setup_test_evm_data: {}", e);
|
||||
PyRuntimeError::new_err(err_str)
|
||||
})?;
|
||||
|
||||
@@ -1852,8 +1857,8 @@ fn setup_test_evm_witness(
|
||||
/// deploys the solidity verifier
|
||||
#[pyfunction(signature = (
|
||||
addr_path,
|
||||
rpc_url,
|
||||
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE),
|
||||
rpc_url=None,
|
||||
contract_type=ContractType::default(),
|
||||
optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
|
||||
private_key=None,
|
||||
@@ -1862,8 +1867,8 @@ fn setup_test_evm_witness(
|
||||
fn deploy_evm(
|
||||
py: Python,
|
||||
addr_path: PathBuf,
|
||||
rpc_url: String,
|
||||
sol_code_path: PathBuf,
|
||||
rpc_url: Option<String>,
|
||||
contract_type: ContractType,
|
||||
optimizer_runs: usize,
|
||||
private_key: Option<String>,
|
||||
@@ -1891,9 +1896,9 @@ fn deploy_evm(
|
||||
#[pyfunction(signature = (
|
||||
addr_path,
|
||||
input_data,
|
||||
rpc_url,
|
||||
settings_path=PathBuf::from(DEFAULT_SETTINGS),
|
||||
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE_DA),
|
||||
rpc_url=None,
|
||||
optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
|
||||
private_key=None
|
||||
))]
|
||||
@@ -1901,10 +1906,10 @@ fn deploy_evm(
|
||||
fn deploy_da_evm(
|
||||
py: Python,
|
||||
addr_path: PathBuf,
|
||||
input_data: PathBuf,
|
||||
input_data: String,
|
||||
rpc_url: String,
|
||||
settings_path: PathBuf,
|
||||
sol_code_path: PathBuf,
|
||||
rpc_url: Option<String>,
|
||||
optimizer_runs: usize,
|
||||
private_key: Option<String>,
|
||||
) -> PyResult<Bound<'_, PyAny>> {
|
||||
@@ -1944,15 +1949,15 @@ fn deploy_da_evm(
|
||||
/// does the verifier use data attestation ?
|
||||
///
|
||||
/// addr_vk: str
|
||||
/// The addess of the separate VK contract (if the verifier key is rendered as a separate contract)
|
||||
/// The address of the separate VK contract (if the verifier key is rendered as a separate contract)
|
||||
/// Returns
|
||||
/// -------
|
||||
/// bool
|
||||
///
|
||||
#[pyfunction(signature = (
|
||||
addr_verifier,
|
||||
rpc_url,
|
||||
proof_path=PathBuf::from(DEFAULT_PROOF),
|
||||
rpc_url=None,
|
||||
addr_da = None,
|
||||
addr_vk = None,
|
||||
))]
|
||||
@@ -1960,8 +1965,8 @@ fn deploy_da_evm(
|
||||
fn verify_evm<'a>(
|
||||
py: Python<'a>,
|
||||
addr_verifier: &'a str,
|
||||
rpc_url: String,
|
||||
proof_path: PathBuf,
|
||||
rpc_url: Option<String>,
|
||||
addr_da: Option<&'a str>,
|
||||
addr_vk: Option<&'a str>,
|
||||
) -> PyResult<Bound<'a, PyAny>> {
|
||||
@@ -2106,7 +2111,7 @@ fn ezkl(m: &Bound<'_, PyModule>) -> PyResult<()> {
|
||||
m.add_function(wrap_pyfunction!(deploy_evm, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(deploy_da_evm, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(verify_evm, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(setup_test_evm_witness, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(setup_test_evm_data, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(create_evm_verifier_aggr, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(create_evm_data_attestation, m)?)?;
|
||||
m.add_function(wrap_pyfunction!(encode_evm_calldata, m)?)?;
|
||||
|
||||
@@ -8,10 +8,7 @@ use crate::{
|
||||
Module,
|
||||
},
|
||||
fieldutils::{felt_to_integer_rep, integer_rep_to_felt},
|
||||
graph::{
|
||||
modules::POSEIDON_LEN_GRAPH, quantize_float, scale_to_multiplier, GraphCircuit,
|
||||
GraphSettings,
|
||||
},
|
||||
graph::{quantize_float, scale_to_multiplier, GraphCircuit, GraphSettings},
|
||||
};
|
||||
use console_error_panic_hook;
|
||||
use halo2_proofs::{
|
||||
@@ -231,10 +228,7 @@ pub fn poseidonHash(
|
||||
let message: Vec<Fr> = serde_json::from_slice(&message[..])
|
||||
.map_err(|e| JsError::new(&format!("Failed to deserialize message: {}", e)))?;
|
||||
|
||||
let output =
|
||||
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>::run(
|
||||
message.clone(),
|
||||
)
|
||||
let output = PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>::run(message.clone())
|
||||
.map_err(|e| JsError::new(&format!("{}", e)))?;
|
||||
|
||||
Ok(wasm_bindgen::Clamped(serde_json::to_vec(&output).map_err(
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
An easy-to-use implementation of the Poseidon Hash in the form of a Halo2 Chip. While the Poseidon Hash function
|
||||
is already implemented in halo2_gadgets, there is no wrapper chip that makes it easy to use in other circuits.
|
||||
Thanks to https://github.com/summa-dev/summa-solvency/blob/master/src/chips/poseidon/hash.rs for the inspiration (and also helping us understand how to use this).
|
||||
Thanks to https://github.com/summa-dev/summa-solvency/blob/master/zk_prover/src/chips/poseidon/hash.rs for the inspiration (and also helping us understand how to use this).
|
||||
*/
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
@@ -1,20 +1,18 @@
|
||||
/*
|
||||
An easy-to-use implementation of the Poseidon Hash in the form of a Halo2 Chip. While the Poseidon Hash function
|
||||
is already implemented in halo2_gadgets, there is no wrapper chip that makes it easy to use in other circuits.
|
||||
Thanks to https://github.com/summa-dev/summa-solvency/blob/master/src/chips/poseidon/hash.rs for the inspiration (and also helping us understand how to use this).
|
||||
Thanks to https://github.com/summa-dev/summa-solvency/blob/master/zk_prover/src/chips/poseidon/hash.rs for the inspiration (and also helping us understand how to use this).
|
||||
*/
|
||||
|
||||
pub mod poseidon_params;
|
||||
pub mod spec;
|
||||
|
||||
// This chip adds a set of advice columns to the gadget Chip to store the inputs of the hash
|
||||
use halo2_gadgets::poseidon::{primitives::*, Hash, Pow5Chip, Pow5Config};
|
||||
use halo2_proofs::arithmetic::Field;
|
||||
use halo2_gadgets::poseidon::{
|
||||
primitives::VariableLength, primitives::*, Hash, Pow5Chip, Pow5Config,
|
||||
};
|
||||
use halo2_proofs::halo2curves::bn256::Fr as Fp;
|
||||
use halo2_proofs::{circuit::*, plonk::*};
|
||||
// use maybe_rayon::prelude::{IndexedParallelIterator, IntoParallelRefIterator};
|
||||
use maybe_rayon::prelude::ParallelIterator;
|
||||
use maybe_rayon::slice::ParallelSlice;
|
||||
|
||||
use std::marker::PhantomData;
|
||||
|
||||
@@ -40,22 +38,17 @@ pub struct PoseidonConfig<const WIDTH: usize, const RATE: usize> {
|
||||
pub pow5_config: Pow5Config<Fp, WIDTH, RATE>,
|
||||
}
|
||||
|
||||
type InputAssignments = (Vec<AssignedCell<Fp, Fp>>, AssignedCell<Fp, Fp>);
|
||||
type InputAssignments = Vec<AssignedCell<Fp, Fp>>;
|
||||
|
||||
/// PoseidonChip is a wrapper around the Pow5Chip that adds a set of advice columns to the gadget Chip to store the inputs of the hash
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct PoseidonChip<
|
||||
S: Spec<Fp, WIDTH, RATE> + Sync,
|
||||
const WIDTH: usize,
|
||||
const RATE: usize,
|
||||
const L: usize,
|
||||
> {
|
||||
pub struct PoseidonChip<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize> {
|
||||
config: PoseidonConfig<WIDTH, RATE>,
|
||||
_marker: PhantomData<S>,
|
||||
}
|
||||
|
||||
impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, const L: usize>
|
||||
PoseidonChip<S, WIDTH, RATE, L>
|
||||
impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize>
|
||||
PoseidonChip<S, WIDTH, RATE>
|
||||
{
|
||||
/// Creates a new PoseidonChip
|
||||
pub fn configure_with_cols(
|
||||
@@ -82,8 +75,8 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, const L: usize>
|
||||
PoseidonChip<S, WIDTH, RATE, L>
|
||||
impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize>
|
||||
PoseidonChip<S, WIDTH, RATE>
|
||||
{
|
||||
/// Configuration of the PoseidonChip
|
||||
pub fn configure_with_optional_instance(
|
||||
@@ -100,9 +93,6 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
|
||||
let rc_a = (0..WIDTH).map(|_| meta.fixed_column()).collect::<Vec<_>>();
|
||||
let rc_b = (0..WIDTH).map(|_| meta.fixed_column()).collect::<Vec<_>>();
|
||||
|
||||
for input in hash_inputs.iter().take(WIDTH) {
|
||||
meta.enable_equality(*input);
|
||||
}
|
||||
meta.enable_constant(rc_b[0]);
|
||||
|
||||
Self::configure_with_cols(
|
||||
@@ -116,8 +106,8 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, const L: usize>
|
||||
Module<Fp> for PoseidonChip<S, WIDTH, RATE, L>
|
||||
impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize> Module<Fp>
|
||||
for PoseidonChip<S, WIDTH, RATE>
|
||||
{
|
||||
type Config = PoseidonConfig<WIDTH, RATE>;
|
||||
type InputAssignments = InputAssignments;
|
||||
@@ -152,9 +142,6 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
|
||||
let rc_a = (0..WIDTH).map(|_| meta.fixed_column()).collect::<Vec<_>>();
|
||||
let rc_b = (0..WIDTH).map(|_| meta.fixed_column()).collect::<Vec<_>>();
|
||||
|
||||
for input in hash_inputs.iter().take(WIDTH) {
|
||||
meta.enable_equality(*input);
|
||||
}
|
||||
meta.enable_constant(rc_b[0]);
|
||||
|
||||
let instance = meta.instance_column();
|
||||
@@ -176,7 +163,10 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
|
||||
message: &[ValTensor<Fp>],
|
||||
constants: &mut ConstantsMap<Fp>,
|
||||
) -> Result<Self::InputAssignments, ModuleError> {
|
||||
assert_eq!(message.len(), 1);
|
||||
if message.len() != 1 {
|
||||
return Err(ModuleError::InputWrongLength(message.len()));
|
||||
}
|
||||
|
||||
let message = message[0].clone();
|
||||
|
||||
let start_time = instant::Instant::now();
|
||||
@@ -186,95 +176,81 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
|
||||
let res = layouter.assign_region(
|
||||
|| "load message",
|
||||
|mut region| {
|
||||
let assigned_message: Result<Vec<AssignedCell<Fp, Fp>>, ModuleError> =
|
||||
match &message {
|
||||
ValTensor::Value { inner: v, .. } => {
|
||||
v.iter()
|
||||
.enumerate()
|
||||
.map(|(i, value)| {
|
||||
let x = i % WIDTH;
|
||||
let y = i / WIDTH;
|
||||
let assigned_message: Result<Vec<AssignedCell<Fp, Fp>>, _> = match &message {
|
||||
ValTensor::Value { inner: v, .. } => v
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, value)| {
|
||||
let x = i % WIDTH;
|
||||
let y = i / WIDTH;
|
||||
|
||||
match value {
|
||||
ValType::Value(v) => region
|
||||
.assign_advice(
|
||||
|| format!("load message_{}", i),
|
||||
self.config.hash_inputs[x],
|
||||
y,
|
||||
|| *v,
|
||||
)
|
||||
.map_err(|e| e.into()),
|
||||
ValType::PrevAssigned(v)
|
||||
| ValType::AssignedConstant(v, ..) => Ok(v.clone()),
|
||||
ValType::Constant(f) => {
|
||||
if local_constants.contains_key(f) {
|
||||
Ok(constants
|
||||
.get(f)
|
||||
.unwrap()
|
||||
.assigned_cell()
|
||||
.ok_or(ModuleError::ConstantNotAssigned)?)
|
||||
} else {
|
||||
let res = region.assign_advice_from_constant(
|
||||
|| format!("load message_{}", i),
|
||||
self.config.hash_inputs[x],
|
||||
y,
|
||||
*f,
|
||||
)?;
|
||||
|
||||
constants.insert(
|
||||
*f,
|
||||
ValType::AssignedConstant(res.clone(), *f),
|
||||
);
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
e => Err(ModuleError::WrongInputType(
|
||||
format!("{:?}", e),
|
||||
"PrevAssigned".to_string(),
|
||||
)),
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
ValTensor::Instance {
|
||||
dims,
|
||||
inner: col,
|
||||
idx,
|
||||
initial_offset,
|
||||
..
|
||||
} => {
|
||||
// this should never ever fail
|
||||
let num_elems = dims[*idx].iter().product::<usize>();
|
||||
(0..num_elems)
|
||||
.map(|i| {
|
||||
let x = i % WIDTH;
|
||||
let y = i / WIDTH;
|
||||
region.assign_advice_from_instance(
|
||||
|| "pub input anchor",
|
||||
*col,
|
||||
initial_offset + i,
|
||||
match value {
|
||||
ValType::Value(v) => region
|
||||
.assign_advice(
|
||||
|| format!("load message_{}", i),
|
||||
self.config.hash_inputs[x],
|
||||
y,
|
||||
|| *v,
|
||||
)
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
};
|
||||
.map_err(|e| e.into()),
|
||||
ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
|
||||
Ok(v.clone())
|
||||
}
|
||||
ValType::Constant(f) => {
|
||||
if local_constants.contains_key(f) {
|
||||
Ok(constants
|
||||
.get(f)
|
||||
.unwrap()
|
||||
.assigned_cell()
|
||||
.ok_or(ModuleError::ConstantNotAssigned)?)
|
||||
} else {
|
||||
let res = region.assign_advice_from_constant(
|
||||
|| format!("load message_{}", i),
|
||||
self.config.hash_inputs[x],
|
||||
y,
|
||||
*f,
|
||||
)?;
|
||||
|
||||
let offset = message.len() / WIDTH + 1;
|
||||
constants
|
||||
.insert(*f, ValType::AssignedConstant(res.clone(), *f));
|
||||
|
||||
let zero_val = region
|
||||
.assign_advice_from_constant(
|
||||
|| "",
|
||||
self.config.hash_inputs[0],
|
||||
offset,
|
||||
Fp::ZERO,
|
||||
)
|
||||
.unwrap();
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
e => Err(ModuleError::WrongInputType(
|
||||
format!("{:?}", e),
|
||||
"AssignedValue".to_string(),
|
||||
)),
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
ValTensor::Instance {
|
||||
dims,
|
||||
inner: col,
|
||||
idx,
|
||||
initial_offset,
|
||||
..
|
||||
} => {
|
||||
// this should never ever fail
|
||||
let num_elems = dims[*idx].iter().product::<usize>();
|
||||
(0..num_elems)
|
||||
.map(|i| {
|
||||
let x = i % WIDTH;
|
||||
let y = i / WIDTH;
|
||||
region.assign_advice_from_instance(
|
||||
|| "pub input anchor",
|
||||
*col,
|
||||
initial_offset + i,
|
||||
self.config.hash_inputs[x],
|
||||
y,
|
||||
)
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
};
|
||||
|
||||
Ok((assigned_message?, zero_val))
|
||||
Ok(assigned_message?)
|
||||
},
|
||||
);
|
||||
log::trace!(
|
||||
@@ -295,7 +271,13 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
|
||||
row_offset: usize,
|
||||
constants: &mut ConstantsMap<Fp>,
|
||||
) -> Result<ValTensor<Fp>, ModuleError> {
|
||||
let (mut input_cells, zero_val) = self.layout_inputs(layouter, input, constants)?;
|
||||
let input_cells = self.layout_inputs(layouter, input, constants)?;
|
||||
|
||||
// empty hash case
|
||||
if input_cells.is_empty() {
|
||||
return Ok(input[0].clone());
|
||||
}
|
||||
|
||||
// extract the values from the input cells
|
||||
let mut assigned_input: Tensor<ValType<Fp>> =
|
||||
input_cells.iter().map(|e| ValType::from(e.clone())).into();
|
||||
@@ -303,52 +285,25 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
|
||||
|
||||
let start_time = instant::Instant::now();
|
||||
|
||||
let mut one_iter = false;
|
||||
// do the Tree dance baby
|
||||
while input_cells.len() > 1 || !one_iter {
|
||||
let hashes: Result<Vec<AssignedCell<Fp, Fp>>, ModuleError> = input_cells
|
||||
.chunks(L)
|
||||
.enumerate()
|
||||
.map(|(i, block)| {
|
||||
let _start_time = instant::Instant::now();
|
||||
let pow5_chip = Pow5Chip::construct(self.config.pow5_config.clone());
|
||||
// initialize the hasher
|
||||
let hasher = Hash::<_, _, S, VariableLength, WIDTH, RATE>::init(
|
||||
pow5_chip,
|
||||
layouter.namespace(|| "block_hasher"),
|
||||
)?;
|
||||
|
||||
let mut block = block.to_vec();
|
||||
let remainder = block.len() % L;
|
||||
|
||||
if remainder != 0 {
|
||||
block.extend(vec![zero_val.clone(); L - remainder]);
|
||||
}
|
||||
|
||||
let pow5_chip = Pow5Chip::construct(self.config.pow5_config.clone());
|
||||
// initialize the hasher
|
||||
let hasher = Hash::<_, _, S, ConstantLength<L>, WIDTH, RATE>::init(
|
||||
pow5_chip,
|
||||
layouter.namespace(|| "block_hasher"),
|
||||
)?;
|
||||
|
||||
let hash = hasher.hash(
|
||||
layouter.namespace(|| "hash"),
|
||||
block.to_vec().try_into().map_err(|_| Error::Synthesis)?,
|
||||
);
|
||||
|
||||
if i == 0 {
|
||||
log::trace!("block (L={:?}) took: {:?}", L, _start_time.elapsed());
|
||||
}
|
||||
|
||||
hash
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| e.into());
|
||||
|
||||
log::trace!("hashes (N={:?}) took: {:?}", len, start_time.elapsed());
|
||||
one_iter = true;
|
||||
input_cells = hashes?;
|
||||
}
|
||||
let hash: AssignedCell<Fp, Fp> = hasher.hash(
|
||||
layouter.namespace(|| "hash"),
|
||||
input_cells
|
||||
.to_vec()
|
||||
.try_into()
|
||||
.map_err(|_| Error::Synthesis)?,
|
||||
)?;
|
||||
|
||||
let duration = start_time.elapsed();
|
||||
log::trace!("layout (N={:?}) took: {:?}", len, duration);
|
||||
|
||||
let result = Tensor::from(input_cells.iter().map(|e| ValType::from(e.clone())));
|
||||
let result = Tensor::from(vec![ValType::from(hash.clone())].into_iter());
|
||||
|
||||
let output = match result[0].clone() {
|
||||
ValType::PrevAssigned(v) => v,
|
||||
@@ -387,69 +342,59 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
|
||||
|
||||
///
|
||||
fn run(message: Vec<Fp>) -> Result<Vec<Vec<Fp>>, ModuleError> {
|
||||
let mut hash_inputs = message;
|
||||
|
||||
let len = hash_inputs.len();
|
||||
let len = message.len();
|
||||
if len == 0 {
|
||||
return Ok(vec![vec![]]);
|
||||
}
|
||||
|
||||
let start_time = instant::Instant::now();
|
||||
|
||||
let mut one_iter = false;
|
||||
// do the Tree dance baby
|
||||
while hash_inputs.len() > 1 || !one_iter {
|
||||
let hashes: Vec<Fp> = hash_inputs
|
||||
.par_chunks(L)
|
||||
.map(|block| {
|
||||
let mut block = block.to_vec();
|
||||
let remainder = block.len() % L;
|
||||
|
||||
if remainder != 0 {
|
||||
block.extend(vec![Fp::ZERO; L - remainder].iter());
|
||||
}
|
||||
|
||||
let block_len = block.len();
|
||||
|
||||
let message = block
|
||||
.try_into()
|
||||
.map_err(|_| ModuleError::InputWrongLength(block_len))?;
|
||||
|
||||
Ok(halo2_gadgets::poseidon::primitives::Hash::<
|
||||
_,
|
||||
S,
|
||||
ConstantLength<L>,
|
||||
{ WIDTH },
|
||||
{ RATE },
|
||||
>::init()
|
||||
.hash(message))
|
||||
})
|
||||
.collect::<Result<Vec<_>, ModuleError>>()?;
|
||||
one_iter = true;
|
||||
hash_inputs = hashes;
|
||||
}
|
||||
let hash = halo2_gadgets::poseidon::primitives::Hash::<
|
||||
_,
|
||||
S,
|
||||
VariableLength,
|
||||
{ WIDTH },
|
||||
{ RATE },
|
||||
>::init()
|
||||
.hash(message);
|
||||
|
||||
let duration = start_time.elapsed();
|
||||
log::trace!("run (N={:?}) took: {:?}", len, duration);
|
||||
|
||||
Ok(vec![hash_inputs])
|
||||
Ok(vec![vec![hash]])
|
||||
}
|
||||
|
||||
fn num_rows(mut input_len: usize) -> usize {
|
||||
fn num_rows(input_len: usize) -> usize {
|
||||
// this was determined by running the circuit and looking at the number of constraints
|
||||
// in the test called hash_for_a_range_of_input_sizes, then regressing in python to find the slope
|
||||
let fixed_cost: usize = 41 * L;
|
||||
// import numpy as np
|
||||
// from scipy import stats
|
||||
|
||||
let mut num_rows = 0;
|
||||
// x = np.array([32, 64, 96, 128, 160, 192])
|
||||
// y = np.array([1298, 2594, 3890, 5186, 6482, 7778])
|
||||
|
||||
loop {
|
||||
// the number of times the input_len is divisible by L
|
||||
let num_chunks = input_len / L + 1;
|
||||
num_rows += num_chunks * fixed_cost;
|
||||
if num_chunks == 1 {
|
||||
break;
|
||||
}
|
||||
input_len = num_chunks;
|
||||
}
|
||||
// slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
|
||||
|
||||
num_rows
|
||||
// print(f"slope: {slope}")
|
||||
// print(f"intercept: {intercept}")
|
||||
// print(f"R^2: {r_value**2}")
|
||||
|
||||
// # Predict for any x
|
||||
// def predict(x):
|
||||
// return slope * x + intercept
|
||||
|
||||
// # Test prediction
|
||||
// test_x = 256
|
||||
// print(f"Predicted value for x={test_x}: {predict(test_x)}")
|
||||
// our output:
|
||||
// slope: 40.5
|
||||
// intercept: 2.0
|
||||
// R^2: 1.0
|
||||
// Predicted value for x=256: 10370.0
|
||||
let fixed_cost: usize = 41 * input_len;
|
||||
|
||||
// the cost of the hash function is linear with the number of inputs
|
||||
fixed_cost + 2
|
||||
}
|
||||
}
|
||||
|
||||
@@ -476,12 +421,12 @@ mod tests {
|
||||
const RATE: usize = POSEIDON_RATE;
|
||||
const R: usize = 240;
|
||||
|
||||
struct HashCircuit<S: Spec<Fp, WIDTH, RATE>, const L: usize> {
|
||||
struct HashCircuit<S: Spec<Fp, WIDTH, RATE>> {
|
||||
message: ValTensor<Fp>,
|
||||
_spec: PhantomData<S>,
|
||||
}
|
||||
|
||||
impl<S: Spec<Fp, WIDTH, RATE>, const L: usize> Circuit<Fp> for HashCircuit<S, L> {
|
||||
impl<S: Spec<Fp, WIDTH, RATE>> Circuit<Fp> for HashCircuit<S> {
|
||||
type Config = PoseidonConfig<WIDTH, RATE>;
|
||||
type FloorPlanner = ModulePlanner;
|
||||
type Params = ();
|
||||
@@ -497,7 +442,7 @@ mod tests {
|
||||
}
|
||||
|
||||
fn configure(meta: &mut ConstraintSystem<Fp>) -> PoseidonConfig<WIDTH, RATE> {
|
||||
PoseidonChip::<PoseidonSpec, WIDTH, RATE, L>::configure(meta, ())
|
||||
PoseidonChip::<PoseidonSpec, WIDTH, RATE>::configure(meta, ())
|
||||
}
|
||||
|
||||
fn synthesize(
|
||||
@@ -505,7 +450,7 @@ mod tests {
|
||||
config: PoseidonConfig<WIDTH, RATE>,
|
||||
mut layouter: impl Layouter<Fp>,
|
||||
) -> Result<(), Error> {
|
||||
let chip: PoseidonChip<PoseidonSpec, WIDTH, RATE, L> = PoseidonChip::new(config);
|
||||
let chip: PoseidonChip<PoseidonSpec, WIDTH, RATE> = PoseidonChip::new(config);
|
||||
chip.layout(
|
||||
&mut layouter,
|
||||
&[self.message.clone()],
|
||||
@@ -517,18 +462,33 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn poseidon_hash_empty() {
|
||||
let message = [];
|
||||
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(message.to_vec()).unwrap();
|
||||
let mut message: Tensor<ValType<Fp>> =
|
||||
message.into_iter().map(|m| Value::known(m).into()).into();
|
||||
let k = 9;
|
||||
let circuit = HashCircuit::<PoseidonSpec> {
|
||||
message: message.into(),
|
||||
_spec: PhantomData,
|
||||
};
|
||||
let prover = halo2_proofs::dev::MockProver::run(k, &circuit, vec![vec![]]).unwrap();
|
||||
assert_eq!(prover.verify(), Ok(()))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn poseidon_hash() {
|
||||
let rng = rand::rngs::OsRng;
|
||||
|
||||
let message = [Fp::random(rng), Fp::random(rng)];
|
||||
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE, 2>::run(message.to_vec()).unwrap();
|
||||
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(message.to_vec()).unwrap();
|
||||
|
||||
let mut message: Tensor<ValType<Fp>> =
|
||||
message.into_iter().map(|m| Value::known(m).into()).into();
|
||||
|
||||
let k = 9;
|
||||
let circuit = HashCircuit::<PoseidonSpec, 2> {
|
||||
let circuit = HashCircuit::<PoseidonSpec> {
|
||||
message: message.into(),
|
||||
_spec: PhantomData,
|
||||
};
|
||||
@@ -541,13 +501,13 @@ mod tests {
|
||||
let rng = rand::rngs::OsRng;
|
||||
|
||||
let message = [Fp::random(rng), Fp::random(rng), Fp::random(rng)];
|
||||
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE, 3>::run(message.to_vec()).unwrap();
|
||||
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(message.to_vec()).unwrap();
|
||||
|
||||
let mut message: Tensor<ValType<Fp>> =
|
||||
message.into_iter().map(|m| Value::known(m).into()).into();
|
||||
|
||||
let k = 9;
|
||||
let circuit = HashCircuit::<PoseidonSpec, 3> {
|
||||
let circuit = HashCircuit::<PoseidonSpec> {
|
||||
message: message.into(),
|
||||
_spec: PhantomData,
|
||||
};
|
||||
@@ -563,23 +523,21 @@ mod tests {
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
env_logger::init();
|
||||
|
||||
{
|
||||
let i = 32;
|
||||
for i in (32..128).step_by(32) {
|
||||
// print a bunch of new lines
|
||||
println!(
|
||||
log::info!(
|
||||
"i is {} -------------------------------------------------",
|
||||
i
|
||||
);
|
||||
|
||||
let message: Vec<Fp> = (0..i).map(|_| Fp::random(rng)).collect::<Vec<_>>();
|
||||
let output =
|
||||
PoseidonChip::<PoseidonSpec, WIDTH, RATE, 32>::run(message.clone()).unwrap();
|
||||
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(message.clone()).unwrap();
|
||||
|
||||
let mut message: Tensor<ValType<Fp>> =
|
||||
message.into_iter().map(|m| Value::known(m).into()).into();
|
||||
|
||||
let k = 17;
|
||||
let circuit = HashCircuit::<PoseidonSpec, 32> {
|
||||
let circuit = HashCircuit::<PoseidonSpec> {
|
||||
message: message.into(),
|
||||
_spec: PhantomData,
|
||||
};
|
||||
@@ -596,13 +554,13 @@ mod tests {
|
||||
|
||||
let mut message: Vec<Fp> = (0..2048).map(|_| Fp::random(rng)).collect::<Vec<_>>();
|
||||
|
||||
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE, 25>::run(message.clone()).unwrap();
|
||||
let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(message.clone()).unwrap();
|
||||
|
||||
let mut message: Tensor<ValType<Fp>> =
|
||||
message.into_iter().map(|m| Value::known(m).into()).into();
|
||||
|
||||
let k = 17;
|
||||
let circuit = HashCircuit::<PoseidonSpec, 25> {
|
||||
let circuit = HashCircuit::<PoseidonSpec> {
|
||||
message: message.into(),
|
||||
_spec: PhantomData,
|
||||
};
|
||||
|
||||
@@ -17,12 +17,14 @@ pub enum BaseOp {
|
||||
Sub,
|
||||
SumInit,
|
||||
Sum,
|
||||
IsBoolean,
|
||||
}
|
||||
|
||||
/// Matches a [BaseOp] to an operation over inputs
|
||||
impl BaseOp {
|
||||
/// forward func
|
||||
/// forward func for non-accumulating operations
|
||||
/// # Panics
|
||||
/// Panics if called on an accumulating operation
|
||||
/// # Examples
|
||||
pub fn nonaccum_f<
|
||||
T: TensorType + Add<Output = T> + Sub<Output = T> + Mul<Output = T> + Neg<Output = T>,
|
||||
>(
|
||||
@@ -34,12 +36,13 @@ impl BaseOp {
|
||||
BaseOp::Add => a + b,
|
||||
BaseOp::Sub => a - b,
|
||||
BaseOp::Mult => a * b,
|
||||
BaseOp::IsBoolean => b,
|
||||
_ => panic!("nonaccum_f called on accumulating operation"),
|
||||
}
|
||||
}
|
||||
|
||||
/// forward func
|
||||
/// forward func for accumulating operations
|
||||
/// # Panics
|
||||
/// Panics if called on a non-accumulating operation
|
||||
pub fn accum_f<
|
||||
T: TensorType + Add<Output = T> + Sub<Output = T> + Mul<Output = T> + Neg<Output = T>,
|
||||
>(
|
||||
@@ -74,7 +77,6 @@ impl BaseOp {
|
||||
BaseOp::Mult => "MULT",
|
||||
BaseOp::Sum => "SUM",
|
||||
BaseOp::SumInit => "SUMINIT",
|
||||
BaseOp::IsBoolean => "ISBOOLEAN",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -90,7 +92,6 @@ impl BaseOp {
|
||||
BaseOp::Mult => (0, 1),
|
||||
BaseOp::Sum => (-1, 2),
|
||||
BaseOp::SumInit => (0, 1),
|
||||
BaseOp::IsBoolean => (0, 1),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -106,7 +107,6 @@ impl BaseOp {
|
||||
BaseOp::Mult => 2,
|
||||
BaseOp::Sum => 1,
|
||||
BaseOp::SumInit => 1,
|
||||
BaseOp::IsBoolean => 0,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,7 +122,6 @@ impl BaseOp {
|
||||
BaseOp::SumInit => 0,
|
||||
BaseOp::CumProd => 1,
|
||||
BaseOp::CumProdInit => 0,
|
||||
BaseOp::IsBoolean => 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@ use std::str::FromStr;
|
||||
|
||||
use halo2_proofs::{
|
||||
circuit::Layouter,
|
||||
plonk::{ConstraintSystem, Constraints, Expression, Selector},
|
||||
plonk::{ConstraintSystem, Constraints, Expression, Selector, TableColumn},
|
||||
poly::Rotation,
|
||||
};
|
||||
use log::debug;
|
||||
@@ -20,7 +20,6 @@ use crate::{
|
||||
circuit::{
|
||||
ops::base::BaseOp,
|
||||
table::{Range, RangeCheck, Table},
|
||||
utils,
|
||||
},
|
||||
tensor::{Tensor, TensorType, ValTensor, VarTensor},
|
||||
};
|
||||
@@ -85,55 +84,6 @@ impl CheckMode {
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(missing_docs)]
|
||||
/// An enum representing the tolerance we can accept for the accumulated arguments, either absolute or percentage
|
||||
#[derive(Clone, Default, Debug, PartialEq, PartialOrd, Serialize, Deserialize, Copy)]
|
||||
pub struct Tolerance {
|
||||
pub val: f32,
|
||||
pub scale: utils::F32,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Tolerance {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{:.2}", self.val)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
impl ToFlags for Tolerance {
|
||||
/// Convert the struct to a subcommand string
|
||||
fn to_flags(&self) -> Vec<String> {
|
||||
vec![format!("{}", self)]
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for Tolerance {
|
||||
type Err = String;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
if let Ok(val) = s.parse::<f32>() {
|
||||
Ok(Tolerance {
|
||||
val,
|
||||
scale: utils::F32(1.0),
|
||||
})
|
||||
} else {
|
||||
Err(
|
||||
"Invalid tolerance value provided. It should expressed as a percentage (f32)."
|
||||
.to_string(),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<f32> for Tolerance {
|
||||
fn from(value: f32) -> Self {
|
||||
Tolerance {
|
||||
val: value,
|
||||
scale: utils::F32(1.0),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "python-bindings")]
|
||||
/// Converts CheckMode into a PyObject (Required for CheckMode to be compatible with Python)
|
||||
impl IntoPy<PyObject> for CheckMode {
|
||||
@@ -158,29 +108,6 @@ impl<'source> FromPyObject<'source> for CheckMode {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "python-bindings")]
|
||||
/// Converts Tolerance into a PyObject (Required for Tolerance to be compatible with Python)
|
||||
impl IntoPy<PyObject> for Tolerance {
|
||||
fn into_py(self, py: Python) -> PyObject {
|
||||
(self.val, self.scale.0).to_object(py)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "python-bindings")]
|
||||
/// Obtains Tolerance from PyObject (Required for Tolerance to be compatible with Python)
|
||||
impl<'source> FromPyObject<'source> for Tolerance {
|
||||
fn extract_bound(ob: &pyo3::Bound<'source, pyo3::PyAny>) -> PyResult<Self> {
|
||||
if let Ok((val, scale)) = <(f32, f32)>::extract_bound(ob) {
|
||||
Ok(Tolerance {
|
||||
val,
|
||||
scale: utils::F32(scale),
|
||||
})
|
||||
} else {
|
||||
Err(PyValueError::new_err("Invalid tolerance value provided. "))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A struct representing the selectors for the dynamic lookup tables
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct DynamicLookups {
|
||||
@@ -215,15 +142,16 @@ impl DynamicLookups {
|
||||
|
||||
/// A struct representing the selectors for the dynamic lookup tables
|
||||
#[derive(Clone, Debug, Default)]
|
||||
|
||||
pub struct Shuffles {
|
||||
/// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many dynamic lookup ops.
|
||||
pub input_selectors: BTreeMap<(usize, (usize, usize)), Selector>,
|
||||
/// Selectors for the dynamic lookup tables
|
||||
pub reference_selectors: Vec<Selector>,
|
||||
pub output_selectors: Vec<Selector>,
|
||||
/// Inputs:
|
||||
pub inputs: Vec<VarTensor>,
|
||||
/// tables
|
||||
pub references: Vec<VarTensor>,
|
||||
pub outputs: Vec<VarTensor>,
|
||||
}
|
||||
|
||||
impl Shuffles {
|
||||
@@ -234,9 +162,13 @@ impl Shuffles {
|
||||
|
||||
Self {
|
||||
input_selectors: BTreeMap::new(),
|
||||
reference_selectors: vec![],
|
||||
inputs: vec![dummy_var.clone(), dummy_var.clone()],
|
||||
references: vec![single_col_dummy_var.clone(), single_col_dummy_var.clone()],
|
||||
output_selectors: vec![],
|
||||
inputs: vec![dummy_var.clone(), dummy_var.clone(), dummy_var.clone()],
|
||||
outputs: vec![
|
||||
single_col_dummy_var.clone(),
|
||||
single_col_dummy_var.clone(),
|
||||
single_col_dummy_var.clone(),
|
||||
],
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -336,6 +268,8 @@ pub struct BaseConfig<F: PrimeField + TensorType + PartialOrd> {
|
||||
/// Activate sanity checks
|
||||
pub check_mode: CheckMode,
|
||||
_marker: PhantomData<F>,
|
||||
/// shared table inputs
|
||||
pub shared_table_inputs: Vec<TableColumn>,
|
||||
}
|
||||
|
||||
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
@@ -348,6 +282,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
shuffles: Shuffles::dummy(col_size, num_inner_cols),
|
||||
range_checks: RangeChecks::dummy(col_size, num_inner_cols),
|
||||
check_mode: CheckMode::SAFE,
|
||||
shared_table_inputs: vec![],
|
||||
_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
@@ -374,13 +309,18 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
if inputs[0].num_cols() != output.num_cols() {
|
||||
log::warn!("input and output shapes do not match");
|
||||
}
|
||||
if inputs[0].num_inner_cols() != inputs[1].num_inner_cols() {
|
||||
log::warn!("input number of inner columns do not match");
|
||||
}
|
||||
if inputs[0].num_inner_cols() != output.num_inner_cols() {
|
||||
log::warn!("input and output number of inner columns do not match");
|
||||
}
|
||||
|
||||
for i in 0..output.num_blocks() {
|
||||
for j in 0..output.num_inner_cols() {
|
||||
nonaccum_selectors.insert((BaseOp::Add, i, j), meta.selector());
|
||||
nonaccum_selectors.insert((BaseOp::Sub, i, j), meta.selector());
|
||||
nonaccum_selectors.insert((BaseOp::Mult, i, j), meta.selector());
|
||||
nonaccum_selectors.insert((BaseOp::IsBoolean, i, j), meta.selector());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -414,24 +354,13 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
// Get output expressions for each input channel
|
||||
let (rotation_offset, rng) = base_op.query_offset_rng();
|
||||
|
||||
let constraints = match base_op {
|
||||
BaseOp::IsBoolean => {
|
||||
let expected_output: Tensor<Expression<F>> = output
|
||||
.query_rng(meta, *block_idx, *inner_col_idx, 0, 1)
|
||||
.expect("non accum: output query failed");
|
||||
let constraints = {
|
||||
let expected_output: Tensor<Expression<F>> = output
|
||||
.query_rng(meta, *block_idx, *inner_col_idx, rotation_offset, rng)
|
||||
.expect("non accum: output query failed");
|
||||
|
||||
let output = expected_output[base_op.constraint_idx()].clone();
|
||||
|
||||
vec![(output.clone()) * (output.clone() - Expression::Constant(F::from(1)))]
|
||||
}
|
||||
_ => {
|
||||
let expected_output: Tensor<Expression<F>> = output
|
||||
.query_rng(meta, *block_idx, *inner_col_idx, rotation_offset, rng)
|
||||
.expect("non accum: output query failed");
|
||||
|
||||
let res = base_op.nonaccum_f((qis[0].clone(), qis[1].clone()));
|
||||
vec![expected_output[base_op.constraint_idx()].clone() - res]
|
||||
}
|
||||
let res = base_op.nonaccum_f((qis[0].clone(), qis[1].clone()));
|
||||
vec![expected_output[base_op.constraint_idx()].clone() - res]
|
||||
};
|
||||
|
||||
Constraints::with_selector(selector, constraints)
|
||||
@@ -486,6 +415,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
dynamic_lookups: DynamicLookups::default(),
|
||||
shuffles: Shuffles::default(),
|
||||
range_checks: RangeChecks::default(),
|
||||
shared_table_inputs: vec![],
|
||||
check_mode,
|
||||
_marker: PhantomData,
|
||||
}
|
||||
@@ -516,21 +446,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
return Err(CircuitError::WrongColumnType(output.name().to_string()));
|
||||
}
|
||||
|
||||
// we borrow mutably twice so we need to do this dance
|
||||
|
||||
let table = if !self.static_lookups.tables.contains_key(nl) {
|
||||
// as all tables have the same input we see if there's another table who's input we can reuse
|
||||
let table = if let Some(table) = self.static_lookups.tables.values().next() {
|
||||
Table::<F>::configure(
|
||||
cs,
|
||||
lookup_range,
|
||||
logrows,
|
||||
nl,
|
||||
Some(table.table_inputs.clone()),
|
||||
)
|
||||
} else {
|
||||
Table::<F>::configure(cs, lookup_range, logrows, nl, None)
|
||||
};
|
||||
let table =
|
||||
Table::<F>::configure(cs, lookup_range, logrows, nl, &mut self.shared_table_inputs);
|
||||
self.static_lookups.tables.insert(nl.clone(), table.clone());
|
||||
table
|
||||
} else {
|
||||
@@ -581,9 +499,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
// this is 0 if the index is the same as the column index (starting from 1)
|
||||
|
||||
let col_expr = sel.clone()
|
||||
* table
|
||||
* (table
|
||||
.selector_constructor
|
||||
.get_expr_at_idx(col_idx, synthetic_sel);
|
||||
.get_expr_at_idx(col_idx, synthetic_sel));
|
||||
|
||||
let multiplier =
|
||||
table.selector_constructor.get_selector_val_at_idx(col_idx);
|
||||
@@ -615,6 +533,40 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
res
|
||||
});
|
||||
}
|
||||
|
||||
// add a degree-k custom constraint of the following form to the range check and
|
||||
// static lookup configuration.
|
||||
// 𝑚𝑢𝑙𝑡𝑖𝑠𝑒𝑙 · ∏ (𝑠𝑒𝑙 − 𝑖) = 0 where 𝑠𝑒𝑙 is the synthetic_sel, and the product is over the set of overflowed columns
|
||||
// and 𝑚𝑢𝑙𝑡𝑖𝑠𝑒𝑙 is the selector value at the column index
|
||||
cs.create_gate("range_check_on_sel", |cs| {
|
||||
let synthetic_sel = match len {
|
||||
1 => Expression::Constant(F::from(1)),
|
||||
_ => match index {
|
||||
VarTensor::Advice { inner: advices, .. } => {
|
||||
cs.query_advice(advices[x][y], Rotation(0))
|
||||
}
|
||||
_ => unreachable!(),
|
||||
},
|
||||
};
|
||||
|
||||
let range_check_on_synthetic_sel = match len {
|
||||
1 => Expression::Constant(F::from(0)),
|
||||
_ => {
|
||||
let mut initial_expr = Expression::Constant(F::from(1));
|
||||
for i in 0..len {
|
||||
initial_expr = initial_expr
|
||||
* (synthetic_sel.clone()
|
||||
- Expression::Constant(F::from(i as u64)))
|
||||
}
|
||||
initial_expr
|
||||
}
|
||||
};
|
||||
|
||||
let sel = cs.query_selector(multi_col_selector);
|
||||
|
||||
Constraints::with_selector(sel, vec![range_check_on_synthetic_sel])
|
||||
});
|
||||
|
||||
self.static_lookups
|
||||
.selectors
|
||||
.insert((nl.clone(), x, y), multi_col_selector);
|
||||
@@ -740,8 +692,8 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
pub fn configure_shuffles(
|
||||
&mut self,
|
||||
cs: &mut ConstraintSystem<F>,
|
||||
inputs: &[VarTensor; 2],
|
||||
references: &[VarTensor; 2],
|
||||
inputs: &[VarTensor; 3],
|
||||
outputs: &[VarTensor; 3],
|
||||
) -> Result<(), CircuitError>
|
||||
where
|
||||
F: Field,
|
||||
@@ -752,14 +704,14 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
}
|
||||
}
|
||||
|
||||
for t in references.iter() {
|
||||
for t in outputs.iter() {
|
||||
if !t.is_advice() || t.num_inner_cols() > 1 {
|
||||
return Err(CircuitError::WrongDynamicColumnType(t.name().to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
// assert all tables have the same number of blocks
|
||||
if references
|
||||
if outputs
|
||||
.iter()
|
||||
.map(|t| t.num_blocks())
|
||||
.collect::<Vec<_>>()
|
||||
@@ -767,23 +719,23 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
.any(|w| w[0] != w[1])
|
||||
{
|
||||
return Err(CircuitError::WrongDynamicColumnType(
|
||||
"references inner cols".to_string(),
|
||||
"outputs inner cols".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let one = Expression::Constant(F::ONE);
|
||||
|
||||
for q in 0..references[0].num_blocks() {
|
||||
let s_reference = cs.complex_selector();
|
||||
for q in 0..outputs[0].num_blocks() {
|
||||
let s_output = cs.complex_selector();
|
||||
|
||||
for x in 0..inputs[0].num_blocks() {
|
||||
for y in 0..inputs[0].num_inner_cols() {
|
||||
let s_input = cs.complex_selector();
|
||||
|
||||
cs.lookup_any("lookup", |cs| {
|
||||
cs.lookup_any("shuffle", |cs| {
|
||||
let s_inputq = cs.query_selector(s_input);
|
||||
let mut expression = vec![];
|
||||
let s_referenceq = cs.query_selector(s_reference);
|
||||
let s_outputq = cs.query_selector(s_output);
|
||||
let mut input_queries = vec![one.clone()];
|
||||
|
||||
for input in inputs {
|
||||
@@ -795,9 +747,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
});
|
||||
}
|
||||
|
||||
let mut ref_queries = vec![one.clone()];
|
||||
for reference in references {
|
||||
ref_queries.push(match reference {
|
||||
let mut output_queries = vec![one.clone()];
|
||||
for output in outputs {
|
||||
output_queries.push(match output {
|
||||
VarTensor::Advice { inner: advices, .. } => {
|
||||
cs.query_advice(advices[q][0], Rotation(0))
|
||||
}
|
||||
@@ -806,7 +758,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
}
|
||||
|
||||
let lhs = input_queries.into_iter().map(|c| c * s_inputq.clone());
|
||||
let rhs = ref_queries.into_iter().map(|c| c * s_referenceq.clone());
|
||||
let rhs = output_queries.into_iter().map(|c| c * s_outputq.clone());
|
||||
expression.extend(lhs.zip(rhs));
|
||||
|
||||
expression
|
||||
@@ -817,13 +769,13 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
.or_insert(s_input);
|
||||
}
|
||||
}
|
||||
self.shuffles.reference_selectors.push(s_reference);
|
||||
self.shuffles.output_selectors.push(s_output);
|
||||
}
|
||||
|
||||
// if we haven't previously initialized the input/output, do so now
|
||||
if self.shuffles.references.is_empty() {
|
||||
debug!("assigning shuffles reference");
|
||||
self.shuffles.references = references.to_vec();
|
||||
if self.shuffles.outputs.is_empty() {
|
||||
debug!("assigning shuffles output");
|
||||
self.shuffles.outputs = outputs.to_vec();
|
||||
}
|
||||
if self.shuffles.inputs.is_empty() {
|
||||
debug!("assigning shuffles input");
|
||||
@@ -855,7 +807,6 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
let range_check = if let std::collections::btree_map::Entry::Vacant(e) =
|
||||
self.range_checks.ranges.entry(range)
|
||||
{
|
||||
// as all tables have the same input we see if there's another table who's input we can reuse
|
||||
let range_check = RangeCheck::<F>::configure(cs, range, logrows);
|
||||
e.insert(range_check.clone());
|
||||
range_check
|
||||
@@ -893,9 +844,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
let default_x = range_check.get_first_element(col_idx);
|
||||
|
||||
let col_expr = sel.clone()
|
||||
* range_check
|
||||
* (range_check
|
||||
.selector_constructor
|
||||
.get_expr_at_idx(col_idx, synthetic_sel);
|
||||
.get_expr_at_idx(col_idx, synthetic_sel));
|
||||
|
||||
let multiplier = range_check
|
||||
.selector_constructor
|
||||
@@ -918,6 +869,40 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
|
||||
res
|
||||
});
|
||||
}
|
||||
|
||||
// add a degree-k custom constraint of the following form to the range check and
|
||||
// static lookup configuration.
|
||||
// 𝑚𝑢𝑙𝑡𝑖𝑠𝑒𝑙 · ∏ (𝑠𝑒𝑙 − 𝑖) = 0 where 𝑠𝑒𝑙 is the synthetic_sel, and the product is over the set of overflowed columns
|
||||
// and 𝑚𝑢𝑙𝑡𝑖𝑠𝑒𝑙 is the selector value at the column index
|
||||
cs.create_gate("range_check_on_sel", |cs| {
|
||||
let synthetic_sel = match len {
|
||||
1 => Expression::Constant(F::from(1)),
|
||||
_ => match index {
|
||||
VarTensor::Advice { inner: advices, .. } => {
|
||||
cs.query_advice(advices[x][y], Rotation(0))
|
||||
}
|
||||
_ => unreachable!(),
|
||||
},
|
||||
};
|
||||
|
||||
let range_check_on_synthetic_sel = match len {
|
||||
1 => Expression::Constant(F::from(0)),
|
||||
_ => {
|
||||
let mut initial_expr = Expression::Constant(F::from(1));
|
||||
for i in 0..len {
|
||||
initial_expr = initial_expr
|
||||
* (synthetic_sel.clone()
|
||||
- Expression::Constant(F::from(i as u64)))
|
||||
}
|
||||
initial_expr
|
||||
}
|
||||
};
|
||||
|
||||
let sel = cs.query_selector(multi_col_selector);
|
||||
|
||||
Constraints::with_selector(sel, vec![range_check_on_synthetic_sel])
|
||||
});
|
||||
|
||||
self.range_checks
|
||||
.selectors
|
||||
.insert((range, x, y), multi_col_selector);
|
||||
|
||||
@@ -25,7 +25,7 @@ pub enum CircuitError {
|
||||
/// This operation is unsupported
|
||||
#[error("unsupported operation in graph")]
|
||||
UnsupportedOp,
|
||||
///
|
||||
/// Invalid einsum expression
|
||||
#[error("invalid einsum expression")]
|
||||
InvalidEinsum,
|
||||
/// Flush error
|
||||
@@ -100,4 +100,13 @@ pub enum CircuitError {
|
||||
#[error("invalid input type {0}")]
|
||||
/// Invalid input type
|
||||
InvalidInputType(String),
|
||||
#[error("an element is missing from the shuffled version of the tensor")]
|
||||
/// An element is missing from the shuffled version of the tensor
|
||||
MissingShuffleElement,
|
||||
/// Visibility has not been set
|
||||
#[error("visibility has not been set")]
|
||||
UnsetVisibility,
|
||||
/// A decomposition base overflowed
|
||||
#[error("decomposition base overflowed")]
|
||||
DecompositionBaseOverflow,
|
||||
}
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
use super::*;
|
||||
use crate::{
|
||||
circuit::{layouts, utils, Tolerance},
|
||||
fieldutils::integer_rep_to_felt,
|
||||
circuit::{layouts, utils},
|
||||
fieldutils::{integer_rep_to_felt, IntegerRep},
|
||||
graph::multiplier_to_scale,
|
||||
tensor::{self, Tensor, TensorType, ValTensor},
|
||||
tensor::{self, DataFormat, Tensor, TensorType, ValTensor},
|
||||
};
|
||||
use halo2curves::ff::PrimeField;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -57,11 +57,13 @@ pub enum HybridOp {
|
||||
stride: Vec<usize>,
|
||||
kernel_shape: Vec<usize>,
|
||||
normalized: bool,
|
||||
data_format: DataFormat,
|
||||
},
|
||||
MaxPool {
|
||||
padding: Vec<(usize, usize)>,
|
||||
stride: Vec<usize>,
|
||||
pool_dims: Vec<usize>,
|
||||
data_format: DataFormat,
|
||||
},
|
||||
ReduceMin {
|
||||
axes: Vec<usize>,
|
||||
@@ -76,7 +78,9 @@ pub enum HybridOp {
|
||||
output_scale: utils::F32,
|
||||
axes: Vec<usize>,
|
||||
},
|
||||
RangeCheck(Tolerance),
|
||||
Output {
|
||||
decomp: bool,
|
||||
},
|
||||
Greater,
|
||||
GreaterEqual,
|
||||
Less,
|
||||
@@ -151,10 +155,10 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
|
||||
padding,
|
||||
stride,
|
||||
kernel_shape,
|
||||
normalized,
|
||||
normalized, data_format
|
||||
} => format!(
|
||||
"SUMPOOL (padding={:?}, stride={:?}, kernel_shape={:?}, normalized={})",
|
||||
padding, stride, kernel_shape, normalized
|
||||
"SUMPOOL (padding={:?}, stride={:?}, kernel_shape={:?}, normalized={}, data_format={:?})",
|
||||
padding, stride, kernel_shape, normalized, data_format
|
||||
),
|
||||
HybridOp::ReduceMax { axes } => format!("REDUCEMAX (axes={:?})", axes),
|
||||
HybridOp::ReduceArgMax { dim } => format!("REDUCEARGMAX (dim={})", dim),
|
||||
@@ -162,9 +166,10 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
|
||||
padding,
|
||||
stride,
|
||||
pool_dims,
|
||||
data_format,
|
||||
} => format!(
|
||||
"MaxPool (padding={:?}, stride={:?}, pool_dims={:?})",
|
||||
padding, stride, pool_dims
|
||||
"MaxPool (padding={:?}, stride={:?}, pool_dims={:?}, data_format={:?})",
|
||||
padding, stride, pool_dims, data_format
|
||||
),
|
||||
HybridOp::ReduceMin { axes } => format!("REDUCEMIN (axes={:?})", axes),
|
||||
HybridOp::ReduceArgMin { dim } => format!("REDUCEARGMIN (dim={})", dim),
|
||||
@@ -178,7 +183,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
|
||||
input_scale, output_scale, axes
|
||||
)
|
||||
}
|
||||
HybridOp::RangeCheck(p) => format!("RANGECHECK (tol={:?})", p),
|
||||
HybridOp::Output { decomp } => {
|
||||
format!("OUTPUT (decomp={})", decomp)
|
||||
}
|
||||
HybridOp::Greater => "GREATER".to_string(),
|
||||
HybridOp::GreaterEqual => "GREATEREQUAL".to_string(),
|
||||
HybridOp::Less => "LESS".to_string(),
|
||||
@@ -234,6 +241,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
|
||||
stride,
|
||||
kernel_shape,
|
||||
normalized,
|
||||
data_format,
|
||||
} => layouts::sumpool(
|
||||
config,
|
||||
region,
|
||||
@@ -242,6 +250,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
|
||||
stride,
|
||||
kernel_shape,
|
||||
*normalized,
|
||||
*data_format,
|
||||
)?,
|
||||
HybridOp::Recip {
|
||||
input_scale,
|
||||
@@ -250,8 +259,8 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
|
||||
config,
|
||||
region,
|
||||
values[..].try_into()?,
|
||||
integer_rep_to_felt(input_scale.0 as i128),
|
||||
integer_rep_to_felt(output_scale.0 as i128),
|
||||
integer_rep_to_felt(input_scale.0 as IntegerRep),
|
||||
integer_rep_to_felt(output_scale.0 as IntegerRep),
|
||||
)?,
|
||||
HybridOp::Div { denom, .. } => {
|
||||
if denom.0.fract() == 0.0 {
|
||||
@@ -259,7 +268,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
|
||||
config,
|
||||
region,
|
||||
values[..].try_into()?,
|
||||
integer_rep_to_felt(denom.0 as i128),
|
||||
integer_rep_to_felt(denom.0 as IntegerRep),
|
||||
)?
|
||||
} else {
|
||||
layouts::nonlinearity(
|
||||
@@ -282,6 +291,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
|
||||
padding,
|
||||
stride,
|
||||
pool_dims,
|
||||
data_format,
|
||||
} => layouts::max_pool(
|
||||
config,
|
||||
region,
|
||||
@@ -289,6 +299,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
|
||||
padding,
|
||||
stride,
|
||||
pool_dims,
|
||||
*data_format,
|
||||
)?,
|
||||
HybridOp::ReduceMax { axes } => {
|
||||
layouts::max_axes(config, region, values[..].try_into()?, axes)?
|
||||
@@ -314,13 +325,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
|
||||
*output_scale,
|
||||
axes,
|
||||
)?,
|
||||
HybridOp::RangeCheck(tol) => layouts::range_check_percent(
|
||||
config,
|
||||
region,
|
||||
values[..].try_into()?,
|
||||
tol.scale,
|
||||
tol.val,
|
||||
)?,
|
||||
HybridOp::Output { decomp } => {
|
||||
layouts::output(config, region, values[..].try_into()?, *decomp)?
|
||||
}
|
||||
HybridOp::Greater => layouts::greater(config, region, values[..].try_into()?)?,
|
||||
HybridOp::GreaterEqual => {
|
||||
layouts::greater_equal(config, region, values[..].try_into()?)?
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,8 @@
|
||||
use std::any::Any;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use tract_onnx::prelude::DatumType;
|
||||
|
||||
use crate::{
|
||||
graph::quantize_tensor,
|
||||
@@ -96,6 +98,8 @@ pub enum InputType {
|
||||
Int,
|
||||
///
|
||||
TDim,
|
||||
///
|
||||
Unknown,
|
||||
}
|
||||
|
||||
impl InputType {
|
||||
@@ -132,6 +136,7 @@ impl InputType {
|
||||
let int_input = input.clone().to_i64().unwrap();
|
||||
*input = T::from_i64(int_input).unwrap();
|
||||
}
|
||||
InputType::Unknown => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -152,6 +157,30 @@ impl std::str::FromStr for InputType {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
impl From<DatumType> for InputType {
|
||||
/// # Panics
|
||||
/// Panics if the datum type is not supported
|
||||
fn from(datum_type: DatumType) -> Self {
|
||||
match datum_type {
|
||||
DatumType::Bool => InputType::Bool,
|
||||
DatumType::F16 => InputType::F16,
|
||||
DatumType::F32 => InputType::F32,
|
||||
DatumType::F64 => InputType::F64,
|
||||
DatumType::I8 => InputType::Int,
|
||||
DatumType::I16 => InputType::Int,
|
||||
DatumType::I32 => InputType::Int,
|
||||
DatumType::I64 => InputType::Int,
|
||||
DatumType::U8 => InputType::Int,
|
||||
DatumType::U16 => InputType::Int,
|
||||
DatumType::U32 => InputType::Int,
|
||||
DatumType::U64 => InputType::Int,
|
||||
DatumType::TDim => InputType::TDim,
|
||||
_ => unimplemented!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
pub struct Input {
|
||||
@@ -159,6 +188,8 @@ pub struct Input {
|
||||
pub scale: crate::Scale,
|
||||
///
|
||||
pub datum_type: InputType,
|
||||
/// decomp check
|
||||
pub decomp: bool,
|
||||
}
|
||||
|
||||
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Input {
|
||||
@@ -196,6 +227,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Input
|
||||
config,
|
||||
region,
|
||||
values[..].try_into()?,
|
||||
self.decomp,
|
||||
)?)),
|
||||
}
|
||||
} else {
|
||||
@@ -251,20 +283,26 @@ pub struct Constant<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
|
||||
///
|
||||
#[serde(skip)]
|
||||
pub pre_assigned_val: Option<ValTensor<F>>,
|
||||
///
|
||||
pub decomp: bool,
|
||||
}
|
||||
|
||||
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Constant<F> {
|
||||
///
|
||||
pub fn new(quantized_values: Tensor<F>, raw_values: Tensor<f32>) -> Self {
|
||||
pub fn new(quantized_values: Tensor<F>, raw_values: Tensor<f32>, decomp: bool) -> Self {
|
||||
Self {
|
||||
quantized_values,
|
||||
raw_values,
|
||||
pre_assigned_val: None,
|
||||
decomp,
|
||||
}
|
||||
}
|
||||
/// Rebase the scale of the constant
|
||||
pub fn rebase_scale(&mut self, new_scale: crate::Scale) -> Result<(), CircuitError> {
|
||||
let visibility = self.quantized_values.visibility().unwrap();
|
||||
let visibility = match self.quantized_values.visibility() {
|
||||
Some(v) => v,
|
||||
None => return Err(CircuitError::UnsetVisibility),
|
||||
};
|
||||
self.quantized_values = quantize_tensor(self.raw_values.clone(), new_scale, &visibility)?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -281,13 +319,8 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Constant<F> {
|
||||
}
|
||||
|
||||
impl<
|
||||
F: PrimeField
|
||||
+ TensorType
|
||||
+ PartialOrd
|
||||
+ std::hash::Hash
|
||||
+ Serialize
|
||||
+ for<'de> Deserialize<'de>,
|
||||
> Op<F> for Constant<F>
|
||||
F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Serialize + for<'de> Deserialize<'de>,
|
||||
> Op<F> for Constant<F>
|
||||
{
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
@@ -308,7 +341,12 @@ impl<
|
||||
self.quantized_values.clone().try_into()?
|
||||
};
|
||||
// we gotta constrain it once if its used multiple times
|
||||
Ok(Some(layouts::identity(config, region, &[value])?))
|
||||
Ok(Some(layouts::identity(
|
||||
config,
|
||||
region,
|
||||
&[value],
|
||||
self.decomp,
|
||||
)?))
|
||||
}
|
||||
|
||||
fn clone_dyn(&self) -> Box<dyn Op<F>> {
|
||||
|
||||
@@ -4,6 +4,7 @@ use crate::{
|
||||
utils::{self, F32},
|
||||
},
|
||||
tensor::{self, Tensor, TensorError},
|
||||
tensor::{DataFormat, KernelFormat},
|
||||
};
|
||||
|
||||
use super::{base::BaseOp, *};
|
||||
@@ -43,10 +44,12 @@ pub enum PolyOp {
|
||||
padding: Vec<(usize, usize)>,
|
||||
stride: Vec<usize>,
|
||||
group: usize,
|
||||
data_format: DataFormat,
|
||||
kernel_format: KernelFormat,
|
||||
},
|
||||
Downsample {
|
||||
axis: usize,
|
||||
stride: usize,
|
||||
stride: isize,
|
||||
modulo: usize,
|
||||
},
|
||||
DeConv {
|
||||
@@ -54,6 +57,8 @@ pub enum PolyOp {
|
||||
output_padding: Vec<usize>,
|
||||
stride: Vec<usize>,
|
||||
group: usize,
|
||||
data_format: DataFormat,
|
||||
kernel_format: KernelFormat,
|
||||
},
|
||||
Add,
|
||||
Sub,
|
||||
@@ -103,13 +108,8 @@ pub enum PolyOp {
|
||||
}
|
||||
|
||||
impl<
|
||||
F: PrimeField
|
||||
+ TensorType
|
||||
+ PartialOrd
|
||||
+ std::hash::Hash
|
||||
+ Serialize
|
||||
+ for<'de> Deserialize<'de>,
|
||||
> Op<F> for PolyOp
|
||||
F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Serialize + for<'de> Deserialize<'de>,
|
||||
> Op<F> for PolyOp
|
||||
{
|
||||
/// Returns a reference to the Any trait.
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
@@ -165,10 +165,12 @@ impl<
|
||||
stride,
|
||||
padding,
|
||||
group,
|
||||
data_format,
|
||||
kernel_format,
|
||||
} => {
|
||||
format!(
|
||||
"CONV (stride={:?}, padding={:?}, group={})",
|
||||
stride, padding, group
|
||||
"CONV (stride={:?}, padding={:?}, group={}, data_format={:?}, kernel_format={:?})",
|
||||
stride, padding, group, data_format, kernel_format
|
||||
)
|
||||
}
|
||||
PolyOp::DeConv {
|
||||
@@ -176,10 +178,12 @@ impl<
|
||||
padding,
|
||||
output_padding,
|
||||
group,
|
||||
data_format,
|
||||
kernel_format,
|
||||
} => {
|
||||
format!(
|
||||
"DECONV (stride={:?}, padding={:?}, output_padding={:?}, group={})",
|
||||
stride, padding, output_padding, group
|
||||
"DECONV (stride={:?}, padding={:?}, output_padding={:?}, group={}, data_format={:?}, kernel_format={:?})",
|
||||
stride, padding, output_padding, group, data_format, kernel_format
|
||||
)
|
||||
}
|
||||
PolyOp::Concat { axis } => format!("CONCAT (axis={})", axis),
|
||||
@@ -242,6 +246,8 @@ impl<
|
||||
padding,
|
||||
stride,
|
||||
group,
|
||||
data_format,
|
||||
kernel_format,
|
||||
} => layouts::conv(
|
||||
config,
|
||||
region,
|
||||
@@ -249,9 +255,17 @@ impl<
|
||||
padding,
|
||||
stride,
|
||||
*group,
|
||||
*data_format,
|
||||
*kernel_format,
|
||||
)?,
|
||||
PolyOp::GatherElements { dim, constant_idx } => {
|
||||
if let Some(idx) = constant_idx {
|
||||
if values.len() != 1 {
|
||||
return Err(TensorError::DimError(
|
||||
"GatherElements only accepts single inputs".to_string(),
|
||||
)
|
||||
.into());
|
||||
}
|
||||
tensor::ops::gather_elements(values[0].get_inner_tensor()?, idx, *dim)?.into()
|
||||
} else {
|
||||
layouts::gather_elements(config, region, values[..].try_into()?, *dim)?.0
|
||||
@@ -269,6 +283,12 @@ impl<
|
||||
}
|
||||
PolyOp::ScatterElements { dim, constant_idx } => {
|
||||
if let Some(idx) = constant_idx {
|
||||
if values.len() != 2 {
|
||||
return Err(TensorError::DimError(
|
||||
"ScatterElements requires two inputs".to_string(),
|
||||
)
|
||||
.into());
|
||||
}
|
||||
tensor::ops::scatter(
|
||||
values[0].get_inner_tensor()?,
|
||||
idx,
|
||||
@@ -297,6 +317,8 @@ impl<
|
||||
output_padding,
|
||||
stride,
|
||||
group,
|
||||
data_format,
|
||||
kernel_format,
|
||||
} => layouts::deconv(
|
||||
config,
|
||||
region,
|
||||
@@ -305,13 +327,17 @@ impl<
|
||||
output_padding,
|
||||
stride,
|
||||
*group,
|
||||
*data_format,
|
||||
*kernel_format,
|
||||
)?,
|
||||
PolyOp::Add => layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Add)?,
|
||||
PolyOp::Sub => layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Sub)?,
|
||||
PolyOp::Mult => {
|
||||
layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Mult)?
|
||||
}
|
||||
PolyOp::Identity { .. } => layouts::identity(config, region, values[..].try_into()?)?,
|
||||
PolyOp::Identity { .. } => {
|
||||
layouts::identity(config, region, values[..].try_into()?, false)?
|
||||
}
|
||||
PolyOp::Reshape(d) | PolyOp::Flatten(d) => layouts::reshape(values[..].try_into()?, d)?,
|
||||
PolyOp::Pad(p) => {
|
||||
if values.len() != 1 {
|
||||
|
||||
@@ -132,21 +132,16 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
|
||||
(first_element, op_f.output[0])
|
||||
}
|
||||
|
||||
///
|
||||
/// calculates the column size given the number of rows and reserved blinding rows
|
||||
pub fn cal_col_size(logrows: usize, reserved_blinding_rows: usize) -> usize {
|
||||
2usize.pow(logrows as u32) - reserved_blinding_rows
|
||||
}
|
||||
|
||||
///
|
||||
pub fn cal_bit_range(bits: usize, reserved_blinding_rows: usize) -> usize {
|
||||
2usize.pow(bits as u32) - reserved_blinding_rows
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
pub fn num_cols_required(range_len: IntegerRep, col_size: usize) -> usize {
|
||||
// number of cols needed to store the range
|
||||
(range_len / (col_size as IntegerRep)) as usize + 1
|
||||
(range_len / col_size as IntegerRep) as usize + 1
|
||||
}
|
||||
|
||||
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
|
||||
@@ -168,7 +163,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
|
||||
range: Range,
|
||||
logrows: usize,
|
||||
nonlinearity: &LookupOp,
|
||||
preexisting_inputs: Option<Vec<TableColumn>>,
|
||||
preexisting_inputs: &mut Vec<TableColumn>,
|
||||
) -> Table<F> {
|
||||
let factors = cs.blinding_factors() + RESERVED_BLINDING_ROWS_PAD;
|
||||
let col_size = Self::cal_col_size(logrows, factors);
|
||||
@@ -177,28 +172,28 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
|
||||
|
||||
debug!("table range: {:?}", range);
|
||||
|
||||
let table_inputs = preexisting_inputs.unwrap_or_else(|| {
|
||||
let mut cols = vec![];
|
||||
for _ in 0..num_cols {
|
||||
cols.push(cs.lookup_table_column());
|
||||
// validate enough columns are provided to store the range
|
||||
if preexisting_inputs.len() < num_cols {
|
||||
// add columns to match the required number of columns
|
||||
let diff = num_cols - preexisting_inputs.len();
|
||||
for _ in 0..diff {
|
||||
preexisting_inputs.push(cs.lookup_table_column());
|
||||
}
|
||||
cols
|
||||
});
|
||||
|
||||
let num_cols = table_inputs.len();
|
||||
}
|
||||
|
||||
let num_cols = preexisting_inputs.len();
|
||||
if num_cols > 1 {
|
||||
warn!("Using {} columns for non-linearity table.", num_cols);
|
||||
}
|
||||
|
||||
let table_outputs = table_inputs
|
||||
let table_outputs = preexisting_inputs
|
||||
.iter()
|
||||
.map(|_| cs.lookup_table_column())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Table {
|
||||
nonlinearity: nonlinearity.clone(),
|
||||
table_inputs,
|
||||
table_inputs: preexisting_inputs.clone(),
|
||||
table_outputs,
|
||||
is_assigned: false,
|
||||
selector_constructor: SelectorConstructor::new(num_cols),
|
||||
@@ -355,16 +350,11 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
|
||||
integer_rep_to_felt(chunk * (self.col_size as IntegerRep) + self.range.0)
|
||||
}
|
||||
|
||||
///
|
||||
/// calculates the column size
|
||||
pub fn cal_col_size(logrows: usize, reserved_blinding_rows: usize) -> usize {
|
||||
2usize.pow(logrows as u32) - reserved_blinding_rows
|
||||
}
|
||||
|
||||
///
|
||||
pub fn cal_bit_range(bits: usize, reserved_blinding_rows: usize) -> usize {
|
||||
2usize.pow(bits as u32) - reserved_blinding_rows
|
||||
}
|
||||
|
||||
/// get column index given input
|
||||
pub fn get_col_index(&self, input: F) -> F {
|
||||
// range is split up into chunks of size col_size, find the chunk that input is in
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
use crate::circuit::ops::poly::PolyOp;
|
||||
use crate::circuit::*;
|
||||
use crate::tensor::{DataFormat, KernelFormat};
|
||||
use crate::tensor::{Tensor, TensorType, ValTensor, VarTensor};
|
||||
use halo2_proofs::{
|
||||
circuit::{Layouter, SimpleFloorPlanner, Value},
|
||||
@@ -1040,6 +1041,10 @@ mod conv {
|
||||
let a = VarTensor::new_advice(cs, K, 1, (LEN + 1) * LEN);
|
||||
let b = VarTensor::new_advice(cs, K, 1, (LEN + 1) * LEN);
|
||||
let output = VarTensor::new_advice(cs, K, 1, (LEN + 1) * LEN);
|
||||
|
||||
// column for constants
|
||||
let _constant = VarTensor::constant_cols(cs, K, 8, false);
|
||||
|
||||
Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE)
|
||||
}
|
||||
|
||||
@@ -1061,6 +1066,8 @@ mod conv {
|
||||
padding: vec![(1, 1); 2],
|
||||
stride: vec![2; 2],
|
||||
group: 1,
|
||||
data_format: DataFormat::default(),
|
||||
kernel_format: KernelFormat::default(),
|
||||
}),
|
||||
)
|
||||
.map_err(|_| Error::Synthesis)
|
||||
@@ -1171,7 +1178,7 @@ mod conv_col_ultra_overflow {
|
||||
|
||||
use super::*;
|
||||
|
||||
const K: usize = 4;
|
||||
const K: usize = 6;
|
||||
const LEN: usize = 10;
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -1191,9 +1198,10 @@ mod conv_col_ultra_overflow {
|
||||
}
|
||||
|
||||
fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
|
||||
let a = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN);
|
||||
let b = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN);
|
||||
let output = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN);
|
||||
let a = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN * LEN);
|
||||
let b = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN * LEN);
|
||||
let output = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN * LEN);
|
||||
let _constant = VarTensor::constant_cols(cs, K, LEN * LEN * LEN * LEN, false);
|
||||
Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE)
|
||||
}
|
||||
|
||||
@@ -1215,6 +1223,8 @@ mod conv_col_ultra_overflow {
|
||||
padding: vec![(1, 1); 2],
|
||||
stride: vec![2; 2],
|
||||
group: 1,
|
||||
data_format: DataFormat::default(),
|
||||
kernel_format: KernelFormat::default(),
|
||||
}),
|
||||
)
|
||||
.map_err(|_| Error::Synthesis)
|
||||
@@ -1372,6 +1382,8 @@ mod conv_relu_col_ultra_overflow {
|
||||
padding: vec![(1, 1); 2],
|
||||
stride: vec![2; 2],
|
||||
group: 1,
|
||||
data_format: DataFormat::default(),
|
||||
kernel_format: KernelFormat::default(),
|
||||
}),
|
||||
)
|
||||
.map_err(|_| Error::Synthesis);
|
||||
@@ -1776,13 +1788,18 @@ mod shuffle {
|
||||
|
||||
let d = VarTensor::new_advice(cs, K, 1, LEN);
|
||||
let e = VarTensor::new_advice(cs, K, 1, LEN);
|
||||
let f: VarTensor = VarTensor::new_advice(cs, K, 1, LEN);
|
||||
|
||||
let _constant = VarTensor::constant_cols(cs, K, LEN * NUM_LOOP, false);
|
||||
|
||||
let mut config =
|
||||
Self::Config::configure(cs, &[a.clone(), b.clone()], &c, CheckMode::SAFE);
|
||||
config
|
||||
.configure_shuffles(cs, &[a.clone(), b.clone()], &[d.clone(), e.clone()])
|
||||
.configure_shuffles(
|
||||
cs,
|
||||
&[a.clone(), b.clone(), c.clone()],
|
||||
&[d.clone(), e.clone(), f.clone()],
|
||||
)
|
||||
.unwrap();
|
||||
config
|
||||
}
|
||||
@@ -1803,6 +1820,7 @@ mod shuffle {
|
||||
&mut region,
|
||||
&self.inputs[i],
|
||||
&self.references[i],
|
||||
layouts::SortCollisionMode::Unsorted,
|
||||
)
|
||||
.map_err(|_| Error::Synthesis)?;
|
||||
}
|
||||
@@ -1988,7 +2006,7 @@ mod add_with_overflow_and_poseidon {
|
||||
let base = BaseConfig::configure(cs, &[a, b], &output, CheckMode::SAFE);
|
||||
VarTensor::constant_cols(cs, K, 2, false);
|
||||
|
||||
let poseidon = PoseidonChip::<PoseidonSpec, WIDTH, RATE, WIDTH>::configure(cs, ());
|
||||
let poseidon = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::configure(cs, ());
|
||||
|
||||
MyCircuitConfig { base, poseidon }
|
||||
}
|
||||
@@ -1998,7 +2016,7 @@ mod add_with_overflow_and_poseidon {
|
||||
mut config: Self::Config,
|
||||
mut layouter: impl Layouter<Fr>,
|
||||
) -> Result<(), Error> {
|
||||
let poseidon_chip: PoseidonChip<PoseidonSpec, WIDTH, RATE, WIDTH> =
|
||||
let poseidon_chip: PoseidonChip<PoseidonSpec, WIDTH, RATE> =
|
||||
PoseidonChip::new(config.poseidon.clone());
|
||||
|
||||
let assigned_inputs_a =
|
||||
@@ -2033,11 +2051,9 @@ mod add_with_overflow_and_poseidon {
|
||||
let b = (0..LEN)
|
||||
.map(|i| halo2curves::bn256::Fr::from(i as u64 + 1))
|
||||
.collect::<Vec<_>>();
|
||||
let commitment_a =
|
||||
PoseidonChip::<PoseidonSpec, WIDTH, RATE, WIDTH>::run(a.clone()).unwrap()[0][0];
|
||||
let commitment_a = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(a.clone()).unwrap()[0][0];
|
||||
|
||||
let commitment_b =
|
||||
PoseidonChip::<PoseidonSpec, WIDTH, RATE, WIDTH>::run(b.clone()).unwrap()[0][0];
|
||||
let commitment_b = PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(b.clone()).unwrap()[0][0];
|
||||
|
||||
// parameters
|
||||
let a = Tensor::from(a.into_iter().map(Value::known));
|
||||
@@ -2059,13 +2075,11 @@ mod add_with_overflow_and_poseidon {
|
||||
let b = (0..LEN)
|
||||
.map(|i| halo2curves::bn256::Fr::from(i as u64 + 1))
|
||||
.collect::<Vec<_>>();
|
||||
let commitment_a = PoseidonChip::<PoseidonSpec, WIDTH, RATE, WIDTH>::run(a.clone())
|
||||
.unwrap()[0][0]
|
||||
+ Fr::one();
|
||||
let commitment_a =
|
||||
PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(a.clone()).unwrap()[0][0] + Fr::one();
|
||||
|
||||
let commitment_b = PoseidonChip::<PoseidonSpec, WIDTH, RATE, WIDTH>::run(b.clone())
|
||||
.unwrap()[0][0]
|
||||
+ Fr::one();
|
||||
let commitment_b =
|
||||
PoseidonChip::<PoseidonSpec, WIDTH, RATE>::run(b.clone()).unwrap()[0][0] + Fr::one();
|
||||
|
||||
// parameters
|
||||
let a = Tensor::from(a.into_iter().map(Value::known));
|
||||
|
||||
110
src/commands.rs
110
src/commands.rs
@@ -1,6 +1,6 @@
|
||||
use alloy::primitives::Address as H160;
|
||||
use clap::{Command, Parser, Subcommand};
|
||||
use clap_complete::{generate, Generator, Shell};
|
||||
use clap_complete::{Generator, Shell, generate};
|
||||
#[cfg(feature = "python-bindings")]
|
||||
use pyo3::{conversion::FromPyObject, exceptions::PyValueError, prelude::*};
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -8,7 +8,7 @@ use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
use tosubcommand::{ToFlags, ToSubcommand};
|
||||
|
||||
use crate::{pfsys::ProofType, Commitments, RunArgs};
|
||||
use crate::{Commitments, RunArgs, pfsys::ProofType};
|
||||
|
||||
use crate::circuit::CheckMode;
|
||||
use crate::graph::TestDataSource;
|
||||
@@ -83,7 +83,7 @@ pub const DEFAULT_VK_SOL: &str = "vk.sol";
|
||||
/// Default VK abi path
|
||||
pub const DEFAULT_VK_ABI: &str = "vk.abi";
|
||||
/// Default scale rebase multipliers for calibration
|
||||
pub const DEFAULT_SCALE_REBASE_MULTIPLIERS: &str = "1,2,10";
|
||||
pub const DEFAULT_SCALE_REBASE_MULTIPLIERS: &str = "1,10";
|
||||
/// Default use reduced srs for verification
|
||||
pub const DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION: &str = "false";
|
||||
/// Default only check for range check rebase
|
||||
@@ -360,8 +360,13 @@ pub fn get_styles() -> clap::builder::Styles {
|
||||
}
|
||||
|
||||
/// Print completions for the given generator
|
||||
pub fn print_completions<G: Generator>(gen: G, cmd: &mut Command) {
|
||||
generate(gen, cmd, cmd.get_name().to_string(), &mut std::io::stdout());
|
||||
pub fn print_completions<G: Generator>(r#gen: G, cmd: &mut Command) {
|
||||
generate(
|
||||
r#gen,
|
||||
cmd,
|
||||
cmd.get_name().to_string(),
|
||||
&mut std::io::stdout(),
|
||||
);
|
||||
}
|
||||
|
||||
#[allow(missing_docs)]
|
||||
@@ -377,6 +382,44 @@ pub struct Cli {
|
||||
pub command: Option<Commands>,
|
||||
}
|
||||
|
||||
/// Custom parser for data field that handles both direct JSON strings and file paths with '@' prefix
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, PartialOrd)]
|
||||
pub struct DataField(pub String);
|
||||
|
||||
impl FromStr for DataField {
|
||||
type Err = String;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
// Check if the input starts with '@'
|
||||
if s.starts_with('@') {
|
||||
// Extract the file path (remove the '@' prefix)
|
||||
let file_path = &s[1..];
|
||||
|
||||
// Read the file content
|
||||
let content = std::fs::read_to_string(file_path)
|
||||
.map_err(|e| format!("Failed to read data file '{}': {}", file_path, e))?;
|
||||
|
||||
// Return the file content as the data field value
|
||||
Ok(DataField(content))
|
||||
} else {
|
||||
// Use the input string directly
|
||||
Ok(DataField(s.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ToFlags for DataField {
|
||||
fn to_flags(&self) -> Vec<String> {
|
||||
vec![self.0.clone()]
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for DataField {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(missing_docs)]
|
||||
#[derive(Debug, Subcommand, Clone, Deserialize, Serialize, PartialEq, PartialOrd, ToSubcommand)]
|
||||
pub enum Commands {
|
||||
@@ -395,9 +438,9 @@ pub enum Commands {
|
||||
|
||||
/// Generates the witness from an input file.
|
||||
GenWitness {
|
||||
/// The path to the .json data file
|
||||
#[arg(short = 'D', long, default_value = DEFAULT_DATA, value_hint = clap::ValueHint::FilePath)]
|
||||
data: Option<PathBuf>,
|
||||
/// The path to the .json data file (with @ prefix) or a raw data string of the form '{"input_data": [[1, 2, 3]]}'
|
||||
#[arg(short = 'D', long, default_value = DEFAULT_DATA, value_parser = DataField::from_str)]
|
||||
data: Option<DataField>,
|
||||
/// The path to the compiled model file (generated using the compile-circuit command)
|
||||
#[arg(short = 'M', long, default_value = DEFAULT_COMPILED_CIRCUIT, value_hint = clap::ValueHint::FilePath)]
|
||||
compiled_circuit: Option<PathBuf>,
|
||||
@@ -438,12 +481,18 @@ pub enum Commands {
|
||||
/// random seed for reproducibility (optional)
|
||||
#[arg(long, value_hint = clap::ValueHint::Other, default_value = DEFAULT_SEED)]
|
||||
seed: u64,
|
||||
/// min value for random data
|
||||
#[arg(long, value_hint = clap::ValueHint::Other)]
|
||||
min: Option<f32>,
|
||||
/// max value for random data
|
||||
#[arg(long, value_hint = clap::ValueHint::Other)]
|
||||
max: Option<f32>,
|
||||
},
|
||||
/// Calibrates the proving scale, lookup bits and logrows from a circuit settings file.
|
||||
CalibrateSettings {
|
||||
/// The path to the .json calibration data file.
|
||||
#[arg(short = 'D', long, default_value = DEFAULT_CALIBRATION_FILE, value_hint = clap::ValueHint::FilePath)]
|
||||
data: Option<PathBuf>,
|
||||
data: Option<String>,
|
||||
/// The path to the .onnx model file
|
||||
#[arg(short = 'M', long, default_value = DEFAULT_MODEL, value_hint = clap::ValueHint::FilePath)]
|
||||
model: Option<PathBuf>,
|
||||
@@ -627,7 +676,7 @@ pub enum Commands {
|
||||
SetupTestEvmData {
|
||||
/// The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
|
||||
#[arg(short = 'D', long, value_hint = clap::ValueHint::FilePath)]
|
||||
data: Option<PathBuf>,
|
||||
data: Option<String>,
|
||||
/// The path to the compiled model file (generated using the compile-circuit command)
|
||||
#[arg(short = 'M', long, value_hint = clap::ValueHint::FilePath)]
|
||||
compiled_circuit: Option<PathBuf>,
|
||||
@@ -636,9 +685,9 @@ pub enum Commands {
|
||||
/// Should include both the network input (possibly private) and the network output (public input to the proof)
|
||||
#[arg(short = 'T', long, value_hint = clap::ValueHint::FilePath)]
|
||||
test_data: PathBuf,
|
||||
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
|
||||
/// RPC URL for an Ethereum node
|
||||
#[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
|
||||
rpc_url: Option<String>,
|
||||
rpc_url: String,
|
||||
/// where the input data come from
|
||||
#[arg(long, default_value = "on-chain", value_hint = clap::ValueHint::Other)]
|
||||
input_source: TestDataSource,
|
||||
@@ -646,19 +695,6 @@ pub enum Commands {
|
||||
#[arg(long, default_value = "on-chain", value_hint = clap::ValueHint::Other)]
|
||||
output_source: TestDataSource,
|
||||
},
|
||||
/// The Data Attestation Verifier contract stores the account calls to fetch data to feed into ezkl. This call data can be updated by an admin account. This tests that admin account is able to update this call data.
|
||||
#[command(arg_required_else_help = true)]
|
||||
TestUpdateAccountCalls {
|
||||
/// The path to the verifier contract's address
|
||||
#[arg(long, value_hint = clap::ValueHint::Other)]
|
||||
addr: H160Flag,
|
||||
/// The path to the .json data file.
|
||||
#[arg(short = 'D', long, value_hint = clap::ValueHint::FilePath)]
|
||||
data: Option<PathBuf>,
|
||||
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
|
||||
#[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
|
||||
rpc_url: Option<String>,
|
||||
},
|
||||
/// Swaps the positions in the transcript that correspond to commitments
|
||||
SwapProofCommitments {
|
||||
/// The path to the proof file
|
||||
@@ -736,7 +772,7 @@ pub enum Commands {
|
||||
},
|
||||
/// Creates an Evm verifier artifact for a single proof to be used by the reusable verifier
|
||||
#[command(name = "create-evm-vka")]
|
||||
CreateEvmVKArtifact {
|
||||
CreateEvmVka {
|
||||
/// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
|
||||
#[arg(long, value_hint = clap::ValueHint::FilePath)]
|
||||
srs_path: Option<PathBuf>,
|
||||
@@ -755,7 +791,7 @@ pub enum Commands {
|
||||
},
|
||||
/// Creates an Evm verifier that attests to on-chain inputs for a single proof
|
||||
#[command(name = "create-evm-da")]
|
||||
CreateEvmDataAttestation {
|
||||
CreateEvmDa {
|
||||
/// The path to load circuit settings .json file from (generated using the gen-settings command)
|
||||
#[arg(short = 'S', long, default_value = DEFAULT_SETTINGS, value_hint = clap::ValueHint::FilePath)]
|
||||
settings_path: Option<PathBuf>,
|
||||
@@ -771,7 +807,7 @@ pub enum Commands {
|
||||
/// view functions that return the data that the network
|
||||
/// ingests as inputs.
|
||||
#[arg(short = 'D', long, default_value = DEFAULT_DATA, value_hint = clap::ValueHint::FilePath)]
|
||||
data: Option<PathBuf>,
|
||||
data: Option<String>,
|
||||
/// The path to the witness file. This is needed for proof swapping for kzg commitments.
|
||||
#[arg(short = 'W', long, default_value = DEFAULT_WITNESS, value_hint = clap::ValueHint::FilePath)]
|
||||
witness: Option<PathBuf>,
|
||||
@@ -846,9 +882,9 @@ pub enum Commands {
|
||||
/// The path to the Solidity code (generated using the create-evm-verifier command)
|
||||
#[arg(long, default_value = DEFAULT_SOL_CODE, value_hint = clap::ValueHint::FilePath)]
|
||||
sol_code_path: Option<PathBuf>,
|
||||
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
|
||||
#[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
|
||||
rpc_url: Option<String>,
|
||||
/// RPC URL for an Ethereum node
|
||||
#[arg(short = 'U', long, default_value = DEFAULT_CONTRACT_ADDRESS, value_hint = clap::ValueHint::Url)]
|
||||
rpc_url: String,
|
||||
#[arg(long, default_value = DEFAULT_CONTRACT_ADDRESS, value_hint = clap::ValueHint::Other)]
|
||||
/// The path to output the contract address
|
||||
addr_path: Option<PathBuf>,
|
||||
@@ -864,19 +900,19 @@ pub enum Commands {
|
||||
},
|
||||
/// Deploys an evm verifier that allows for data attestation
|
||||
#[command(name = "deploy-evm-da")]
|
||||
DeployEvmDataAttestation {
|
||||
DeployEvmDa {
|
||||
/// The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
|
||||
#[arg(short = 'D', long, default_value = DEFAULT_DATA, value_hint = clap::ValueHint::FilePath)]
|
||||
data: Option<PathBuf>,
|
||||
data: Option<String>,
|
||||
/// The path to load circuit settings .json file from (generated using the gen-settings command)
|
||||
#[arg(long, default_value = DEFAULT_SETTINGS, value_hint = clap::ValueHint::FilePath)]
|
||||
settings_path: Option<PathBuf>,
|
||||
/// The path to the Solidity code
|
||||
#[arg(long, default_value = DEFAULT_SOL_CODE_DA, value_hint = clap::ValueHint::FilePath)]
|
||||
sol_code_path: Option<PathBuf>,
|
||||
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
|
||||
/// RPC URL for an Ethereum node
|
||||
#[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
|
||||
rpc_url: Option<String>,
|
||||
rpc_url: String,
|
||||
#[arg(long, default_value = DEFAULT_CONTRACT_ADDRESS_DA, value_hint = clap::ValueHint::FilePath)]
|
||||
/// The path to output the contract address
|
||||
addr_path: Option<PathBuf>,
|
||||
@@ -896,9 +932,9 @@ pub enum Commands {
|
||||
/// The path to verifier contract's address
|
||||
#[arg(long, default_value = DEFAULT_CONTRACT_ADDRESS, value_hint = clap::ValueHint::Other)]
|
||||
addr_verifier: H160Flag,
|
||||
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
|
||||
/// RPC URL for an Ethereum node
|
||||
#[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
|
||||
rpc_url: Option<String>,
|
||||
rpc_url: String,
|
||||
/// does the verifier use data attestation ?
|
||||
#[arg(long, value_hint = clap::ValueHint::Other)]
|
||||
addr_da: Option<H160Flag>,
|
||||
|
||||
719
src/eth.rs
719
src/eth.rs
File diff suppressed because one or more lines are too long
187
src/execute.rs
187
src/execute.rs
@@ -1,32 +1,30 @@
|
||||
use crate::circuit::region::RegionSettings;
|
||||
use crate::EZKL_BUF_CAPACITY;
|
||||
use crate::circuit::CheckMode;
|
||||
use crate::circuit::region::RegionSettings;
|
||||
use crate::commands::CalibrationTarget;
|
||||
use crate::eth::{
|
||||
deploy_contract_via_solidity, deploy_da_verifier_via_solidity, fix_da_multi_sol,
|
||||
fix_da_single_sol,
|
||||
};
|
||||
use crate::eth::{deploy_contract_via_solidity, deploy_da_verifier_via_solidity, fix_da_sol};
|
||||
#[allow(unused_imports)]
|
||||
use crate::eth::{get_contract_artifacts, verify_proof_via_solidity};
|
||||
use crate::graph::input::{Calls, GraphData};
|
||||
use crate::graph::input::GraphData;
|
||||
use crate::graph::{GraphCircuit, GraphSettings, GraphWitness, Model};
|
||||
use crate::graph::{TestDataSource, TestSources};
|
||||
use crate::pfsys::evm::aggregation_kzg::{AggregationCircuit, PoseidonTranscript};
|
||||
use crate::pfsys::{
|
||||
create_keys, load_pk, load_vk, save_params, save_pk, Snark, StrategyType, TranscriptType,
|
||||
ProofSplitCommit, create_proof_circuit, swap_proof_commitments_polycommit, verify_proof_circuit,
|
||||
};
|
||||
use crate::pfsys::{
|
||||
create_proof_circuit, swap_proof_commitments_polycommit, verify_proof_circuit, ProofSplitCommit,
|
||||
Snark, StrategyType, TranscriptType, create_keys, load_pk, load_vk, save_params, save_pk,
|
||||
};
|
||||
use crate::pfsys::{save_vk, srs::*};
|
||||
use crate::tensor::TensorError;
|
||||
use crate::EZKL_BUF_CAPACITY;
|
||||
use crate::{commands::*, EZKLError};
|
||||
use crate::{Commitments, RunArgs};
|
||||
use crate::{EZKLError, commands::*};
|
||||
use colored::Colorize;
|
||||
#[cfg(unix)]
|
||||
use gag::Gag;
|
||||
use halo2_proofs::dev::VerifyFailure;
|
||||
use halo2_proofs::plonk::{self, Circuit};
|
||||
use halo2_proofs::poly::VerificationStrategy;
|
||||
use halo2_proofs::poly::commitment::{CommitmentScheme, Params};
|
||||
use halo2_proofs::poly::commitment::{ParamsProver, Verifier};
|
||||
use halo2_proofs::poly::ipa::commitment::{IPACommitmentScheme, ParamsIPA};
|
||||
@@ -39,7 +37,6 @@ use halo2_proofs::poly::kzg::strategy::AccumulatorStrategy as KZGAccumulatorStra
|
||||
use halo2_proofs::poly::kzg::{
|
||||
commitment::ParamsKZG, strategy::SingleStrategy as KZGSingleStrategy,
|
||||
};
|
||||
use halo2_proofs::poly::VerificationStrategy;
|
||||
use halo2_proofs::transcript::{EncodedChallenge, TranscriptReadBuffer};
|
||||
use halo2_solidity_verifier;
|
||||
use halo2curves::bn256::{Bn256, Fr, G1Affine};
|
||||
@@ -48,14 +45,15 @@ use halo2curves::serde::SerdeObject;
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use instant::Instant;
|
||||
use itertools::Itertools;
|
||||
use lazy_static::lazy_static;
|
||||
use log::debug;
|
||||
use log::{info, trace, warn};
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde::Serialize;
|
||||
use serde::de::DeserializeOwned;
|
||||
use snark_verifier::loader::native::NativeLoader;
|
||||
use snark_verifier::system::halo2::Config;
|
||||
use snark_verifier::system::halo2::compile;
|
||||
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
|
||||
use snark_verifier::system::halo2::Config;
|
||||
use std::fs::File;
|
||||
use std::io::BufWriter;
|
||||
use std::io::{Cursor, Write};
|
||||
@@ -68,8 +66,6 @@ use thiserror::Error;
|
||||
use tract_onnx::prelude::IntoTensor;
|
||||
use tract_onnx::prelude::Tensor as TractTensor;
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
lazy_static! {
|
||||
#[derive(Debug)]
|
||||
/// The path to the ezkl related data.
|
||||
@@ -118,7 +114,7 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
} => gen_srs_cmd(
|
||||
srs_path,
|
||||
logrows as u32,
|
||||
commitment.unwrap_or(Commitments::from_str(DEFAULT_COMMITMENT).unwrap()),
|
||||
commitment.unwrap_or_else(|| Commitments::from_str(DEFAULT_COMMITMENT).unwrap()),
|
||||
),
|
||||
Commands::GetSrs {
|
||||
srs_path,
|
||||
@@ -141,11 +137,15 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
data,
|
||||
variables,
|
||||
seed,
|
||||
min,
|
||||
max,
|
||||
} => gen_random_data(
|
||||
model.unwrap_or(DEFAULT_MODEL.into()),
|
||||
data.unwrap_or(DEFAULT_DATA.into()),
|
||||
variables,
|
||||
seed,
|
||||
min,
|
||||
max,
|
||||
),
|
||||
Commands::CalibrateSettings {
|
||||
model,
|
||||
@@ -176,7 +176,7 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
srs_path,
|
||||
} => gen_witness(
|
||||
compiled_circuit.unwrap_or(DEFAULT_COMPILED_CIRCUIT.into()),
|
||||
data.unwrap_or(DEFAULT_DATA.into()),
|
||||
data.unwrap_or(DataField(DEFAULT_DATA.into())).to_string(),
|
||||
Some(output.unwrap_or(DEFAULT_WITNESS.into())),
|
||||
vk_path,
|
||||
srs_path,
|
||||
@@ -215,8 +215,7 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
addr_vk,
|
||||
)
|
||||
.map(|e| serde_json::to_string(&e).unwrap()),
|
||||
|
||||
Commands::CreateEvmVKArtifact {
|
||||
Commands::CreateEvmVka {
|
||||
vk_path,
|
||||
srs_path,
|
||||
settings_path,
|
||||
@@ -232,7 +231,7 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
)
|
||||
.await
|
||||
}
|
||||
Commands::CreateEvmDataAttestation {
|
||||
Commands::CreateEvmDa {
|
||||
settings_path,
|
||||
sol_code_path,
|
||||
abi_path,
|
||||
@@ -301,7 +300,7 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
input_source,
|
||||
output_source,
|
||||
} => {
|
||||
setup_test_evm_witness(
|
||||
setup_test_evm_data(
|
||||
data.unwrap_or(DEFAULT_DATA.into()),
|
||||
compiled_circuit.unwrap_or(DEFAULT_COMPILED_CIRCUIT.into()),
|
||||
test_data,
|
||||
@@ -311,11 +310,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
)
|
||||
.await
|
||||
}
|
||||
Commands::TestUpdateAccountCalls {
|
||||
addr,
|
||||
data,
|
||||
rpc_url,
|
||||
} => test_update_account_calls(addr, data.unwrap_or(DEFAULT_DATA.into()), rpc_url).await,
|
||||
Commands::SwapProofCommitments {
|
||||
proof_path,
|
||||
witness_path,
|
||||
@@ -442,7 +436,7 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
|
||||
)
|
||||
.await
|
||||
}
|
||||
Commands::DeployEvmDataAttestation {
|
||||
Commands::DeployEvmDa {
|
||||
data,
|
||||
settings_path,
|
||||
sol_code_path,
|
||||
@@ -516,7 +510,9 @@ fn update_ezkl_binary(version: &Option<String>) -> Result<String, EZKLError> {
|
||||
.status()
|
||||
.is_err()
|
||||
{
|
||||
log::warn!("bash is not installed on this system, trying to run the install script with sh (may fail)");
|
||||
log::warn!(
|
||||
"bash is not installed on this system, trying to run the install script with sh (may fail)"
|
||||
);
|
||||
"sh"
|
||||
} else {
|
||||
"bash"
|
||||
@@ -725,7 +721,7 @@ pub(crate) fn table(model: PathBuf, run_args: RunArgs) -> Result<String, EZKLErr
|
||||
|
||||
pub(crate) async fn gen_witness(
|
||||
compiled_circuit_path: PathBuf,
|
||||
data: PathBuf,
|
||||
data: String,
|
||||
output: Option<PathBuf>,
|
||||
vk_path: Option<PathBuf>,
|
||||
srs_path: Option<PathBuf>,
|
||||
@@ -733,7 +729,7 @@ pub(crate) async fn gen_witness(
|
||||
// these aren't real values so the sanity checks are mostly meaningless
|
||||
|
||||
let mut circuit = GraphCircuit::load(compiled_circuit_path)?;
|
||||
let data: GraphData = GraphData::from_path(data)?;
|
||||
let data = GraphData::from_str(&data)?;
|
||||
let settings = circuit.settings().clone();
|
||||
|
||||
let vk = if let Some(vk) = vk_path {
|
||||
@@ -847,6 +843,8 @@ pub(crate) fn gen_random_data(
|
||||
data_path: PathBuf,
|
||||
variables: Vec<(String, usize)>,
|
||||
seed: u64,
|
||||
min: Option<f32>,
|
||||
max: Option<f32>,
|
||||
) -> Result<String, EZKLError> {
|
||||
let mut file = std::fs::File::open(&model_path).map_err(|e| {
|
||||
crate::graph::errors::GraphError::ReadWriteFileError(
|
||||
@@ -865,22 +863,32 @@ pub(crate) fn gen_random_data(
|
||||
.collect::<tract_onnx::prelude::TractResult<Vec<_>>>()
|
||||
.map_err(|e| EZKLError::from(e.to_string()))?;
|
||||
|
||||
let min = min.unwrap_or(0.0);
|
||||
let max = max.unwrap_or(1.0);
|
||||
|
||||
/// Generates a random tensor of a given size and type.
|
||||
fn random(
|
||||
sizes: &[usize],
|
||||
datum_type: tract_onnx::prelude::DatumType,
|
||||
seed: u64,
|
||||
min: f32,
|
||||
max: f32,
|
||||
) -> TractTensor {
|
||||
use rand::{Rng, SeedableRng};
|
||||
let mut rng = rand::rngs::StdRng::seed_from_u64(seed);
|
||||
|
||||
let mut tensor = TractTensor::zero::<f32>(sizes).unwrap();
|
||||
let slice = tensor.as_slice_mut::<f32>().unwrap();
|
||||
slice.iter_mut().for_each(|x| *x = rng.gen());
|
||||
slice.iter_mut().for_each(|x| *x = rng.gen_range(min..max));
|
||||
tensor.cast_to_dt(datum_type).unwrap().into_owned()
|
||||
}
|
||||
|
||||
fn tensor_for_fact(fact: &tract_onnx::prelude::TypedFact, seed: u64) -> TractTensor {
|
||||
fn tensor_for_fact(
|
||||
fact: &tract_onnx::prelude::TypedFact,
|
||||
seed: u64,
|
||||
min: f32,
|
||||
max: f32,
|
||||
) -> TractTensor {
|
||||
if let Some(value) = &fact.konst {
|
||||
return value.clone().into_tensor();
|
||||
}
|
||||
@@ -891,12 +899,14 @@ pub(crate) fn gen_random_data(
|
||||
.expect("Expected concrete shape, found: {fact:?}"),
|
||||
fact.datum_type,
|
||||
seed,
|
||||
min,
|
||||
max,
|
||||
)
|
||||
}
|
||||
|
||||
let generated = input_facts
|
||||
.iter()
|
||||
.map(|v| tensor_for_fact(v, seed))
|
||||
.map(|v| tensor_for_fact(v, seed, min, max))
|
||||
.collect_vec();
|
||||
|
||||
let data = GraphData::from_tract_data(&generated)?;
|
||||
@@ -1044,7 +1054,7 @@ impl AccuracyResults {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) async fn calibrate(
|
||||
model_path: PathBuf,
|
||||
data: PathBuf,
|
||||
data: String,
|
||||
settings_path: PathBuf,
|
||||
target: CalibrationTarget,
|
||||
lookup_safety_margin: f64,
|
||||
@@ -1058,7 +1068,7 @@ pub(crate) async fn calibrate(
|
||||
|
||||
use crate::fieldutils::IntegerRep;
|
||||
|
||||
let data = GraphData::from_path(data)?;
|
||||
let data = GraphData::from_str(&data)?;
|
||||
// load the pre-generated settings
|
||||
let settings = GraphSettings::load(&settings_path)?;
|
||||
// now retrieve the run args
|
||||
@@ -1522,7 +1532,7 @@ pub(crate) async fn create_evm_data_attestation(
|
||||
settings_path: PathBuf,
|
||||
sol_code_path: PathBuf,
|
||||
abi_path: PathBuf,
|
||||
input: PathBuf,
|
||||
input: String,
|
||||
witness: Option<PathBuf>,
|
||||
) -> Result<String, EZKLError> {
|
||||
#[allow(unused_imports)]
|
||||
@@ -1535,52 +1545,31 @@ pub(crate) async fn create_evm_data_attestation(
|
||||
trace!("params computed");
|
||||
|
||||
// if input is not provided, we just instantiate dummy input data
|
||||
let data = GraphData::from_path(input).unwrap_or(GraphData::new(DataSource::File(vec![])));
|
||||
let data =
|
||||
GraphData::from_str(&input).unwrap_or_else(|_| GraphData::new(DataSource::File(vec![])));
|
||||
|
||||
debug!("data attestation data: {:?}", data);
|
||||
|
||||
// The number of input and output instances we attest to for the single call data attestation
|
||||
let mut input_len = None;
|
||||
let mut output_len = None;
|
||||
|
||||
let output_data = if let Some(DataSource::OnChain(source)) = data.output_data {
|
||||
if let Some(DataSource::OnChain(source)) = data.output_data {
|
||||
if visibility.output.is_private() {
|
||||
return Err("private output data on chain is not supported on chain".into());
|
||||
}
|
||||
let mut on_chain_output_data = vec![];
|
||||
match source.calls {
|
||||
Calls::Multiple(calls) => {
|
||||
for call in calls {
|
||||
on_chain_output_data.push(call);
|
||||
}
|
||||
}
|
||||
Calls::Single(call) => {
|
||||
output_len = Some(call.len);
|
||||
}
|
||||
}
|
||||
Some(on_chain_output_data)
|
||||
} else {
|
||||
None
|
||||
output_len = Some(source.call.decimals.len());
|
||||
};
|
||||
|
||||
let input_data = if let DataSource::OnChain(source) = data.input_data {
|
||||
if let DataSource::OnChain(source) = data.input_data {
|
||||
if visibility.input.is_private() {
|
||||
return Err("private input data on chain is not supported on chain".into());
|
||||
}
|
||||
let mut on_chain_input_data = vec![];
|
||||
match source.calls {
|
||||
Calls::Multiple(calls) => {
|
||||
for call in calls {
|
||||
on_chain_input_data.push(call);
|
||||
}
|
||||
}
|
||||
Calls::Single(call) => {
|
||||
input_len = Some(call.len);
|
||||
}
|
||||
}
|
||||
Some(on_chain_input_data)
|
||||
} else {
|
||||
None
|
||||
input_len = Some(source.call.decimals.len());
|
||||
};
|
||||
|
||||
// If both model inputs and outputs are attested to then we
|
||||
|
||||
// Read the settings file. Look if either the run_ars.input_visibility, run_args.output_visibility or run_args.param_visibility is KZGCommit
|
||||
// if so, then we need to load the witness
|
||||
|
||||
@@ -1601,33 +1590,25 @@ pub(crate) async fn create_evm_data_attestation(
|
||||
None
|
||||
};
|
||||
|
||||
// if either input_len or output_len is Some then we are in the single call data attestation mode
|
||||
if input_len.is_some() || output_len.is_some() {
|
||||
let output = fix_da_single_sol(input_len, output_len)?;
|
||||
let mut f = File::create(sol_code_path.clone())?;
|
||||
let _ = f.write(output.as_bytes());
|
||||
// fetch abi of the contract
|
||||
let (abi, _, _) = get_contract_artifacts(sol_code_path, "DataAttestationSingle", 0).await?;
|
||||
// save abi to file
|
||||
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
|
||||
} else {
|
||||
let output = fix_da_multi_sol(input_data, output_data, commitment_bytes)?;
|
||||
let mut f = File::create(sol_code_path.clone())?;
|
||||
let _ = f.write(output.as_bytes());
|
||||
// fetch abi of the contract
|
||||
let (abi, _, _) = get_contract_artifacts(sol_code_path, "DataAttestationMulti", 0).await?;
|
||||
// save abi to file
|
||||
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
|
||||
}
|
||||
let output: String = fix_da_sol(
|
||||
commitment_bytes,
|
||||
input_len.is_none() && output_len.is_none(),
|
||||
)?;
|
||||
let mut f = File::create(sol_code_path.clone())?;
|
||||
let _ = f.write(output.as_bytes());
|
||||
// fetch abi of the contract
|
||||
let (abi, _, _) = get_contract_artifacts(sol_code_path, "DataAttestation", 0).await?;
|
||||
// save abi to file
|
||||
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
|
||||
|
||||
Ok(String::new())
|
||||
}
|
||||
|
||||
pub(crate) async fn deploy_da_evm(
|
||||
data: PathBuf,
|
||||
data: String,
|
||||
settings_path: PathBuf,
|
||||
sol_code_path: PathBuf,
|
||||
rpc_url: Option<String>,
|
||||
rpc_url: String,
|
||||
addr_path: PathBuf,
|
||||
runs: usize,
|
||||
private_key: Option<String>,
|
||||
@@ -1636,7 +1617,7 @@ pub(crate) async fn deploy_da_evm(
|
||||
settings_path,
|
||||
data,
|
||||
sol_code_path,
|
||||
rpc_url.as_deref(),
|
||||
&rpc_url,
|
||||
runs,
|
||||
private_key.as_deref(),
|
||||
)
|
||||
@@ -1651,7 +1632,7 @@ pub(crate) async fn deploy_da_evm(
|
||||
|
||||
pub(crate) async fn deploy_evm(
|
||||
sol_code_path: PathBuf,
|
||||
rpc_url: Option<String>,
|
||||
rpc_url: String,
|
||||
addr_path: PathBuf,
|
||||
runs: usize,
|
||||
private_key: Option<String>,
|
||||
@@ -1664,7 +1645,7 @@ pub(crate) async fn deploy_evm(
|
||||
};
|
||||
let contract_address = deploy_contract_via_solidity(
|
||||
sol_code_path,
|
||||
rpc_url.as_deref(),
|
||||
&rpc_url,
|
||||
runs,
|
||||
private_key.as_deref(),
|
||||
contract_name,
|
||||
@@ -1707,7 +1688,7 @@ pub(crate) fn encode_evm_calldata(
|
||||
pub(crate) async fn verify_evm(
|
||||
proof_path: PathBuf,
|
||||
addr_verifier: H160Flag,
|
||||
rpc_url: Option<String>,
|
||||
rpc_url: String,
|
||||
addr_da: Option<H160Flag>,
|
||||
addr_vk: Option<H160Flag>,
|
||||
) -> Result<String, EZKLError> {
|
||||
@@ -1721,7 +1702,7 @@ pub(crate) async fn verify_evm(
|
||||
addr_verifier.into(),
|
||||
addr_da.into(),
|
||||
addr_vk.map(|s| s.into()),
|
||||
rpc_url.as_deref(),
|
||||
&rpc_url,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
@@ -1729,7 +1710,7 @@ pub(crate) async fn verify_evm(
|
||||
proof.clone(),
|
||||
addr_verifier.into(),
|
||||
addr_vk.map(|s| s.into()),
|
||||
rpc_url.as_deref(),
|
||||
&rpc_url,
|
||||
)
|
||||
.await?
|
||||
};
|
||||
@@ -1866,17 +1847,17 @@ pub(crate) fn setup(
|
||||
Ok(String::new())
|
||||
}
|
||||
|
||||
pub(crate) async fn setup_test_evm_witness(
|
||||
data_path: PathBuf,
|
||||
pub(crate) async fn setup_test_evm_data(
|
||||
data_path: String,
|
||||
compiled_circuit_path: PathBuf,
|
||||
test_data: PathBuf,
|
||||
rpc_url: Option<String>,
|
||||
rpc_url: String,
|
||||
input_source: TestDataSource,
|
||||
output_source: TestDataSource,
|
||||
) -> Result<String, EZKLError> {
|
||||
use crate::graph::TestOnChainData;
|
||||
|
||||
let mut data = GraphData::from_path(data_path)?;
|
||||
let mut data = GraphData::from_str(&data_path)?;
|
||||
let mut circuit = GraphCircuit::load(compiled_circuit_path)?;
|
||||
|
||||
// if both input and output are from files fail
|
||||
@@ -1902,17 +1883,6 @@ pub(crate) async fn setup_test_evm_witness(
|
||||
}
|
||||
|
||||
use crate::pfsys::ProofType;
|
||||
pub(crate) async fn test_update_account_calls(
|
||||
addr: H160Flag,
|
||||
data: PathBuf,
|
||||
rpc_url: Option<String>,
|
||||
) -> Result<String, EZKLError> {
|
||||
use crate::eth::update_account_calls;
|
||||
|
||||
update_account_calls(addr.into(), data, rpc_url.as_deref()).await?;
|
||||
|
||||
Ok(String::new())
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) fn prove(
|
||||
@@ -2126,6 +2096,7 @@ pub(crate) fn mock_aggregate(
|
||||
Ok(String::new())
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) fn setup_aggregate(
|
||||
sample_snarks: Vec<PathBuf>,
|
||||
vk_path: PathBuf,
|
||||
|
||||
@@ -5,10 +5,12 @@ use halo2curves::ff::PrimeField;
|
||||
/// Integer representation of a PrimeField element.
|
||||
pub type IntegerRep = i128;
|
||||
|
||||
/// Converts an i64 to a PrimeField element.
|
||||
/// Converts an integer rep to a PrimeField element.
|
||||
pub fn integer_rep_to_felt<F: PrimeField>(x: IntegerRep) -> F {
|
||||
if x >= 0 {
|
||||
F::from_u128(x as u128)
|
||||
} else if x == IntegerRep::MIN {
|
||||
-F::from_u128(x.saturating_neg() as u128) - F::ONE
|
||||
} else {
|
||||
-F::from_u128(x.saturating_neg() as u128)
|
||||
}
|
||||
@@ -32,6 +34,9 @@ pub fn felt_to_f64<F: PrimeField + PartialOrd + Field>(x: F) -> f64 {
|
||||
/// Converts a PrimeField element to an i64.
|
||||
pub fn felt_to_integer_rep<F: PrimeField + PartialOrd + Field>(x: F) -> IntegerRep {
|
||||
if x > F::from_u128(IntegerRep::MAX as u128) {
|
||||
if x == -F::from_u128(IntegerRep::MAX as u128) - F::ONE {
|
||||
return IntegerRep::MIN;
|
||||
}
|
||||
let rep = (-x).to_repr();
|
||||
let negtmp: &[u8] = rep.as_ref();
|
||||
let lower_128: u128 = u128::from_le_bytes(negtmp[..16].try_into().unwrap());
|
||||
@@ -51,7 +56,7 @@ mod test {
|
||||
use halo2curves::pasta::Fp as F;
|
||||
|
||||
#[test]
|
||||
fn test_conv() {
|
||||
fn integerreptofelt() {
|
||||
let res: F = integer_rep_to_felt(-15);
|
||||
assert_eq!(res, -F::from(15));
|
||||
|
||||
@@ -69,8 +74,24 @@ mod test {
|
||||
fn felttointegerrep() {
|
||||
for x in -(2_i128.pow(16))..(2_i128.pow(16)) {
|
||||
let fieldx: F = integer_rep_to_felt::<F>(x);
|
||||
let xf: i128 = felt_to_integer_rep::<F>(fieldx);
|
||||
let xf: IntegerRep = felt_to_integer_rep::<F>(fieldx);
|
||||
assert_eq!(x, xf);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn felttointegerrepmin() {
|
||||
let x = IntegerRep::MIN;
|
||||
let fieldx: F = integer_rep_to_felt::<F>(x);
|
||||
let xf: IntegerRep = felt_to_integer_rep::<F>(fieldx);
|
||||
assert_eq!(x, xf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn felttointegerrepmax() {
|
||||
let x = IntegerRep::MAX;
|
||||
let fieldx: F = integer_rep_to_felt::<F>(x);
|
||||
let xf: IntegerRep = felt_to_integer_rep::<F>(fieldx);
|
||||
assert_eq!(x, xf);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,6 +11,12 @@ pub enum GraphError {
|
||||
/// Shape mismatch in circuit construction
|
||||
#[error("invalid dimensions used for node {0} ({1})")]
|
||||
InvalidDims(usize, String),
|
||||
/// Non scalar power
|
||||
#[error("we only support scalar powers")]
|
||||
NonScalarPower,
|
||||
/// Non scalar base for exponentiation
|
||||
#[error("we only support scalar bases for exponentiation")]
|
||||
NonScalarBase,
|
||||
/// Wrong method was called to configure an op
|
||||
#[error("wrong method was called to configure node {0} ({1})")]
|
||||
WrongMethod(usize, String),
|
||||
@@ -27,7 +33,7 @@ pub enum GraphError {
|
||||
#[error("a node is missing required params: {0}")]
|
||||
MissingParams(String),
|
||||
/// A node has missing parameters
|
||||
#[error("a node is has misformed params: {0}")]
|
||||
#[error("a node has misformed params: {0}")]
|
||||
MisformedParams(String),
|
||||
/// Error in the configuration of the visibility of variables
|
||||
#[error("there should be at least one set of public variables")]
|
||||
@@ -113,13 +119,13 @@ pub enum GraphError {
|
||||
/// Missing input for a node
|
||||
#[error("missing input for node {0}")]
|
||||
MissingInput(usize),
|
||||
///
|
||||
/// Ranges can only be constant
|
||||
#[error("range only supports constant inputs in a zk circuit")]
|
||||
NonConstantRange,
|
||||
///
|
||||
/// Trilu diagonal must be constant
|
||||
#[error("trilu only supports constant diagonals in a zk circuit")]
|
||||
NonConstantTrilu,
|
||||
///
|
||||
/// The witness was too short
|
||||
#[error("insufficient witness values to generate a fixed output")]
|
||||
InsufficientWitnessValues,
|
||||
/// Missing scale
|
||||
@@ -143,4 +149,13 @@ pub enum GraphError {
|
||||
/// Invalid RunArg
|
||||
#[error("invalid RunArgs: {0}")]
|
||||
InvalidRunArgs(String),
|
||||
/// Only nearest neighbor interpolation is supported
|
||||
#[error("only nearest neighbor interpolation is supported")]
|
||||
InvalidInterpolation,
|
||||
/// Node has a missing output
|
||||
#[error("node {0} has a missing output")]
|
||||
MissingOutput(usize),
|
||||
/// Inssuficient advice columns
|
||||
#[error("insuficcient advice columns (need {0} at least)")]
|
||||
InsufficientAdviceColumns(usize),
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
108
src/graph/mod.rs
108
src/graph/mod.rs
@@ -36,12 +36,12 @@ use self::modules::{GraphModules, ModuleConfigs, ModuleForwardResult, ModuleSize
|
||||
use crate::circuit::lookup::LookupOp;
|
||||
use crate::circuit::modules::ModulePlanner;
|
||||
use crate::circuit::region::{ConstantsMap, RegionSettings};
|
||||
use crate::circuit::table::{num_cols_required, Range, Table, RESERVED_BLINDING_ROWS_PAD};
|
||||
use crate::circuit::table::{RESERVED_BLINDING_ROWS_PAD, Range, Table, num_cols_required};
|
||||
use crate::circuit::{CheckMode, InputType};
|
||||
use crate::fieldutils::{felt_to_f64, IntegerRep};
|
||||
use crate::fieldutils::{IntegerRep, felt_to_f64};
|
||||
use crate::pfsys::PrettyElements;
|
||||
use crate::tensor::{Tensor, ValTensor};
|
||||
use crate::{RunArgs, EZKL_BUF_CAPACITY};
|
||||
use crate::{EZKL_BUF_CAPACITY, RunArgs};
|
||||
|
||||
use halo2_proofs::{
|
||||
circuit::Layouter,
|
||||
@@ -56,13 +56,13 @@ use maybe_rayon::prelude::{IntoParallelRefIterator, ParallelIterator};
|
||||
pub use model::*;
|
||||
pub use node::*;
|
||||
#[cfg(feature = "python-bindings")]
|
||||
use pyo3::ToPyObject;
|
||||
#[cfg(feature = "python-bindings")]
|
||||
use pyo3::prelude::*;
|
||||
#[cfg(feature = "python-bindings")]
|
||||
use pyo3::types::PyDict;
|
||||
#[cfg(feature = "python-bindings")]
|
||||
use pyo3::types::PyDictMethods;
|
||||
#[cfg(feature = "python-bindings")]
|
||||
use pyo3::ToPyObject;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::ops::Deref;
|
||||
@@ -455,6 +455,10 @@ pub struct GraphSettings {
|
||||
pub num_blinding_factors: Option<usize>,
|
||||
/// unix time timestamp
|
||||
pub timestamp: Option<u128>,
|
||||
/// Model inputs types (if any)
|
||||
pub input_types: Option<Vec<InputType>>,
|
||||
/// Model outputs types (if any)
|
||||
pub output_types: Option<Vec<InputType>>,
|
||||
}
|
||||
|
||||
impl GraphSettings {
|
||||
@@ -619,11 +623,6 @@ impl GraphSettings {
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
pub fn uses_modules(&self) -> bool {
|
||||
!self.module_sizes.max_constraints() > 0
|
||||
}
|
||||
|
||||
/// if any visibility is encrypted or hashed
|
||||
pub fn module_requires_fixed(&self) -> bool {
|
||||
self.run_args.input_visibility.is_hashed()
|
||||
@@ -765,8 +764,8 @@ pub struct TestOnChainData {
|
||||
/// The path to the test witness
|
||||
pub data: std::path::PathBuf,
|
||||
/// rpc endpoint
|
||||
pub rpc: Option<String>,
|
||||
///
|
||||
pub rpc: String,
|
||||
/// data sources for the on chain data
|
||||
pub data_sources: TestSources,
|
||||
}
|
||||
|
||||
@@ -954,7 +953,7 @@ impl GraphCircuit {
|
||||
DataSource::File(file_data) => {
|
||||
self.load_file_data(file_data, &shapes, scales, input_types)
|
||||
}
|
||||
_ => unreachable!("cannot load from on-chain data"),
|
||||
_ => Err(GraphError::OnChainDataSource),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1027,24 +1026,11 @@ impl GraphCircuit {
|
||||
shapes: &Vec<Vec<usize>>,
|
||||
scales: Vec<crate::Scale>,
|
||||
) -> Result<Vec<Tensor<Fp>>, GraphError> {
|
||||
use crate::eth::{
|
||||
evm_quantize_multi, evm_quantize_single, read_on_chain_inputs_multi,
|
||||
read_on_chain_inputs_single, setup_eth_backend,
|
||||
};
|
||||
let (client, client_address) = setup_eth_backend(Some(&source.rpc), None).await?;
|
||||
let quantized_evm_inputs = match source.calls {
|
||||
input::Calls::Single(call) => {
|
||||
let (inputs, decimals) =
|
||||
read_on_chain_inputs_single(client.clone(), client_address, call).await?;
|
||||
|
||||
evm_quantize_single(client, scales, &inputs, decimals).await?
|
||||
}
|
||||
input::Calls::Multiple(calls) => {
|
||||
let inputs =
|
||||
read_on_chain_inputs_multi(client.clone(), client_address, &calls).await?;
|
||||
evm_quantize_multi(client, scales, &inputs).await?
|
||||
}
|
||||
};
|
||||
use crate::eth::{evm_quantize, read_on_chain_inputs, setup_eth_backend};
|
||||
let (client, client_address) = setup_eth_backend(&source.rpc, None).await?;
|
||||
let input = read_on_chain_inputs(client.clone(), client_address, &source.call).await?;
|
||||
let quantized_evm_inputs =
|
||||
evm_quantize(client, scales, &input, &source.call.decimals).await?;
|
||||
// on-chain data has already been quantized at this point. Just need to reshape it and push into tensor vector
|
||||
let mut inputs: Vec<Tensor<Fp>> = vec![];
|
||||
for (input, shape) in [quantized_evm_inputs].iter().zip(shapes) {
|
||||
@@ -1445,6 +1431,8 @@ impl GraphCircuit {
|
||||
let output_scales = self.model().graph.get_output_scales()?;
|
||||
let input_shapes = self.model().graph.input_shapes()?;
|
||||
let output_shapes = self.model().graph.output_shapes()?;
|
||||
let mut input_data = None;
|
||||
let mut output_data = None;
|
||||
|
||||
if matches!(
|
||||
test_on_chain_data.data_sources.input,
|
||||
@@ -1455,23 +1443,12 @@ impl GraphCircuit {
|
||||
return Err(GraphError::OnChainDataSource);
|
||||
}
|
||||
|
||||
let input_data = match &data.input_data {
|
||||
DataSource::File(input_data) => input_data,
|
||||
input_data = match &data.input_data {
|
||||
DataSource::File(input_data) => Some(input_data),
|
||||
_ => {
|
||||
return Err(GraphError::OnChainDataSource);
|
||||
return Err(GraphError::MissingDataSource);
|
||||
}
|
||||
};
|
||||
// Get the flatten length of input_data
|
||||
// if the input source is a field then set scale to 0
|
||||
|
||||
let datam: (Vec<Tensor<Fp>>, OnChainSource) = OnChainSource::test_from_file_data(
|
||||
input_data,
|
||||
input_scales,
|
||||
input_shapes,
|
||||
test_on_chain_data.rpc.as_deref(),
|
||||
)
|
||||
.await?;
|
||||
data.input_data = datam.1.into();
|
||||
}
|
||||
if matches!(
|
||||
test_on_chain_data.data_sources.output,
|
||||
@@ -1482,20 +1459,39 @@ impl GraphCircuit {
|
||||
return Err(GraphError::OnChainDataSource);
|
||||
}
|
||||
|
||||
let output_data = match &data.output_data {
|
||||
Some(DataSource::File(output_data)) => output_data,
|
||||
Some(DataSource::OnChain(_)) => return Err(GraphError::OnChainDataSource),
|
||||
output_data = match &data.output_data {
|
||||
Some(DataSource::File(output_data)) => Some(output_data),
|
||||
_ => return Err(GraphError::MissingDataSource),
|
||||
};
|
||||
let datum: (Vec<Tensor<Fp>>, OnChainSource) = OnChainSource::test_from_file_data(
|
||||
output_data,
|
||||
output_scales,
|
||||
output_shapes,
|
||||
test_on_chain_data.rpc.as_deref(),
|
||||
)
|
||||
.await?;
|
||||
data.output_data = Some(datum.1.into());
|
||||
}
|
||||
// Merge the input and output data
|
||||
let mut file_data: Vec<Vec<input::FileSourceInner>> = vec![];
|
||||
let mut scales: Vec<crate::Scale> = vec![];
|
||||
let mut shapes: Vec<Vec<usize>> = vec![];
|
||||
if let Some(input_data) = input_data {
|
||||
file_data.extend(input_data.clone());
|
||||
scales.extend(input_scales.clone());
|
||||
shapes.extend(input_shapes.clone());
|
||||
}
|
||||
if let Some(output_data) = output_data {
|
||||
file_data.extend(output_data.clone());
|
||||
scales.extend(output_scales.clone());
|
||||
shapes.extend(output_shapes.clone());
|
||||
};
|
||||
// print file data
|
||||
debug!("file data: {:?}", file_data);
|
||||
|
||||
let on_chain_data: OnChainSource =
|
||||
OnChainSource::test_from_file_data(&file_data, scales, shapes, &test_on_chain_data.rpc)
|
||||
.await?;
|
||||
// Here we update the GraphData struct with the on-chain data
|
||||
if input_data.is_some() {
|
||||
data.input_data = on_chain_data.clone().into();
|
||||
}
|
||||
if output_data.is_some() {
|
||||
data.output_data = Some(on_chain_data.into());
|
||||
}
|
||||
debug!("test on-chain data: {:?}", data);
|
||||
// Save the updated GraphData struct to the data_path
|
||||
data.save(test_on_chain_data.data)?;
|
||||
Ok(())
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use super::errors::GraphError;
|
||||
use super::extract_const_quantized_values;
|
||||
use super::node::*;
|
||||
use super::scale_to_multiplier;
|
||||
use super::vars::*;
|
||||
use super::GraphSettings;
|
||||
use crate::circuit::hybrid::HybridOp;
|
||||
@@ -379,13 +378,18 @@ pub struct ParsedNodes {
|
||||
pub nodes: BTreeMap<usize, NodeType>,
|
||||
inputs: Vec<usize>,
|
||||
outputs: Vec<Outlet>,
|
||||
output_types: Vec<InputType>,
|
||||
}
|
||||
|
||||
impl ParsedNodes {
|
||||
/// Returns the output types of the computational graph.
|
||||
pub fn get_output_types(&self) -> Vec<InputType> {
|
||||
self.output_types.clone()
|
||||
}
|
||||
|
||||
/// Returns the number of the computational graph's inputs
|
||||
pub fn num_inputs(&self) -> usize {
|
||||
let input_nodes = self.inputs.iter();
|
||||
input_nodes.len()
|
||||
self.inputs.len()
|
||||
}
|
||||
|
||||
/// Input types
|
||||
@@ -425,8 +429,7 @@ impl ParsedNodes {
|
||||
|
||||
/// Returns the number of the computational graph's outputs
|
||||
pub fn num_outputs(&self) -> usize {
|
||||
let output_nodes = self.outputs.iter();
|
||||
output_nodes.len()
|
||||
self.outputs.len()
|
||||
}
|
||||
|
||||
/// Returns shapes of the computational graph's outputs
|
||||
@@ -493,6 +496,16 @@ impl Model {
|
||||
Ok(om)
|
||||
}
|
||||
|
||||
/// Gets the input types from the parsed nodes
|
||||
pub fn get_input_types(&self) -> Result<Vec<InputType>, GraphError> {
|
||||
self.graph.get_input_types()
|
||||
}
|
||||
|
||||
/// Gets the output types from the parsed nodes
|
||||
pub fn get_output_types(&self) -> Vec<InputType> {
|
||||
self.graph.get_output_types()
|
||||
}
|
||||
|
||||
///
|
||||
pub fn save(&self, path: PathBuf) -> Result<(), GraphError> {
|
||||
let f = std::fs::File::create(&path).map_err(|e| {
|
||||
@@ -576,6 +589,11 @@ impl Model {
|
||||
required_range_checks: res.range_checks.into_iter().collect(),
|
||||
model_output_scales: self.graph.get_output_scales()?,
|
||||
model_input_scales: self.graph.get_input_scales(),
|
||||
input_types: match self.get_input_types() {
|
||||
Ok(x) => Some(x),
|
||||
Err(_) => None,
|
||||
},
|
||||
output_types: Some(self.get_output_types()),
|
||||
num_dynamic_lookups: res.num_dynamic_lookups,
|
||||
total_dynamic_col_size: res.dynamic_lookup_col_coord,
|
||||
num_shuffles: res.num_shuffles,
|
||||
@@ -634,6 +652,10 @@ impl Model {
|
||||
|
||||
for (i, id) in model.clone().inputs.iter().enumerate() {
|
||||
let input = model.node_mut(id.node);
|
||||
|
||||
if input.outputs.len() == 0 {
|
||||
return Err(GraphError::MissingOutput(id.node));
|
||||
}
|
||||
let mut fact: InferenceFact = input.outputs[0].fact.clone();
|
||||
|
||||
for (i, x) in fact.clone().shape.dims().enumerate() {
|
||||
@@ -702,6 +724,11 @@ impl Model {
|
||||
nodes,
|
||||
inputs: model.inputs.iter().map(|o| o.node).collect(),
|
||||
outputs: model.outputs.iter().map(|o| (o.node, o.slot)).collect(),
|
||||
output_types: model
|
||||
.outputs
|
||||
.iter()
|
||||
.map(|o| Ok::<InputType, GraphError>(model.outlet_fact(*o)?.datum_type.into()))
|
||||
.collect::<Result<Vec<_>, GraphError>>()?,
|
||||
};
|
||||
|
||||
let duration = start_time.elapsed();
|
||||
@@ -860,6 +887,15 @@ impl Model {
|
||||
nodes: subgraph_nodes,
|
||||
inputs: model.inputs.iter().map(|o| o.node).collect(),
|
||||
outputs: model.outputs.iter().map(|o| (o.node, o.slot)).collect(),
|
||||
output_types: model
|
||||
.outputs
|
||||
.iter()
|
||||
.map(|o| {
|
||||
Ok::<InputType, GraphError>(
|
||||
model.outlet_fact(*o)?.datum_type.into(),
|
||||
)
|
||||
})
|
||||
.collect::<Result<Vec<_>, GraphError>>()?,
|
||||
};
|
||||
|
||||
let om = Model {
|
||||
@@ -906,6 +942,7 @@ impl Model {
|
||||
n.opkind = SupportedOp::Input(Input {
|
||||
scale,
|
||||
datum_type: inp.datum_type,
|
||||
decomp: !run_args.ignore_range_check_inputs_outputs,
|
||||
});
|
||||
input_idx += 1;
|
||||
n.out_scale = scale;
|
||||
@@ -1016,6 +1053,10 @@ impl Model {
|
||||
let required_lookups = settings.required_lookups.clone();
|
||||
let required_range_checks = settings.required_range_checks.clone();
|
||||
|
||||
if vars.advices.len() < 3 {
|
||||
return Err(GraphError::InsufficientAdviceColumns(3));
|
||||
}
|
||||
|
||||
let mut base_gate = PolyConfig::configure(
|
||||
meta,
|
||||
vars.advices[0..2].try_into()?,
|
||||
@@ -1035,6 +1076,10 @@ impl Model {
|
||||
}
|
||||
|
||||
if settings.requires_dynamic_lookup() {
|
||||
if vars.advices.len() < 6 {
|
||||
return Err(GraphError::InsufficientAdviceColumns(6));
|
||||
}
|
||||
|
||||
base_gate.configure_dynamic_lookup(
|
||||
meta,
|
||||
vars.advices[0..3].try_into()?,
|
||||
@@ -1043,10 +1088,13 @@ impl Model {
|
||||
}
|
||||
|
||||
if settings.requires_shuffle() {
|
||||
if vars.advices.len() < 6 {
|
||||
return Err(GraphError::InsufficientAdviceColumns(6));
|
||||
}
|
||||
base_gate.configure_shuffles(
|
||||
meta,
|
||||
vars.advices[0..2].try_into()?,
|
||||
vars.advices[3..5].try_into()?,
|
||||
vars.advices[0..3].try_into()?,
|
||||
vars.advices[3..6].try_into()?,
|
||||
)?;
|
||||
}
|
||||
|
||||
@@ -1061,6 +1109,7 @@ impl Model {
|
||||
/// * `vars` - The variables for the circuit.
|
||||
/// * `witnessed_outputs` - The values to compare against.
|
||||
/// * `constants` - The constants for the circuit.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn layout(
|
||||
&self,
|
||||
mut config: ModelConfig,
|
||||
@@ -1123,17 +1172,10 @@ impl Model {
|
||||
})?;
|
||||
|
||||
if run_args.output_visibility.is_public() || run_args.output_visibility.is_fixed() {
|
||||
let output_scales = self.graph.get_output_scales().map_err(|e| {
|
||||
error!("{}", e);
|
||||
halo2_proofs::plonk::Error::Synthesis
|
||||
})?;
|
||||
let res = outputs
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, output)| {
|
||||
let mut tolerance = run_args.tolerance;
|
||||
tolerance.scale = scale_to_multiplier(output_scales[i]).into();
|
||||
|
||||
let comparators = if run_args.output_visibility == Visibility::Public {
|
||||
let res = vars
|
||||
.instance
|
||||
@@ -1155,7 +1197,9 @@ impl Model {
|
||||
.layout(
|
||||
&mut thread_safe_region,
|
||||
&[output.clone(), comparators],
|
||||
Box::new(HybridOp::RangeCheck(tolerance)),
|
||||
Box::new(HybridOp::Output {
|
||||
decomp: !run_args.ignore_range_check_inputs_outputs,
|
||||
}),
|
||||
)
|
||||
.map_err(|e| e.into())
|
||||
})
|
||||
@@ -1415,11 +1459,9 @@ impl Model {
|
||||
let outputs = self.layout_nodes(&mut model_config, &mut region, &mut results)?;
|
||||
|
||||
if self.visibility.output.is_public() || self.visibility.output.is_fixed() {
|
||||
let output_scales = self.graph.get_output_scales()?;
|
||||
let res = outputs
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, output)| {
|
||||
.map(|output| {
|
||||
let mut comparator: ValTensor<Fp> = (0..output.len())
|
||||
.map(|_| {
|
||||
if !self.visibility.output.is_fixed() {
|
||||
@@ -1432,13 +1474,12 @@ impl Model {
|
||||
.into();
|
||||
comparator.reshape(output.dims())?;
|
||||
|
||||
let mut tolerance = run_args.tolerance;
|
||||
tolerance.scale = scale_to_multiplier(output_scales[i]).into();
|
||||
|
||||
dummy_config.layout(
|
||||
&mut region,
|
||||
&[output.clone(), comparator],
|
||||
Box::new(HybridOp::RangeCheck(tolerance)),
|
||||
Box::new(HybridOp::Output {
|
||||
decomp: !run_args.ignore_range_check_inputs_outputs,
|
||||
}),
|
||||
)
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>();
|
||||
@@ -1460,7 +1501,7 @@ impl Model {
|
||||
.iter()
|
||||
.map(|x| {
|
||||
x.get_felt_evals()
|
||||
.unwrap_or(Tensor::new(Some(&[Fp::ZERO]), &[1]).unwrap())
|
||||
.unwrap_or_else(|_| Tensor::new(Some(&[Fp::ZERO]), &[1]).unwrap())
|
||||
})
|
||||
.collect();
|
||||
|
||||
@@ -1530,6 +1571,7 @@ impl Model {
|
||||
let mut op = crate::circuit::Constant::new(
|
||||
c.quantized_values.clone(),
|
||||
c.raw_values.clone(),
|
||||
c.decomp,
|
||||
);
|
||||
op.pre_assign(consts[const_idx].clone());
|
||||
n.opkind = SupportedOp::Constant(op);
|
||||
@@ -1557,4 +1599,16 @@ impl Model {
|
||||
}
|
||||
Ok(instance_shapes)
|
||||
}
|
||||
|
||||
/// Input types of the computational graph's public inputs (if any)
|
||||
pub fn instance_types(&self) -> Result<Vec<InputType>, GraphError> {
|
||||
let mut instance_types = vec![];
|
||||
if self.visibility.input.is_public() {
|
||||
instance_types.extend(self.graph.get_input_types()?);
|
||||
}
|
||||
if self.visibility.output.is_public() {
|
||||
instance_types.extend(self.graph.get_output_types());
|
||||
}
|
||||
Ok(instance_types)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,14 +14,11 @@ use serde::{Deserialize, Serialize};
|
||||
use super::errors::GraphError;
|
||||
use super::{VarVisibility, Visibility};
|
||||
|
||||
/// poseidon len to hash in tree
|
||||
pub const POSEIDON_LEN_GRAPH: usize = 32;
|
||||
/// Poseidon number of instances
|
||||
pub const POSEIDON_INSTANCES: usize = 1;
|
||||
|
||||
/// Poseidon module type
|
||||
pub type ModulePoseidon =
|
||||
PoseidonChip<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>;
|
||||
pub type ModulePoseidon = PoseidonChip<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE>;
|
||||
/// Poseidon module config
|
||||
pub type ModulePoseidonConfig = PoseidonConfig<POSEIDON_WIDTH, POSEIDON_RATE>;
|
||||
|
||||
@@ -284,7 +281,6 @@ impl GraphModules {
|
||||
log::error!("Poseidon config not initialized");
|
||||
return Err(Error::Synthesis);
|
||||
}
|
||||
// If the module is encrypted, then we need to encrypt the inputs
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -1,10 +1,19 @@
|
||||
// Import dependencies for scaling operations
|
||||
use super::scale_to_multiplier;
|
||||
|
||||
// Import ONNX-specific utilities when EZKL feature is enabled
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use super::utilities::node_output_shapes;
|
||||
|
||||
// Import scale management types for EZKL
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use super::VarScales;
|
||||
|
||||
// Import visibility settings for EZKL
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use super::Visibility;
|
||||
|
||||
// Import operation types for different circuit components
|
||||
use crate::circuit::hybrid::HybridOp;
|
||||
use crate::circuit::lookup::LookupOp;
|
||||
use crate::circuit::poly::PolyOp;
|
||||
@@ -13,28 +22,49 @@ use crate::circuit::Constant;
|
||||
use crate::circuit::Input;
|
||||
use crate::circuit::Op;
|
||||
use crate::circuit::Unknown;
|
||||
|
||||
// Import graph error types for EZKL
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use crate::graph::errors::GraphError;
|
||||
|
||||
// Import ONNX operation conversion utilities
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use crate::graph::new_op_from_onnx;
|
||||
|
||||
// Import tensor error handling
|
||||
use crate::tensor::TensorError;
|
||||
|
||||
// Import curve-specific field type
|
||||
use halo2curves::bn256::Fr as Fp;
|
||||
|
||||
// Import logging for EZKL
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use log::trace;
|
||||
|
||||
// Import serialization traits
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
|
||||
// Import data structures for EZKL
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
// Import formatting traits for EZKL
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use std::fmt;
|
||||
|
||||
// Import table display formatting for EZKL
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use tabled::Tabled;
|
||||
|
||||
// Import ONNX-specific types and traits for EZKL
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use tract_onnx::{
|
||||
self,
|
||||
prelude::{Node as OnnxNode, SymbolValues, TypedFact, TypedOp},
|
||||
};
|
||||
|
||||
/// Helper function to format vectors for display
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
fn display_vector<T: fmt::Debug>(v: &Vec<T>) -> String {
|
||||
if !v.is_empty() {
|
||||
@@ -44,29 +74,35 @@ fn display_vector<T: fmt::Debug>(v: &Vec<T>) -> String {
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper function to format operation kinds for display
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
fn display_opkind(v: &SupportedOp) -> String {
|
||||
v.as_string()
|
||||
}
|
||||
|
||||
/// A wrapper for an operation that has been rescaled.
|
||||
/// A wrapper for an operation that has been rescaled to handle different precision requirements.
|
||||
/// This enables operations to work with inputs that have been scaled to different fixed-point representations.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct Rescaled {
|
||||
/// The operation that has to be rescaled.
|
||||
/// The underlying operation that needs to be rescaled
|
||||
pub inner: Box<SupportedOp>,
|
||||
/// The scale of the operation's inputs.
|
||||
/// Vector of (index, scale) pairs defining how each input should be scaled
|
||||
pub scale: Vec<(usize, u128)>,
|
||||
}
|
||||
|
||||
/// Implementation of the Op trait for Rescaled operations
|
||||
impl Op<Fp> for Rescaled {
|
||||
/// Convert to Any type for runtime type checking
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
/// Get string representation of the operation
|
||||
fn as_string(&self) -> String {
|
||||
format!("RESCALED INPUT ({})", self.inner.as_string())
|
||||
}
|
||||
|
||||
/// Calculate output scale based on input scales
|
||||
fn out_scale(&self, in_scales: Vec<crate::Scale>) -> Result<crate::Scale, CircuitError> {
|
||||
let in_scales = in_scales
|
||||
.into_iter()
|
||||
@@ -77,6 +113,7 @@ impl Op<Fp> for Rescaled {
|
||||
Op::<Fp>::out_scale(&*self.inner, in_scales)
|
||||
}
|
||||
|
||||
/// Layout the operation in the circuit
|
||||
fn layout(
|
||||
&self,
|
||||
config: &mut crate::circuit::BaseConfig<Fp>,
|
||||
@@ -93,28 +130,40 @@ impl Op<Fp> for Rescaled {
|
||||
self.inner.layout(config, region, res)
|
||||
}
|
||||
|
||||
/// Create a cloned boxed copy of this operation
|
||||
fn clone_dyn(&self) -> Box<dyn Op<Fp>> {
|
||||
Box::new(self.clone()) // Forward to the derive(Clone) impl
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
/// A wrapper for an operation that has been rescaled.
|
||||
/// A wrapper for operations that require scale rebasing
|
||||
/// This handles cases where operation scales need to be adjusted to a target scale
|
||||
/// while preserving the numerical relationships
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct RebaseScale {
|
||||
/// The operation that has to be rescaled.
|
||||
/// The operation that needs to be rescaled
|
||||
pub inner: Box<SupportedOp>,
|
||||
/// rebase op
|
||||
/// Operation used for rebasing, typically division
|
||||
pub rebase_op: HybridOp,
|
||||
/// scale being rebased to
|
||||
/// Scale that we're rebasing to
|
||||
pub target_scale: i32,
|
||||
/// The original scale of the operation's inputs.
|
||||
/// Original scale of operation's inputs before rebasing
|
||||
pub original_scale: i32,
|
||||
/// multiplier
|
||||
/// Scaling multiplier used in rebasing
|
||||
pub multiplier: f64,
|
||||
}
|
||||
|
||||
impl RebaseScale {
|
||||
/// Creates a rebased version of an operation if needed
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `inner` - Operation to potentially rebase
|
||||
/// * `global_scale` - Base scale for the system
|
||||
/// * `op_out_scale` - Current output scale of the operation
|
||||
/// * `scale_rebase_multiplier` - Factor determining when rebasing should occur
|
||||
///
|
||||
/// # Returns
|
||||
/// Original or rebased operation depending on scale relationships
|
||||
pub fn rebase(
|
||||
inner: SupportedOp,
|
||||
global_scale: crate::Scale,
|
||||
@@ -155,7 +204,15 @@ impl RebaseScale {
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a rebased operation with increased scale
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `inner` - Operation to potentially rebase
|
||||
/// * `target_scale` - Scale to rebase to
|
||||
/// * `op_out_scale` - Current output scale of the operation
|
||||
///
|
||||
/// # Returns
|
||||
/// Original or rebased operation with increased scale
|
||||
pub fn rebase_up(
|
||||
inner: SupportedOp,
|
||||
target_scale: crate::Scale,
|
||||
@@ -192,10 +249,12 @@ impl RebaseScale {
|
||||
}
|
||||
|
||||
impl Op<Fp> for RebaseScale {
|
||||
/// Convert to Any type for runtime type checking
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
/// Get string representation of the operation
|
||||
fn as_string(&self) -> String {
|
||||
format!(
|
||||
"REBASED (div={:?}, rebasing_op={}) ({})",
|
||||
@@ -205,10 +264,12 @@ impl Op<Fp> for RebaseScale {
|
||||
)
|
||||
}
|
||||
|
||||
/// Calculate output scale based on input scales
|
||||
fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, CircuitError> {
|
||||
Ok(self.target_scale)
|
||||
}
|
||||
|
||||
/// Layout the operation in the circuit
|
||||
fn layout(
|
||||
&self,
|
||||
config: &mut crate::circuit::BaseConfig<Fp>,
|
||||
@@ -222,34 +283,40 @@ impl Op<Fp> for RebaseScale {
|
||||
self.rebase_op.layout(config, region, &[original_res])
|
||||
}
|
||||
|
||||
/// Create a cloned boxed copy of this operation
|
||||
fn clone_dyn(&self) -> Box<dyn Op<Fp>> {
|
||||
Box::new(self.clone()) // Forward to the derive(Clone) impl
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
/// A single operation in a [crate::graph::Model].
|
||||
/// Represents all supported operation types in the circuit
|
||||
/// Each variant encapsulates a different type of operation with specific behavior
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub enum SupportedOp {
|
||||
/// A linear operation.
|
||||
/// Linear operations (polynomial-based)
|
||||
Linear(PolyOp),
|
||||
/// A nonlinear operation.
|
||||
/// Nonlinear operations requiring lookup tables
|
||||
Nonlinear(LookupOp),
|
||||
/// A hybrid operation.
|
||||
/// Mixed operations combining different approaches
|
||||
Hybrid(HybridOp),
|
||||
///
|
||||
/// Input values to the circuit
|
||||
Input(Input),
|
||||
///
|
||||
/// Constant values in the circuit
|
||||
Constant(Constant<Fp>),
|
||||
///
|
||||
/// Placeholder for unsupported operations
|
||||
Unknown(Unknown),
|
||||
///
|
||||
/// Operations requiring rescaling of inputs
|
||||
Rescaled(Rescaled),
|
||||
///
|
||||
/// Operations requiring scale rebasing
|
||||
RebaseScale(RebaseScale),
|
||||
}
|
||||
|
||||
impl SupportedOp {
|
||||
/// Checks if the operation is a lookup operation
|
||||
///
|
||||
/// # Returns
|
||||
/// * `true` if operation requires lookup table
|
||||
/// * `false` otherwise
|
||||
pub fn is_lookup(&self) -> bool {
|
||||
match self {
|
||||
SupportedOp::Nonlinear(_) => true,
|
||||
@@ -257,7 +324,12 @@ impl SupportedOp {
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns input operation if this is an input
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Some(Input)` if this is an input operation
|
||||
/// * `None` otherwise
|
||||
pub fn get_input(&self) -> Option<Input> {
|
||||
match self {
|
||||
SupportedOp::Input(op) => Some(op.clone()),
|
||||
@@ -265,7 +337,11 @@ impl SupportedOp {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns reference to rebased operation if this is a rebased operation
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Some(&RebaseScale)` if this is a rebased operation
|
||||
/// * `None` otherwise
|
||||
pub fn get_rebased(&self) -> Option<&RebaseScale> {
|
||||
match self {
|
||||
SupportedOp::RebaseScale(op) => Some(op),
|
||||
@@ -273,7 +349,11 @@ impl SupportedOp {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns reference to lookup operation if this is a lookup operation
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Some(&LookupOp)` if this is a lookup operation
|
||||
/// * `None` otherwise
|
||||
pub fn get_lookup(&self) -> Option<&LookupOp> {
|
||||
match self {
|
||||
SupportedOp::Nonlinear(op) => Some(op),
|
||||
@@ -281,7 +361,11 @@ impl SupportedOp {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns reference to constant if this is a constant
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Some(&Constant)` if this is a constant
|
||||
/// * `None` otherwise
|
||||
pub fn get_constant(&self) -> Option<&Constant<Fp>> {
|
||||
match self {
|
||||
SupportedOp::Constant(op) => Some(op),
|
||||
@@ -289,7 +373,11 @@ impl SupportedOp {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns mutable reference to constant if this is a constant
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Some(&mut Constant)` if this is a constant
|
||||
/// * `None` otherwise
|
||||
pub fn get_mutable_constant(&mut self) -> Option<&mut Constant<Fp>> {
|
||||
match self {
|
||||
SupportedOp::Constant(op) => Some(op),
|
||||
@@ -297,18 +385,19 @@ impl SupportedOp {
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a homogeneously rescaled version of this operation if needed
|
||||
/// Only available with EZKL feature enabled
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
fn homogenous_rescale(
|
||||
&self,
|
||||
in_scales: Vec<crate::Scale>,
|
||||
) -> Result<Box<dyn Op<Fp>>, GraphError> {
|
||||
let inputs_to_scale = self.requires_homogenous_input_scales();
|
||||
// creates a rescaled op if the inputs are not homogenous
|
||||
let op = self.clone_dyn();
|
||||
super::homogenize_input_scales(op, in_scales, inputs_to_scale)
|
||||
}
|
||||
|
||||
/// Since each associated value of `SupportedOp` implements `Op`, let's define a helper method to retrieve it.
|
||||
/// Returns reference to underlying Op implementation
|
||||
fn as_op(&self) -> &dyn Op<Fp> {
|
||||
match self {
|
||||
SupportedOp::Linear(op) => op,
|
||||
@@ -322,9 +411,10 @@ impl SupportedOp {
|
||||
}
|
||||
}
|
||||
|
||||
/// check if is the identity operation
|
||||
/// Checks if this is an identity operation
|
||||
///
|
||||
/// # Returns
|
||||
/// * `true` if the operation is the identity operation
|
||||
/// * `true` if this operation passes input through unchanged
|
||||
/// * `false` otherwise
|
||||
pub fn is_identity(&self) -> bool {
|
||||
match self {
|
||||
@@ -361,9 +451,11 @@ impl From<Box<dyn Op<Fp>>> for SupportedOp {
|
||||
if let Some(op) = value.as_any().downcast_ref::<Unknown>() {
|
||||
return SupportedOp::Unknown(op.clone());
|
||||
};
|
||||
|
||||
if let Some(op) = value.as_any().downcast_ref::<Rescaled>() {
|
||||
return SupportedOp::Rescaled(op.clone());
|
||||
};
|
||||
|
||||
if let Some(op) = value.as_any().downcast_ref::<RebaseScale>() {
|
||||
return SupportedOp::RebaseScale(op.clone());
|
||||
};
|
||||
@@ -375,6 +467,7 @@ impl From<Box<dyn Op<Fp>>> for SupportedOp {
|
||||
}
|
||||
|
||||
impl Op<Fp> for SupportedOp {
|
||||
/// Layout this operation in the circuit
|
||||
fn layout(
|
||||
&self,
|
||||
config: &mut crate::circuit::BaseConfig<Fp>,
|
||||
@@ -384,54 +477,61 @@ impl Op<Fp> for SupportedOp {
|
||||
self.as_op().layout(config, region, values)
|
||||
}
|
||||
|
||||
/// Check if this is an input operation
|
||||
fn is_input(&self) -> bool {
|
||||
self.as_op().is_input()
|
||||
}
|
||||
|
||||
/// Check if this is a constant operation
|
||||
fn is_constant(&self) -> bool {
|
||||
self.as_op().is_constant()
|
||||
}
|
||||
|
||||
/// Get which inputs require homogeneous scales
|
||||
fn requires_homogenous_input_scales(&self) -> Vec<usize> {
|
||||
self.as_op().requires_homogenous_input_scales()
|
||||
}
|
||||
|
||||
/// Create a clone of this operation
|
||||
fn clone_dyn(&self) -> Box<dyn Op<Fp>> {
|
||||
self.as_op().clone_dyn()
|
||||
}
|
||||
|
||||
/// Get string representation
|
||||
fn as_string(&self) -> String {
|
||||
self.as_op().as_string()
|
||||
}
|
||||
|
||||
/// Convert to Any type
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
/// Calculate output scale from input scales
|
||||
fn out_scale(&self, in_scales: Vec<crate::Scale>) -> Result<crate::Scale, CircuitError> {
|
||||
self.as_op().out_scale(in_scales)
|
||||
}
|
||||
}
|
||||
|
||||
/// A node's input is a tensor from another node's output.
|
||||
/// Represents a connection to another node's output
|
||||
/// First element is node index, second is output slot index
|
||||
pub type Outlet = (usize, usize);
|
||||
|
||||
/// A single operation in a [crate::graph::Model].
|
||||
/// Represents a single computational node in the circuit graph
|
||||
/// Contains all information needed to execute and connect operations
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct Node {
|
||||
/// [Op] i.e what operation this node represents.
|
||||
/// The operation this node performs
|
||||
pub opkind: SupportedOp,
|
||||
/// The denominator in the fixed point representation for the node's output. Tensors of differing scales should not be combined.
|
||||
/// Fixed point scale factor for this node's output
|
||||
pub out_scale: i32,
|
||||
// Usually there is a simple in and out shape of the node as an operator. For example, an Affine node has three input_shapes (one for the input, weight, and bias),
|
||||
// but in_dim is [in], out_dim is [out]
|
||||
/// The indices of the node's inputs.
|
||||
/// Connections to other nodes' outputs that serve as inputs
|
||||
pub inputs: Vec<Outlet>,
|
||||
/// Dimensions of output.
|
||||
/// Shape of this node's output tensor
|
||||
pub out_dims: Vec<usize>,
|
||||
/// The node's unique identifier.
|
||||
/// Unique identifier for this node
|
||||
pub idx: usize,
|
||||
/// The node's num of uses
|
||||
/// Number of times this node's output is used
|
||||
pub num_uses: usize,
|
||||
}
|
||||
|
||||
@@ -469,12 +569,19 @@ impl PartialEq for Node {
|
||||
}
|
||||
|
||||
impl Node {
|
||||
/// Converts a tract [OnnxNode] into an ezkl [Node].
|
||||
/// # Arguments:
|
||||
/// * `node` - [OnnxNode]
|
||||
/// * `other_nodes` - [BTreeMap] of other previously initialized [Node]s in the computational graph.
|
||||
/// * `public_params` - flag if parameters of model are public
|
||||
/// * `idx` - The node's unique identifier.
|
||||
/// Creates a new Node from an ONNX node
|
||||
/// Only available when EZKL feature is enabled
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `node` - Source ONNX node
|
||||
/// * `other_nodes` - Map of existing nodes in the graph
|
||||
/// * `scales` - Scale factors for variables
|
||||
/// * `idx` - Unique identifier for this node
|
||||
/// * `symbol_values` - ONNX symbol values
|
||||
/// * `run_args` - Runtime configuration arguments
|
||||
///
|
||||
/// # Returns
|
||||
/// New Node instance or error if creation fails
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
@@ -612,16 +719,14 @@ impl Node {
|
||||
})
|
||||
}
|
||||
|
||||
/// check if it is a softmax node
|
||||
/// Check if this node performs softmax operation
|
||||
pub fn is_softmax(&self) -> bool {
|
||||
if let SupportedOp::Hybrid(HybridOp::Softmax { .. }) = self.opkind {
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
matches!(self.opkind, SupportedOp::Hybrid(HybridOp::Softmax { .. }))
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper function to rescale constants that are only used once
|
||||
/// Only available when EZKL feature is enabled
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
fn rescale_const_with_single_use(
|
||||
constant: &mut Constant<Fp>,
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
use super::errors::GraphError;
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use super::VarScales;
|
||||
use super::errors::GraphError;
|
||||
use super::{Rescaled, SupportedOp, Visibility};
|
||||
use crate::circuit::Op;
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use crate::circuit::hybrid::HybridOp;
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use crate::circuit::lookup::LookupOp;
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use crate::circuit::poly::PolyOp;
|
||||
use crate::circuit::Op;
|
||||
use crate::fieldutils::IntegerRep;
|
||||
use crate::tensor::{Tensor, TensorError, TensorType};
|
||||
use halo2curves::bn256::Fr as Fp;
|
||||
@@ -22,6 +22,7 @@ use std::sync::Arc;
|
||||
use tract_onnx::prelude::{DatumType, Node as OnnxNode, TypedFact, TypedOp};
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use tract_onnx::tract_core::ops::{
|
||||
Downsample,
|
||||
array::{
|
||||
Gather, GatherElements, GatherNd, MultiBroadcastTo, OneHot, ScatterElements, ScatterNd,
|
||||
Slice, Topk,
|
||||
@@ -31,7 +32,6 @@ use tract_onnx::tract_core::ops::{
|
||||
einsum::EinSum,
|
||||
element_wise::ElementWiseOp,
|
||||
nn::{LeakyRelu, Reduce, Softmax},
|
||||
Downsample,
|
||||
};
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use tract_onnx::tract_hir::{
|
||||
@@ -39,16 +39,15 @@ use tract_onnx::tract_hir::{
|
||||
ops::array::{Pad, PadMode, TypedConcat},
|
||||
ops::cnn::PoolSpec,
|
||||
ops::konst::Const,
|
||||
ops::nn::DataFormat,
|
||||
tract_core::ops::cast::Cast,
|
||||
tract_core::ops::cnn::{conv::KernelFormat, MaxPool, SumPool},
|
||||
tract_core::ops::cnn::{MaxPool, SumPool},
|
||||
};
|
||||
|
||||
/// Quantizes an iterable of f32s to a [Tensor] of i32s using a fixed point representation.
|
||||
/// Quantizes an iterable of f64 to a [Tensor] of IntegerRep using a fixed point representation.
|
||||
/// NAN gets mapped to 0. INFINITY and NEG_INFINITY error out.
|
||||
/// Arguments
|
||||
///
|
||||
/// * `vec` - the vector to quantize.
|
||||
/// * `dims` - the dimensionality of the resulting [Tensor].
|
||||
/// * `elem` - the element to quantize.
|
||||
/// * `shift` - offset used in the fixed point representation.
|
||||
/// * `scale` - `2^scale` used in the fixed point representation.
|
||||
pub fn quantize_float(
|
||||
@@ -59,7 +58,7 @@ pub fn quantize_float(
|
||||
let mult = scale_to_multiplier(scale);
|
||||
let max_value = ((IntegerRep::MAX as f64 - shift) / mult).round(); // the maximum value that can be represented w/o sig bit truncation
|
||||
|
||||
if *elem > max_value {
|
||||
if *elem > max_value || *elem < -max_value {
|
||||
return Err(TensorError::SigBitTruncationError);
|
||||
}
|
||||
|
||||
@@ -85,7 +84,7 @@ pub fn scale_to_multiplier(scale: crate::Scale) -> f64 {
|
||||
f64::powf(2., scale as f64)
|
||||
}
|
||||
|
||||
/// Converts a scale (log base 2) to a fixed point multiplier.
|
||||
/// Converts a fixed point multiplier to a scale (log base 2).
|
||||
pub fn multiplier_to_scale(mult: f64) -> crate::Scale {
|
||||
mult.log2().round() as crate::Scale
|
||||
}
|
||||
@@ -228,10 +227,7 @@ pub fn extract_tensor_value(
|
||||
.iter()
|
||||
.map(|x| match x.to_i64() {
|
||||
Ok(v) => Ok(v as f32),
|
||||
Err(_) => match x.to_i64() {
|
||||
Ok(v) => Ok(v as f32),
|
||||
Err(_) => Err(GraphError::UnsupportedDataType(0, "TDim".to_string())),
|
||||
},
|
||||
Err(_) => Err(GraphError::UnsupportedDataType(0, "TDim".to_string())),
|
||||
})
|
||||
.collect();
|
||||
|
||||
@@ -277,11 +273,9 @@ pub fn new_op_from_onnx(
|
||||
symbol_values: &SymbolValues,
|
||||
run_args: &crate::RunArgs,
|
||||
) -> Result<(SupportedOp, Vec<usize>), GraphError> {
|
||||
use std::f64::consts::E;
|
||||
|
||||
use tract_onnx::tract_core::ops::array::Trilu;
|
||||
|
||||
use crate::circuit::InputType;
|
||||
use std::f64::consts::E;
|
||||
use tract_onnx::tract_core::ops::array::Trilu;
|
||||
|
||||
let input_scales = inputs
|
||||
.iter()
|
||||
@@ -312,6 +306,9 @@ pub fn new_op_from_onnx(
|
||||
let mut deleted_indices = vec![];
|
||||
let node = match node.op().name().as_ref() {
|
||||
"ShiftLeft" => {
|
||||
if inputs.len() != 2 {
|
||||
return Err(GraphError::InvalidDims(idx, "shift left".to_string()));
|
||||
};
|
||||
// load shift amount
|
||||
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
|
||||
inputs[1].decrement_use();
|
||||
@@ -324,10 +321,13 @@ pub fn new_op_from_onnx(
|
||||
out_scale: Some(input_scales[0] - raw_values[0] as i32),
|
||||
})
|
||||
} else {
|
||||
return Err(GraphError::OpMismatch(idx, "ShiftLeft".to_string()));
|
||||
return Err(GraphError::OpMismatch(idx, "shift left".to_string()));
|
||||
}
|
||||
}
|
||||
"ShiftRight" => {
|
||||
if inputs.len() != 2 {
|
||||
return Err(GraphError::InvalidDims(idx, "shift right".to_string()));
|
||||
};
|
||||
// load shift amount
|
||||
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
|
||||
inputs[1].decrement_use();
|
||||
@@ -340,7 +340,7 @@ pub fn new_op_from_onnx(
|
||||
out_scale: Some(input_scales[0] + raw_values[0] as i32),
|
||||
})
|
||||
} else {
|
||||
return Err(GraphError::OpMismatch(idx, "ShiftRight".to_string()));
|
||||
return Err(GraphError::OpMismatch(idx, "shift right".to_string()));
|
||||
}
|
||||
}
|
||||
"MultiBroadcastTo" => {
|
||||
@@ -363,7 +363,10 @@ pub fn new_op_from_onnx(
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(input_ops.len(), 3, "Range requires 3 inputs");
|
||||
if input_ops.len() != 3 {
|
||||
return Err(GraphError::InvalidDims(idx, "range".to_string()));
|
||||
}
|
||||
|
||||
let input_ops = input_ops
|
||||
.iter()
|
||||
.map(|x| x.get_constant().ok_or(GraphError::NonConstantRange))
|
||||
@@ -378,7 +381,11 @@ pub fn new_op_from_onnx(
|
||||
// Quantize the raw value (integers)
|
||||
let quantized_value = quantize_tensor(raw_value.clone(), 0, &Visibility::Fixed)?;
|
||||
|
||||
let c = crate::circuit::ops::Constant::new(quantized_value, raw_value);
|
||||
let c = crate::circuit::ops::Constant::new(
|
||||
quantized_value,
|
||||
raw_value,
|
||||
!run_args.ignore_range_check_inputs_outputs,
|
||||
);
|
||||
// Create a constant op
|
||||
SupportedOp::Constant(c)
|
||||
}
|
||||
@@ -419,6 +426,10 @@ pub fn new_op_from_onnx(
|
||||
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
|
||||
inputs[1].decrement_use();
|
||||
deleted_indices.push(inputs.len() - 1);
|
||||
if inputs[0].out_dims().is_empty() || inputs[0].out_dims()[0].len() <= axis {
|
||||
return Err(GraphError::InvalidDims(idx, "gather".to_string()));
|
||||
}
|
||||
|
||||
op = SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::Gather {
|
||||
dim: axis,
|
||||
constant_idx: Some(c.raw_values.map(|x| {
|
||||
@@ -436,6 +447,7 @@ pub fn new_op_from_onnx(
|
||||
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
|
||||
scale: 0,
|
||||
datum_type: InputType::TDim,
|
||||
decomp: false,
|
||||
}));
|
||||
inputs[1].bump_scale(0);
|
||||
}
|
||||
@@ -447,8 +459,17 @@ pub fn new_op_from_onnx(
|
||||
"Topk" => {
|
||||
let op = load_op::<Topk>(node.op(), idx, node.op().name().to_string())?;
|
||||
let axis = op.axis;
|
||||
|
||||
if inputs.len() != 2 {
|
||||
return Err(GraphError::InvalidDims(idx, "topk".to_string()));
|
||||
};
|
||||
|
||||
// if param_visibility.is_public() {
|
||||
let k = if let Some(c) = inputs[1].opkind().get_mutable_constant() {
|
||||
if c.raw_values.len() != 1 {
|
||||
return Err(GraphError::InvalidDims(idx, "topk".to_string()));
|
||||
}
|
||||
|
||||
inputs[1].decrement_use();
|
||||
deleted_indices.push(inputs.len() - 1);
|
||||
c.raw_values.map(|x| x as usize)[0]
|
||||
@@ -488,6 +509,10 @@ pub fn new_op_from_onnx(
|
||||
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
|
||||
inputs[1].decrement_use();
|
||||
deleted_indices.push(1);
|
||||
if c.raw_values.is_empty() {
|
||||
return Err(GraphError::InvalidDims(idx, "scatter elements".to_string()));
|
||||
}
|
||||
|
||||
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::ScatterElements {
|
||||
dim: axis,
|
||||
constant_idx: Some(c.raw_values.map(|x| x as usize)),
|
||||
@@ -499,6 +524,7 @@ pub fn new_op_from_onnx(
|
||||
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
|
||||
scale: 0,
|
||||
datum_type: InputType::TDim,
|
||||
decomp: !run_args.ignore_range_check_inputs_outputs,
|
||||
}));
|
||||
inputs[1].bump_scale(0);
|
||||
}
|
||||
@@ -522,6 +548,9 @@ pub fn new_op_from_onnx(
|
||||
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
|
||||
inputs[1].decrement_use();
|
||||
deleted_indices.push(1);
|
||||
if c.raw_values.is_empty() {
|
||||
return Err(GraphError::InvalidDims(idx, "scatter nd".to_string()));
|
||||
}
|
||||
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::ScatterND {
|
||||
constant_idx: Some(c.raw_values.map(|x| x as usize)),
|
||||
})
|
||||
@@ -532,6 +561,7 @@ pub fn new_op_from_onnx(
|
||||
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
|
||||
scale: 0,
|
||||
datum_type: InputType::TDim,
|
||||
decomp: !run_args.ignore_range_check_inputs_outputs,
|
||||
}));
|
||||
inputs[1].bump_scale(0);
|
||||
}
|
||||
@@ -555,6 +585,9 @@ pub fn new_op_from_onnx(
|
||||
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
|
||||
inputs[1].decrement_use();
|
||||
deleted_indices.push(1);
|
||||
if c.raw_values.is_empty() {
|
||||
return Err(GraphError::InvalidDims(idx, "gather nd".to_string()));
|
||||
}
|
||||
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::GatherND {
|
||||
batch_dims,
|
||||
indices: Some(c.raw_values.map(|x| x as usize)),
|
||||
@@ -566,6 +599,7 @@ pub fn new_op_from_onnx(
|
||||
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
|
||||
scale: 0,
|
||||
datum_type: InputType::TDim,
|
||||
decomp: !run_args.ignore_range_check_inputs_outputs,
|
||||
}));
|
||||
inputs[1].bump_scale(0);
|
||||
}
|
||||
@@ -589,6 +623,9 @@ pub fn new_op_from_onnx(
|
||||
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
|
||||
inputs[1].decrement_use();
|
||||
deleted_indices.push(1);
|
||||
if c.raw_values.is_empty() {
|
||||
return Err(GraphError::InvalidDims(idx, "gather elements".to_string()));
|
||||
}
|
||||
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::GatherElements {
|
||||
dim: axis,
|
||||
constant_idx: Some(c.raw_values.map(|x| x as usize)),
|
||||
@@ -600,6 +637,7 @@ pub fn new_op_from_onnx(
|
||||
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
|
||||
scale: 0,
|
||||
datum_type: InputType::TDim,
|
||||
decomp: !run_args.ignore_range_check_inputs_outputs,
|
||||
}));
|
||||
inputs[1].bump_scale(0);
|
||||
}
|
||||
@@ -674,7 +712,11 @@ pub fn new_op_from_onnx(
|
||||
constant_scale,
|
||||
&run_args.param_visibility,
|
||||
)?;
|
||||
let c = crate::circuit::ops::Constant::new(quantized_value, raw_value);
|
||||
let c = crate::circuit::ops::Constant::new(
|
||||
quantized_value,
|
||||
raw_value,
|
||||
run_args.ignore_range_check_inputs_outputs,
|
||||
);
|
||||
// Create a constant op
|
||||
SupportedOp::Constant(c)
|
||||
}
|
||||
@@ -684,7 +726,9 @@ pub fn new_op_from_onnx(
|
||||
};
|
||||
let op = load_op::<Reduce>(node.op(), idx, node.op().name().to_string())?;
|
||||
let axes: Vec<usize> = op.axes.into_iter().collect();
|
||||
assert_eq!(axes.len(), 1, "only support argmax over one axis");
|
||||
if axes.len() != 1 {
|
||||
return Err(GraphError::InvalidDims(idx, "argmax".to_string()));
|
||||
}
|
||||
|
||||
SupportedOp::Hybrid(HybridOp::ReduceArgMax { dim: axes[0] })
|
||||
}
|
||||
@@ -694,7 +738,9 @@ pub fn new_op_from_onnx(
|
||||
};
|
||||
let op = load_op::<Reduce>(node.op(), idx, node.op().name().to_string())?;
|
||||
let axes: Vec<usize> = op.axes.into_iter().collect();
|
||||
assert_eq!(axes.len(), 1, "only support argmin over one axis");
|
||||
if axes.len() != 1 {
|
||||
return Err(GraphError::InvalidDims(idx, "argmin".to_string()));
|
||||
}
|
||||
|
||||
SupportedOp::Hybrid(HybridOp::ReduceArgMin { dim: axes[0] })
|
||||
}
|
||||
@@ -803,6 +849,9 @@ pub fn new_op_from_onnx(
|
||||
}
|
||||
}
|
||||
"Recip" => {
|
||||
if inputs.len() != 1 {
|
||||
return Err(GraphError::InvalidDims(idx, "recip".to_string()));
|
||||
};
|
||||
let in_scale = input_scales[0];
|
||||
let max_scale = std::cmp::max(scales.get_max(), in_scale);
|
||||
// If the input scale is larger than the params scale
|
||||
@@ -846,6 +895,9 @@ pub fn new_op_from_onnx(
|
||||
scale: scale_to_multiplier(input_scales[0]).into(),
|
||||
}),
|
||||
"Rsqrt" => {
|
||||
if input_scales.len() != 1 {
|
||||
return Err(GraphError::InvalidDims(idx, "rsqrt".to_string()));
|
||||
};
|
||||
let in_scale = input_scales[0];
|
||||
let max_scale = std::cmp::max(scales.get_max(), in_scale);
|
||||
SupportedOp::Hybrid(HybridOp::Rsqrt {
|
||||
@@ -927,13 +979,19 @@ pub fn new_op_from_onnx(
|
||||
DatumType::F64 => (scales.input, InputType::F64),
|
||||
_ => return Err(GraphError::UnsupportedDataType(idx, format!("{:?}", dt))),
|
||||
};
|
||||
SupportedOp::Input(crate::circuit::ops::Input { scale, datum_type })
|
||||
SupportedOp::Input(crate::circuit::ops::Input {
|
||||
scale,
|
||||
datum_type,
|
||||
decomp: !run_args.ignore_range_check_inputs_outputs,
|
||||
})
|
||||
}
|
||||
"Cast" => {
|
||||
let op = load_op::<Cast>(node.op(), idx, node.op().name().to_string())?;
|
||||
let dt = op.to;
|
||||
|
||||
assert_eq!(input_scales.len(), 1);
|
||||
if input_scales.len() != 1 {
|
||||
return Err(GraphError::InvalidDims(idx, "cast".to_string()));
|
||||
};
|
||||
|
||||
match dt {
|
||||
DatumType::Bool
|
||||
@@ -983,6 +1041,11 @@ pub fn new_op_from_onnx(
|
||||
|
||||
if const_idx.len() == 1 {
|
||||
let const_idx = const_idx[0];
|
||||
|
||||
if inputs.len() <= const_idx {
|
||||
return Err(GraphError::InvalidDims(idx, "mul".to_string()));
|
||||
}
|
||||
|
||||
if let Some(c) = inputs[const_idx].opkind().get_mutable_constant() {
|
||||
if c.raw_values.len() == 1 && c.raw_values[0] < 1. {
|
||||
// if not divisible by 2 then we need to add a range check
|
||||
@@ -1057,6 +1120,9 @@ pub fn new_op_from_onnx(
|
||||
return Err(GraphError::OpMismatch(idx, "softmax".to_string()));
|
||||
}
|
||||
};
|
||||
if input_scales.len() != 1 {
|
||||
return Err(GraphError::InvalidDims(idx, "softmax".to_string()));
|
||||
}
|
||||
|
||||
let in_scale = input_scales[0];
|
||||
let max_scale = std::cmp::max(scales.get_max(), in_scale);
|
||||
@@ -1079,13 +1145,6 @@ pub fn new_op_from_onnx(
|
||||
|
||||
let pool_spec: &PoolSpec = &sumpool_node.pool_spec;
|
||||
|
||||
// only support pytorch type formatting for now
|
||||
if pool_spec.data_format != DataFormat::NCHW {
|
||||
return Err(GraphError::MissingParams(
|
||||
"data in wrong format".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let stride = extract_strides(pool_spec)?;
|
||||
let padding = extract_padding(pool_spec, &input_dims[0])?;
|
||||
let kernel_shape = &pool_spec.kernel_shape;
|
||||
@@ -1094,24 +1153,45 @@ pub fn new_op_from_onnx(
|
||||
padding,
|
||||
stride: stride.to_vec(),
|
||||
pool_dims: kernel_shape.to_vec(),
|
||||
data_format: pool_spec.data_format.into(),
|
||||
})
|
||||
}
|
||||
"Ceil" => {
|
||||
if input_scales.len() != 1 {
|
||||
return Err(GraphError::InvalidDims(idx, "ceil".to_string()));
|
||||
}
|
||||
SupportedOp::Hybrid(HybridOp::Ceil {
|
||||
scale: scale_to_multiplier(input_scales[0]).into(),
|
||||
legs: run_args.decomp_legs,
|
||||
})
|
||||
}
|
||||
"Floor" => {
|
||||
if input_scales.len() != 1 {
|
||||
return Err(GraphError::InvalidDims(idx, "floor".to_string()));
|
||||
}
|
||||
SupportedOp::Hybrid(HybridOp::Floor {
|
||||
scale: scale_to_multiplier(input_scales[0]).into(),
|
||||
legs: run_args.decomp_legs,
|
||||
})
|
||||
}
|
||||
"Round" => {
|
||||
if input_scales.len() != 1 {
|
||||
return Err(GraphError::InvalidDims(idx, "round".to_string()));
|
||||
}
|
||||
SupportedOp::Hybrid(HybridOp::Round {
|
||||
scale: scale_to_multiplier(input_scales[0]).into(),
|
||||
legs: run_args.decomp_legs,
|
||||
})
|
||||
}
|
||||
"RoundHalfToEven" => {
|
||||
if input_scales.len() != 1 {
|
||||
return Err(GraphError::InvalidDims(idx, "roundhalftoeven".to_string()));
|
||||
}
|
||||
SupportedOp::Hybrid(HybridOp::RoundHalfToEven {
|
||||
scale: scale_to_multiplier(input_scales[0]).into(),
|
||||
legs: run_args.decomp_legs,
|
||||
})
|
||||
}
|
||||
"Ceil" => SupportedOp::Hybrid(HybridOp::Ceil {
|
||||
scale: scale_to_multiplier(input_scales[0]).into(),
|
||||
legs: run_args.decomp_legs,
|
||||
}),
|
||||
"Floor" => SupportedOp::Hybrid(HybridOp::Floor {
|
||||
scale: scale_to_multiplier(input_scales[0]).into(),
|
||||
legs: run_args.decomp_legs,
|
||||
}),
|
||||
"Round" => SupportedOp::Hybrid(HybridOp::Round {
|
||||
scale: scale_to_multiplier(input_scales[0]).into(),
|
||||
legs: run_args.decomp_legs,
|
||||
}),
|
||||
"RoundHalfToEven" => SupportedOp::Hybrid(HybridOp::RoundHalfToEven {
|
||||
scale: scale_to_multiplier(input_scales[0]).into(),
|
||||
legs: run_args.decomp_legs,
|
||||
}),
|
||||
"Sign" => SupportedOp::Linear(PolyOp::Sign),
|
||||
"Pow" => {
|
||||
// Extract the slope layer hyperparams from a const
|
||||
@@ -1121,7 +1201,9 @@ pub fn new_op_from_onnx(
|
||||
inputs[1].decrement_use();
|
||||
deleted_indices.push(1);
|
||||
if c.raw_values.len() > 1 {
|
||||
unimplemented!("only support scalar pow")
|
||||
return Err(GraphError::NonScalarPower);
|
||||
} else if c.raw_values.is_empty() {
|
||||
return Err(GraphError::InvalidDims(idx, "pow".to_string()));
|
||||
}
|
||||
|
||||
let exponent = c.raw_values[0];
|
||||
@@ -1138,7 +1220,9 @@ pub fn new_op_from_onnx(
|
||||
inputs[0].decrement_use();
|
||||
deleted_indices.push(0);
|
||||
if c.raw_values.len() > 1 {
|
||||
unimplemented!("only support scalar base")
|
||||
return Err(GraphError::NonScalarBase);
|
||||
} else if c.raw_values.is_empty() {
|
||||
return Err(GraphError::InvalidDims(idx, "pow".to_string()));
|
||||
}
|
||||
|
||||
let base = c.raw_values[0];
|
||||
@@ -1148,10 +1232,14 @@ pub fn new_op_from_onnx(
|
||||
base: base.into(),
|
||||
})
|
||||
} else {
|
||||
unimplemented!("only support constant base or pow for now")
|
||||
return Err(GraphError::InvalidDims(idx, "pow".to_string()));
|
||||
}
|
||||
}
|
||||
"Div" => {
|
||||
if inputs.len() != 2 {
|
||||
return Err(GraphError::InvalidDims(idx, "div".to_string()));
|
||||
}
|
||||
|
||||
let const_idx = inputs
|
||||
.iter()
|
||||
.enumerate()
|
||||
@@ -1159,14 +1247,15 @@ pub fn new_op_from_onnx(
|
||||
.map(|(i, _)| i)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
if const_idx.len() > 1 {
|
||||
if const_idx.len() > 1 || const_idx.is_empty() {
|
||||
return Err(GraphError::InvalidDims(idx, "div".to_string()));
|
||||
}
|
||||
|
||||
let const_idx = const_idx[0];
|
||||
|
||||
if const_idx != 1 {
|
||||
unimplemented!("only support div with constant as second input")
|
||||
return Err(GraphError::MisformedParams(
|
||||
"only support div with constant as second input".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if let Some(c) = inputs[const_idx].opkind().get_mutable_constant() {
|
||||
@@ -1176,14 +1265,28 @@ pub fn new_op_from_onnx(
|
||||
// get the non constant index
|
||||
let denom = c.raw_values[0];
|
||||
|
||||
SupportedOp::Hybrid(HybridOp::Div {
|
||||
let op = SupportedOp::Hybrid(HybridOp::Div {
|
||||
denom: denom.into(),
|
||||
})
|
||||
});
|
||||
|
||||
// if the input is scale 0 we re up to the max scale
|
||||
if input_scales[0] == 0 {
|
||||
SupportedOp::Rescaled(Rescaled {
|
||||
inner: Box::new(op),
|
||||
scale: vec![(0, scale_to_multiplier(scales.get_max()) as u128)],
|
||||
})
|
||||
} else {
|
||||
op
|
||||
}
|
||||
} else {
|
||||
unimplemented!("only support non zero divisors of size 1")
|
||||
return Err(GraphError::MisformedParams(
|
||||
"only support non zero divisors of size 1".to_string(),
|
||||
));
|
||||
}
|
||||
} else {
|
||||
unimplemented!("only support div with constant as second input")
|
||||
return Err(GraphError::MisformedParams(
|
||||
"only support div with constant as second input".to_string(),
|
||||
));
|
||||
}
|
||||
}
|
||||
"Cube" => SupportedOp::Linear(PolyOp::Pow(3)),
|
||||
@@ -1204,15 +1307,6 @@ pub fn new_op_from_onnx(
|
||||
}
|
||||
}
|
||||
|
||||
if ((conv_node.pool_spec.data_format != DataFormat::NCHW)
|
||||
&& (conv_node.pool_spec.data_format != DataFormat::CHW))
|
||||
|| (conv_node.kernel_fmt != KernelFormat::OIHW)
|
||||
{
|
||||
return Err(GraphError::MisformedParams(
|
||||
"data or kernel in wrong format".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let pool_spec = &conv_node.pool_spec;
|
||||
|
||||
let stride = extract_strides(pool_spec)?;
|
||||
@@ -1240,6 +1334,8 @@ pub fn new_op_from_onnx(
|
||||
padding,
|
||||
stride,
|
||||
group,
|
||||
data_format: conv_node.pool_spec.data_format.into(),
|
||||
kernel_format: conv_node.kernel_fmt.into(),
|
||||
})
|
||||
}
|
||||
"Not" => SupportedOp::Linear(PolyOp::Not),
|
||||
@@ -1263,14 +1359,6 @@ pub fn new_op_from_onnx(
|
||||
}
|
||||
}
|
||||
|
||||
if (deconv_node.pool_spec.data_format != DataFormat::NCHW)
|
||||
|| (deconv_node.kernel_format != KernelFormat::OIHW)
|
||||
{
|
||||
return Err(GraphError::MisformedParams(
|
||||
"data or kernel in wrong format".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let pool_spec = &deconv_node.pool_spec;
|
||||
|
||||
let stride = extract_strides(pool_spec)?;
|
||||
@@ -1296,6 +1384,8 @@ pub fn new_op_from_onnx(
|
||||
output_padding: deconv_node.adjustments.to_vec(),
|
||||
stride,
|
||||
group: deconv_node.group,
|
||||
data_format: deconv_node.pool_spec.data_format.into(),
|
||||
kernel_format: deconv_node.kernel_format.into(),
|
||||
})
|
||||
}
|
||||
"Downsample" => {
|
||||
@@ -1308,7 +1398,7 @@ pub fn new_op_from_onnx(
|
||||
|
||||
SupportedOp::Linear(PolyOp::Downsample {
|
||||
axis: downsample_node.axis,
|
||||
stride: downsample_node.stride as usize,
|
||||
stride: downsample_node.stride,
|
||||
modulo: downsample_node.modulo,
|
||||
})
|
||||
}
|
||||
@@ -1323,7 +1413,7 @@ pub fn new_op_from_onnx(
|
||||
if !resize_node.contains("interpolator: Nearest")
|
||||
&& !resize_node.contains("nearest: Floor")
|
||||
{
|
||||
unimplemented!("Only nearest neighbor interpolation is supported")
|
||||
return Err(GraphError::InvalidInterpolation);
|
||||
}
|
||||
// check if optional scale factor is present
|
||||
if inputs.len() != 2 && inputs.len() != 3 {
|
||||
@@ -1379,13 +1469,6 @@ pub fn new_op_from_onnx(
|
||||
|
||||
let pool_spec: &PoolSpec = &sumpool_node.pool_spec;
|
||||
|
||||
// only support pytorch type formatting for now
|
||||
if pool_spec.data_format != DataFormat::NCHW {
|
||||
return Err(GraphError::MissingParams(
|
||||
"data in wrong format".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let stride = extract_strides(pool_spec)?;
|
||||
let padding = extract_padding(pool_spec, &input_dims[0])?;
|
||||
|
||||
@@ -1394,6 +1477,7 @@ pub fn new_op_from_onnx(
|
||||
stride: stride.to_vec(),
|
||||
kernel_shape: pool_spec.kernel_shape.to_vec(),
|
||||
normalized: sumpool_node.normalize,
|
||||
data_format: pool_spec.data_format.into(),
|
||||
})
|
||||
}
|
||||
"Pad" => {
|
||||
@@ -1427,6 +1511,10 @@ pub fn new_op_from_onnx(
|
||||
SupportedOp::Linear(PolyOp::Reshape(output_shape))
|
||||
}
|
||||
"Flatten" => {
|
||||
if inputs.len() != 1 || inputs[0].out_dims().is_empty() {
|
||||
return Err(GraphError::InvalidDims(idx, "flatten".to_string()));
|
||||
};
|
||||
|
||||
let new_dims: Vec<usize> = vec![inputs[0].out_dims()[0].iter().product::<usize>()];
|
||||
SupportedOp::Linear(PolyOp::Flatten(new_dims))
|
||||
}
|
||||
@@ -1500,12 +1588,10 @@ pub fn homogenize_input_scales(
|
||||
input_scales: Vec<crate::Scale>,
|
||||
inputs_to_scale: Vec<usize>,
|
||||
) -> Result<Box<dyn Op<Fp>>, GraphError> {
|
||||
let relevant_input_scales = input_scales
|
||||
.clone()
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.filter(|(idx, _)| inputs_to_scale.contains(idx))
|
||||
.map(|(_, scale)| scale)
|
||||
let relevant_input_scales = inputs_to_scale
|
||||
.iter()
|
||||
.filter(|idx| input_scales.len() > **idx)
|
||||
.map(|&idx| input_scales[idx])
|
||||
.collect_vec();
|
||||
|
||||
if inputs_to_scale.is_empty() {
|
||||
@@ -1546,10 +1632,30 @@ pub fn homogenize_input_scales(
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
/// tests for the utility module
|
||||
pub mod tests {
|
||||
|
||||
use super::*;
|
||||
|
||||
// quantization tests
|
||||
#[test]
|
||||
fn test_quantize_tensor() {
|
||||
let tensor: Tensor<f32> = (0..10).map(|x| x as f32).into();
|
||||
let reference: Tensor<Fp> = (0..10).map(|x| x.into()).into();
|
||||
let scale = 0;
|
||||
let visibility = &Visibility::Public;
|
||||
let quantized: Tensor<Fp> = quantize_tensor(tensor, scale, visibility).unwrap();
|
||||
assert_eq!(quantized.len(), 10);
|
||||
assert_eq!(quantized, reference);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_quantize_edge_cases() {
|
||||
assert_eq!(quantize_float(&f64::NAN, 0.0, 0).unwrap(), 0);
|
||||
assert!(quantize_float(&f64::INFINITY, 0.0, 0).is_err());
|
||||
assert!(quantize_float(&f64::NEG_INFINITY, 0.0, 0).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_flatten_valtensors() {
|
||||
let tensor1: Tensor<Fp> = (0..10).map(|x| x.into()).into();
|
||||
|
||||
@@ -11,35 +11,34 @@ use log::debug;
|
||||
use pyo3::{
|
||||
exceptions::PyValueError, FromPyObject, IntoPy, PyObject, PyResult, Python, ToPyObject,
|
||||
};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use tosubcommand::ToFlags;
|
||||
|
||||
use self::errors::GraphError;
|
||||
|
||||
use super::*;
|
||||
|
||||
/// Label enum to track whether model input, model parameters, and model output are public, private, or hashed
|
||||
/// Defines the visibility level of values within the zero-knowledge circuit
|
||||
/// Controls how values are handled during proof generation and verification
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Default)]
|
||||
pub enum Visibility {
|
||||
/// Mark an item as private to the prover (not in the proof submitted for verification)
|
||||
/// Value is private to the prover and not included in proof
|
||||
#[default]
|
||||
Private,
|
||||
/// Mark an item as public (sent in the proof submitted for verification)
|
||||
/// Value is public and included in proof for verification
|
||||
Public,
|
||||
/// Mark an item as publicly committed to (hash sent in the proof submitted for verification)
|
||||
/// Value is hashed and the hash is included in proof
|
||||
Hashed {
|
||||
/// Whether the hash is used as an instance (sent in the proof submitted for verification)
|
||||
/// if false the hash is used as an advice (not in the proof submitted for verification) and is then sent to the computational graph
|
||||
/// if true the hash is used as an instance (sent in the proof submitted for verification) the *inputs* to the hashing function are then sent to the computational graph
|
||||
/// Controls how the hash is handled in proof
|
||||
/// true - hash is included directly in proof (public)
|
||||
/// false - hash is used as advice and passed to computational graph
|
||||
hash_is_public: bool,
|
||||
///
|
||||
/// Specifies which outputs this hash affects
|
||||
outlets: Vec<usize>,
|
||||
},
|
||||
/// Mark an item as publicly committed to (KZG commitment sent in the proof submitted for verification)
|
||||
/// Value is committed using KZG commitment scheme
|
||||
KZGCommit,
|
||||
/// assigned as a constant in the circuit
|
||||
/// Value is assigned as a constant in the circuit
|
||||
Fixed,
|
||||
}
|
||||
|
||||
@@ -66,15 +65,17 @@ impl Display for Visibility {
|
||||
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
impl ToFlags for Visibility {
|
||||
/// Converts visibility to command line flags
|
||||
fn to_flags(&self) -> Vec<String> {
|
||||
vec![format!("{}", self)]
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<&'a str> for Visibility {
|
||||
/// Converts string representation to Visibility
|
||||
fn from(s: &'a str) -> Self {
|
||||
if s.contains("hashed/private") {
|
||||
// split on last occurrence of '/'
|
||||
// Split on last occurrence of '/'
|
||||
let (_, outlets) = s.split_at(s.rfind('/').unwrap());
|
||||
let outlets = outlets
|
||||
.trim_start_matches('/')
|
||||
@@ -106,8 +107,8 @@ impl<'a> From<&'a str> for Visibility {
|
||||
}
|
||||
|
||||
#[cfg(feature = "python-bindings")]
|
||||
/// Converts Visibility into a PyObject (Required for Visibility to be compatible with Python)
|
||||
impl IntoPy<PyObject> for Visibility {
|
||||
/// Converts Visibility to Python object
|
||||
fn into_py(self, py: Python) -> PyObject {
|
||||
match self {
|
||||
Visibility::Private => "private".to_object(py),
|
||||
@@ -134,14 +135,13 @@ impl IntoPy<PyObject> for Visibility {
|
||||
}
|
||||
|
||||
#[cfg(feature = "python-bindings")]
|
||||
/// Obtains Visibility from PyObject (Required for Visibility to be compatible with Python)
|
||||
impl<'source> FromPyObject<'source> for Visibility {
|
||||
/// Extracts Visibility from Python object
|
||||
fn extract_bound(ob: &pyo3::Bound<'source, pyo3::PyAny>) -> PyResult<Self> {
|
||||
let strval = String::extract_bound(ob)?;
|
||||
let strval = strval.as_str();
|
||||
|
||||
if strval.contains("hashed/private") {
|
||||
// split on last occurence of '/'
|
||||
let (_, outlets) = strval.split_at(strval.rfind('/').unwrap());
|
||||
let outlets = outlets
|
||||
.trim_start_matches('/')
|
||||
@@ -174,29 +174,32 @@ impl<'source> FromPyObject<'source> for Visibility {
|
||||
}
|
||||
|
||||
impl Visibility {
|
||||
#[allow(missing_docs)]
|
||||
/// Returns true if visibility is Fixed
|
||||
pub fn is_fixed(&self) -> bool {
|
||||
matches!(&self, Visibility::Fixed)
|
||||
}
|
||||
#[allow(missing_docs)]
|
||||
|
||||
/// Returns true if visibility is Private or hashed private
|
||||
pub fn is_private(&self) -> bool {
|
||||
matches!(&self, Visibility::Private) || self.is_hashed_private()
|
||||
}
|
||||
|
||||
#[allow(missing_docs)]
|
||||
/// Returns true if visibility is Public
|
||||
pub fn is_public(&self) -> bool {
|
||||
matches!(&self, Visibility::Public)
|
||||
}
|
||||
#[allow(missing_docs)]
|
||||
|
||||
/// Returns true if visibility involves hashing
|
||||
pub fn is_hashed(&self) -> bool {
|
||||
matches!(&self, Visibility::Hashed { .. })
|
||||
}
|
||||
#[allow(missing_docs)]
|
||||
|
||||
/// Returns true if visibility uses KZG commitment
|
||||
pub fn is_polycommit(&self) -> bool {
|
||||
matches!(&self, Visibility::KZGCommit)
|
||||
}
|
||||
|
||||
#[allow(missing_docs)]
|
||||
/// Returns true if visibility is hashed with public hash
|
||||
pub fn is_hashed_public(&self) -> bool {
|
||||
if let Visibility::Hashed {
|
||||
hash_is_public: true,
|
||||
@@ -207,7 +210,8 @@ impl Visibility {
|
||||
}
|
||||
false
|
||||
}
|
||||
#[allow(missing_docs)]
|
||||
|
||||
/// Returns true if visibility is hashed with private hash
|
||||
pub fn is_hashed_private(&self) -> bool {
|
||||
if let Visibility::Hashed {
|
||||
hash_is_public: false,
|
||||
@@ -219,11 +223,12 @@ impl Visibility {
|
||||
false
|
||||
}
|
||||
|
||||
#[allow(missing_docs)]
|
||||
/// Returns true if visibility requires additional processing
|
||||
pub fn requires_processing(&self) -> bool {
|
||||
matches!(&self, Visibility::Hashed { .. }) | matches!(&self, Visibility::KZGCommit)
|
||||
}
|
||||
#[allow(missing_docs)]
|
||||
|
||||
/// Returns vector of output indices that this visibility setting affects
|
||||
pub fn overwrites_inputs(&self) -> Vec<usize> {
|
||||
if let Visibility::Hashed { outlets, .. } = self {
|
||||
return outlets.clone();
|
||||
@@ -232,14 +237,14 @@ impl Visibility {
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents the scale of the model input, model parameters.
|
||||
/// Manages scaling factors for different parts of the model
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, PartialOrd)]
|
||||
pub struct VarScales {
|
||||
///
|
||||
/// Scale factor for input values
|
||||
pub input: crate::Scale,
|
||||
///
|
||||
/// Scale factor for parameter values
|
||||
pub params: crate::Scale,
|
||||
///
|
||||
/// Multiplier for scale rebasing
|
||||
pub rebase_multiplier: u32,
|
||||
}
|
||||
|
||||
@@ -250,17 +255,17 @@ impl std::fmt::Display for VarScales {
|
||||
}
|
||||
|
||||
impl VarScales {
|
||||
///
|
||||
/// Returns maximum scale value
|
||||
pub fn get_max(&self) -> crate::Scale {
|
||||
std::cmp::max(self.input, self.params)
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns minimum scale value
|
||||
pub fn get_min(&self) -> crate::Scale {
|
||||
std::cmp::min(self.input, self.params)
|
||||
}
|
||||
|
||||
/// Place in [VarScales] struct.
|
||||
/// Creates VarScales from runtime arguments
|
||||
pub fn from_args(args: &RunArgs) -> Self {
|
||||
Self {
|
||||
input: args.input_scale,
|
||||
@@ -270,16 +275,17 @@ impl VarScales {
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents whether the model input, model parameters, and model output are Public or Private to the prover.
|
||||
/// Controls visibility settings for different parts of the model
|
||||
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, PartialOrd)]
|
||||
pub struct VarVisibility {
|
||||
/// Input to the model or computational graph
|
||||
/// Visibility of model inputs
|
||||
pub input: Visibility,
|
||||
/// Parameters, such as weights and biases, in the model
|
||||
/// Visibility of model parameters (weights, biases)
|
||||
pub params: Visibility,
|
||||
/// Output of the model or computational graph
|
||||
/// Visibility of model outputs
|
||||
pub output: Visibility,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for VarVisibility {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
write!(
|
||||
@@ -301,8 +307,7 @@ impl Default for VarVisibility {
|
||||
}
|
||||
|
||||
impl VarVisibility {
|
||||
/// Read from cli args whether the model input, model parameters, and model output are Public or Private to the prover.
|
||||
/// Place in [VarVisibility] struct.
|
||||
/// Creates visibility settings from runtime arguments
|
||||
pub fn from_args(args: &RunArgs) -> Result<Self, GraphError> {
|
||||
let input_vis = &args.input_visibility;
|
||||
let params_vis = &args.param_visibility;
|
||||
@@ -313,17 +318,17 @@ impl VarVisibility {
|
||||
}
|
||||
|
||||
if !output_vis.is_public()
|
||||
& !params_vis.is_public()
|
||||
& !input_vis.is_public()
|
||||
& !output_vis.is_fixed()
|
||||
& !params_vis.is_fixed()
|
||||
& !input_vis.is_fixed()
|
||||
& !output_vis.is_hashed()
|
||||
& !params_vis.is_hashed()
|
||||
& !input_vis.is_hashed()
|
||||
& !output_vis.is_polycommit()
|
||||
& !params_vis.is_polycommit()
|
||||
& !input_vis.is_polycommit()
|
||||
&& !params_vis.is_public()
|
||||
&& !input_vis.is_public()
|
||||
&& !output_vis.is_fixed()
|
||||
&& !params_vis.is_fixed()
|
||||
&& !input_vis.is_fixed()
|
||||
&& !output_vis.is_hashed()
|
||||
&& !params_vis.is_hashed()
|
||||
&& !input_vis.is_hashed()
|
||||
&& !output_vis.is_polycommit()
|
||||
&& !params_vis.is_polycommit()
|
||||
&& !input_vis.is_polycommit()
|
||||
{
|
||||
return Err(GraphError::Visibility);
|
||||
}
|
||||
@@ -335,17 +340,17 @@ impl VarVisibility {
|
||||
}
|
||||
}
|
||||
|
||||
/// A wrapper for holding all columns that will be assigned to by a model.
|
||||
/// Container for circuit columns used by a model
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ModelVars<F: PrimeField + TensorType + PartialOrd> {
|
||||
#[allow(missing_docs)]
|
||||
/// Advice columns for circuit assignments
|
||||
pub advices: Vec<VarTensor>,
|
||||
#[allow(missing_docs)]
|
||||
/// Optional instance column for public inputs
|
||||
pub instance: Option<ValTensor<F>>,
|
||||
}
|
||||
|
||||
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ModelVars<F> {
|
||||
/// Get instance col
|
||||
/// Gets reference to instance column if it exists
|
||||
pub fn get_instance_col(&self) -> Option<&Column<Instance>> {
|
||||
if let Some(instance) = &self.instance {
|
||||
match instance {
|
||||
@@ -357,14 +362,14 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ModelVars<F> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the initial instance offset
|
||||
/// Sets initial offset for instance values
|
||||
pub fn set_initial_instance_offset(&mut self, offset: usize) {
|
||||
if let Some(instance) = &mut self.instance {
|
||||
instance.set_initial_instance_offset(offset);
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the total instance len
|
||||
/// Gets total length of instance data
|
||||
pub fn get_instance_len(&self) -> usize {
|
||||
if let Some(instance) = &self.instance {
|
||||
instance.get_total_instance_len()
|
||||
@@ -373,21 +378,21 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ModelVars<F> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Increment the instance offset
|
||||
/// Increments instance index
|
||||
pub fn increment_instance_idx(&mut self) {
|
||||
if let Some(instance) = &mut self.instance {
|
||||
instance.increment_idx();
|
||||
}
|
||||
}
|
||||
|
||||
/// Reset the instance offset
|
||||
/// Sets instance index to specific value
|
||||
pub fn set_instance_idx(&mut self, val: usize) {
|
||||
if let Some(instance) = &mut self.instance {
|
||||
instance.set_idx(val);
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the instance offset
|
||||
/// Gets current instance index
|
||||
pub fn get_instance_idx(&self) -> usize {
|
||||
if let Some(instance) = &self.instance {
|
||||
instance.get_idx()
|
||||
@@ -396,7 +401,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ModelVars<F> {
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Initializes instance column with specified dimensions and scale
|
||||
pub fn instantiate_instance(
|
||||
&mut self,
|
||||
cs: &mut ConstraintSystem<F>,
|
||||
@@ -417,7 +422,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ModelVars<F> {
|
||||
};
|
||||
}
|
||||
|
||||
/// Allocate all columns that will be assigned to by a model.
|
||||
/// Creates new ModelVars with allocated columns based on settings
|
||||
pub fn new(cs: &mut ConstraintSystem<F>, params: &GraphSettings) -> Self {
|
||||
debug!("number of blinding factors: {}", cs.blinding_factors());
|
||||
|
||||
@@ -435,7 +440,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ModelVars<F> {
|
||||
.collect_vec();
|
||||
|
||||
if requires_dynamic_lookup || requires_shuffle {
|
||||
let num_cols = if requires_dynamic_lookup { 3 } else { 2 };
|
||||
let num_cols = 3;
|
||||
for _ in 0..num_cols {
|
||||
let dynamic_lookup =
|
||||
VarTensor::new_advice(cs, logrows, 1, dynamic_lookup_and_shuffle_size);
|
||||
|
||||
298
src/lib.rs
298
src/lib.rs
@@ -28,6 +28,9 @@
|
||||
|
||||
//! A library for turning computational graphs, such as neural networks, into ZK-circuits.
|
||||
//!
|
||||
use log::warn;
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use mimalloc as _;
|
||||
|
||||
/// Error type
|
||||
// #[cfg_attr(not(feature = "ezkl"), derive(uniffi::Error))]
|
||||
@@ -94,12 +97,11 @@ impl From<String> for EZKLError {
|
||||
|
||||
use std::str::FromStr;
|
||||
|
||||
use circuit::{table::Range, CheckMode, Tolerance};
|
||||
use circuit::{table::Range, CheckMode};
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use clap::Args;
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use fieldutils::IntegerRep;
|
||||
use graph::Visibility;
|
||||
use graph::{Visibility, MAX_PUBLIC_SRS};
|
||||
use halo2_proofs::poly::{
|
||||
ipa::commitment::IPACommitmentScheme, kzg::commitment::KZGCommitmentScheme,
|
||||
};
|
||||
@@ -165,7 +167,6 @@ pub mod srs_sha;
|
||||
pub mod tensor;
|
||||
#[cfg(feature = "ios-bindings")]
|
||||
uniffi::setup_scaffolding!();
|
||||
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
@@ -180,11 +181,9 @@ lazy_static! {
|
||||
.unwrap_or("8000".to_string())
|
||||
.parse()
|
||||
.unwrap();
|
||||
|
||||
/// The serialization format for the keys
|
||||
pub static ref EZKL_KEY_FORMAT: String = std::env::var("EZKL_KEY_FORMAT")
|
||||
.unwrap_or("raw-bytes".to_string());
|
||||
|
||||
}
|
||||
|
||||
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
|
||||
@@ -266,80 +265,101 @@ impl From<String> for Commitments {
|
||||
}
|
||||
|
||||
/// Parameters specific to a proving run
|
||||
///
|
||||
/// RunArgs contains all configuration parameters needed to control the proving process,
|
||||
/// including scaling factors, visibility settings, and circuit parameters.
|
||||
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, PartialOrd)]
|
||||
#[cfg_attr(
|
||||
all(feature = "ezkl", not(target_arch = "wasm32")),
|
||||
derive(Args, ToFlags)
|
||||
)]
|
||||
pub struct RunArgs {
|
||||
/// The tolerance for error on model outputs
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(short = 'T', long, default_value = "0", value_hint = clap::ValueHint::Other))]
|
||||
pub tolerance: Tolerance,
|
||||
/// The denominator in the fixed point representation used when quantizing inputs
|
||||
/// Fixed point scaling factor for quantizing inputs
|
||||
/// Higher values provide more precision but increase circuit complexity
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(short = 'S', long, default_value = "7", value_hint = clap::ValueHint::Other))]
|
||||
pub input_scale: Scale,
|
||||
/// The denominator in the fixed point representation used when quantizing parameters
|
||||
/// Fixed point scaling factor for quantizing parameters
|
||||
/// Higher values provide more precision but increase circuit complexity
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "7", value_hint = clap::ValueHint::Other))]
|
||||
pub param_scale: Scale,
|
||||
/// if the scale is ever > scale_rebase_multiplier * input_scale then the scale is rebased to input_scale (this a more advanced parameter, use with caution)
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "1", value_hint = clap::ValueHint::Other))]
|
||||
/// Scale rebase threshold multiplier
|
||||
/// When scale exceeds input_scale * multiplier, it is rebased to input_scale
|
||||
/// Advanced parameter that should be used with caution
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "1", value_hint = clap::ValueHint::Other))]
|
||||
pub scale_rebase_multiplier: u32,
|
||||
/// The min and max elements in the lookup table input column
|
||||
/// Range for lookup table input column values
|
||||
/// Specified as (min, max) pair
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(short = 'B', long, value_parser = parse_key_val::<IntegerRep, IntegerRep>, default_value = "-32768->32768"))]
|
||||
pub lookup_range: Range,
|
||||
/// The log_2 number of rows
|
||||
/// Log2 of the number of rows in the circuit
|
||||
/// Controls circuit size and proving time
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(short = 'K', long, default_value = "17", value_hint = clap::ValueHint::Other))]
|
||||
pub logrows: u32,
|
||||
/// The log_2 number of rows
|
||||
/// Number of inner columns per block
|
||||
/// Affects circuit layout and efficiency
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(short = 'N', long, default_value = "2", value_hint = clap::ValueHint::Other))]
|
||||
pub num_inner_cols: usize,
|
||||
/// Hand-written parser for graph variables, eg. batch_size=1
|
||||
/// Graph variables for parameterizing the computation
|
||||
/// Format: "name->value", e.g. "batch_size->1"
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(short = 'V', long, value_parser = parse_key_val::<String, usize>, default_value = "batch_size->1", value_delimiter = ',', value_hint = clap::ValueHint::Other))]
|
||||
pub variables: Vec<(String, usize)>,
|
||||
/// Flags whether inputs are public, private, fixed, hashed, polycommit
|
||||
/// Visibility setting for input values
|
||||
/// Controls whether inputs are public or private in the circuit
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "private", value_hint = clap::ValueHint::Other))]
|
||||
pub input_visibility: Visibility,
|
||||
/// Flags whether outputs are public, private, fixed, hashed, polycommit
|
||||
/// Visibility setting for output values
|
||||
/// Controls whether outputs are public or private in the circuit
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "public", value_hint = clap::ValueHint::Other))]
|
||||
pub output_visibility: Visibility,
|
||||
/// Flags whether params are fixed, private, hashed, polycommit
|
||||
/// Visibility setting for parameters
|
||||
/// Controls how parameters are handled in the circuit
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "private", value_hint = clap::ValueHint::Other))]
|
||||
pub param_visibility: Visibility,
|
||||
#[cfg_attr(
|
||||
all(feature = "ezkl", not(target_arch = "wasm32")),
|
||||
arg(long, default_value = "false")
|
||||
)]
|
||||
/// Should constants with 0.0 fraction be rebased to scale 0
|
||||
/// Whether to rebase constants with zero fractional part to scale 0
|
||||
/// Can improve efficiency for integer constants
|
||||
#[cfg_attr(
|
||||
all(feature = "ezkl", not(target_arch = "wasm32")),
|
||||
arg(long, default_value = "false")
|
||||
)]
|
||||
pub rebase_frac_zero_constants: bool,
|
||||
/// check mode (safe, unsafe, etc)
|
||||
/// Circuit checking mode
|
||||
/// Controls level of constraint verification
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "unsafe", value_hint = clap::ValueHint::Other))]
|
||||
pub check_mode: CheckMode,
|
||||
/// commitment scheme
|
||||
/// Commitment scheme for circuit proving
|
||||
/// Affects proof size and verification time
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "kzg", value_hint = clap::ValueHint::Other))]
|
||||
pub commitment: Option<Commitments>,
|
||||
/// the base used for decompositions
|
||||
/// Base for number decomposition
|
||||
/// Must be a power of 2
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "16384", value_hint = clap::ValueHint::Other))]
|
||||
pub decomp_base: usize,
|
||||
/// Number of decomposition legs
|
||||
/// Controls decomposition granularity
|
||||
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "2", value_hint = clap::ValueHint::Other))]
|
||||
/// the number of legs used for decompositions
|
||||
pub decomp_legs: usize,
|
||||
/// Whether to use bounded lookup for logarithm computation
|
||||
#[cfg_attr(
|
||||
all(feature = "ezkl", not(target_arch = "wasm32")),
|
||||
arg(long, default_value = "false")
|
||||
)]
|
||||
/// use unbounded lookup for the log
|
||||
pub bounded_log_lookup: bool,
|
||||
/// Range check inputs and outputs (turn off if the inputs are felts)
|
||||
#[cfg_attr(
|
||||
all(feature = "ezkl", not(target_arch = "wasm32")),
|
||||
arg(long, default_value = "false")
|
||||
)]
|
||||
pub ignore_range_check_inputs_outputs: bool,
|
||||
}
|
||||
|
||||
impl Default for RunArgs {
|
||||
/// Creates a new RunArgs instance with default values
|
||||
///
|
||||
/// Default configuration is optimized for common use cases
|
||||
/// while maintaining reasonable proving time and circuit size
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
bounded_log_lookup: false,
|
||||
tolerance: Tolerance::default(),
|
||||
input_scale: 7,
|
||||
param_scale: 7,
|
||||
scale_rebase_multiplier: 1,
|
||||
@@ -355,54 +375,139 @@ impl Default for RunArgs {
|
||||
commitment: None,
|
||||
decomp_base: 16384,
|
||||
decomp_legs: 2,
|
||||
ignore_range_check_inputs_outputs: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RunArgs {
|
||||
/// Validates the RunArgs configuration
|
||||
///
|
||||
/// Performs comprehensive validation of all parameters to ensure they are within
|
||||
/// acceptable ranges and follow required constraints. Returns accumulated errors
|
||||
/// if any validations fail.
|
||||
///
|
||||
/// # Returns
|
||||
/// - Ok(()) if all validations pass
|
||||
/// - Err(String) with detailed error message if any validation fails
|
||||
pub fn validate(&self) -> Result<(), String> {
|
||||
let mut errors = Vec::new();
|
||||
|
||||
// check if the largest represented integer in the decomposed form overflows IntegerRep
|
||||
// try it with the largest possible value
|
||||
let max_decomp = (self.decomp_base as IntegerRep).checked_pow(self.decomp_legs as u32);
|
||||
if max_decomp.is_none() {
|
||||
errors.push(format!(
|
||||
"decomp_base^decomp_legs overflows IntegerRep: {}^{}",
|
||||
self.decomp_base, self.decomp_legs
|
||||
));
|
||||
}
|
||||
|
||||
// Visibility validations
|
||||
if self.param_visibility == Visibility::Public {
|
||||
return Err(
|
||||
"params cannot be public instances, you are probably trying to use `fixed` or `kzgcommit`"
|
||||
.into(),
|
||||
errors.push(
|
||||
"Parameters cannot be public instances. Use 'fixed' or 'kzgcommit' instead"
|
||||
.to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
// Scale validations
|
||||
if self.scale_rebase_multiplier < 1 {
|
||||
return Err("scale_rebase_multiplier must be >= 1".into());
|
||||
errors.push("scale_rebase_multiplier must be >= 1".to_string());
|
||||
}
|
||||
|
||||
// if any of the scales are too small
|
||||
if self.input_scale < 8 || self.param_scale < 8 {
|
||||
warn!("low scale values (<8) may impact precision");
|
||||
}
|
||||
|
||||
// Lookup range validations
|
||||
if self.lookup_range.0 > self.lookup_range.1 {
|
||||
return Err("lookup_range min is greater than max".into());
|
||||
errors.push(format!(
|
||||
"Invalid lookup range: min ({}) is greater than max ({})",
|
||||
self.lookup_range.0, self.lookup_range.1
|
||||
));
|
||||
}
|
||||
|
||||
// Size validations
|
||||
if self.logrows < 1 {
|
||||
return Err("logrows must be >= 1".into());
|
||||
errors.push("logrows must be >= 1".to_string());
|
||||
}
|
||||
|
||||
if self.num_inner_cols < 1 {
|
||||
return Err("num_inner_cols must be >= 1".into());
|
||||
errors.push("num_inner_cols must be >= 1".to_string());
|
||||
}
|
||||
if self.tolerance.val > 0.0 && self.output_visibility != Visibility::Public {
|
||||
return Err("tolerance > 0.0 requires output_visibility to be public".into());
|
||||
|
||||
let batch_size = self.variables.iter().find(|(name, _)| name == "batch_size");
|
||||
if let Some(batch_size) = batch_size {
|
||||
if batch_size.1 == 0 {
|
||||
errors.push("'batch_size' cannot be 0".to_string());
|
||||
}
|
||||
}
|
||||
|
||||
// Decomposition validations
|
||||
if self.decomp_base == 0 {
|
||||
errors.push("decomp_base cannot be 0".to_string());
|
||||
}
|
||||
|
||||
if self.decomp_legs == 0 {
|
||||
errors.push("decomp_legs cannot be 0".to_string());
|
||||
}
|
||||
|
||||
// Performance validations
|
||||
if self.logrows > MAX_PUBLIC_SRS {
|
||||
warn!("logrows exceeds maximum public SRS size");
|
||||
}
|
||||
|
||||
// Performance warnings
|
||||
if self.input_scale > 20 || self.param_scale > 20 {
|
||||
warn!("High scale values (>20) may impact performance");
|
||||
}
|
||||
|
||||
if errors.is_empty() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(errors.join("\n"))
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Export the ezkl configuration as json
|
||||
/// Exports the configuration as JSON
|
||||
///
|
||||
/// Serializes the RunArgs instance to a JSON string
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Ok(String)` containing JSON representation
|
||||
/// * `Err` if serialization fails
|
||||
pub fn as_json(&self) -> Result<String, Box<dyn std::error::Error>> {
|
||||
let serialized = match serde_json::to_string(&self) {
|
||||
Ok(s) => s,
|
||||
Err(e) => {
|
||||
return Err(Box::new(e));
|
||||
}
|
||||
};
|
||||
Ok(serialized)
|
||||
let res = serde_json::to_string(&self)?;
|
||||
Ok(res)
|
||||
}
|
||||
/// Parse an ezkl configuration from a json
|
||||
|
||||
/// Parses configuration from JSON
|
||||
///
|
||||
/// Deserializes a RunArgs instance from a JSON string
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `arg_json` - JSON string containing configuration
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Ok(RunArgs)` if parsing succeeds
|
||||
/// * `Err` if parsing fails
|
||||
pub fn from_json(arg_json: &str) -> Result<Self, serde_json::Error> {
|
||||
serde_json::from_str(arg_json)
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a single key-value pair
|
||||
// Additional helper functions for the module
|
||||
|
||||
/// Parses a key-value pair from a string in the format "key->value"
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `s` - Input string in the format "key->value"
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Ok((T, U))` - Parsed key and value
|
||||
/// * `Err` - If parsing fails
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
fn parse_key_val<T, U>(
|
||||
s: &str,
|
||||
@@ -415,14 +520,15 @@ where
|
||||
{
|
||||
let pos = s
|
||||
.find("->")
|
||||
.ok_or_else(|| format!("invalid x->y: no `->` found in `{s}`"))?;
|
||||
let a = s[..pos].parse()?;
|
||||
let b = s[pos + 2..].parse()?;
|
||||
Ok((a, b))
|
||||
.ok_or_else(|| format!("invalid KEY->VALUE: no `->` found in `{s}`"))?;
|
||||
Ok((s[..pos].parse()?, s[pos + 2..].parse()?))
|
||||
}
|
||||
|
||||
/// Check if the version string matches the artifact version
|
||||
/// If the version string does not match the artifact version, log a warning
|
||||
/// Verifies that a version string matches the expected artifact version
|
||||
/// Logs warnings for version mismatches or unversioned artifacts
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `artifact_version` - Version string from the artifact
|
||||
pub fn check_version_string_matches(artifact_version: &str) {
|
||||
if artifact_version == "0.0.0"
|
||||
|| artifact_version == "source - no compatibility guaranteed"
|
||||
@@ -447,3 +553,81 @@ pub fn check_version_string_matches(artifact_version: &str) {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::field_reassign_with_default)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_valid_default_args() {
|
||||
let args = RunArgs::default();
|
||||
assert!(args.validate().is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_param_visibility() {
|
||||
let mut args = RunArgs::default();
|
||||
args.param_visibility = Visibility::Public;
|
||||
let err = args.validate().unwrap_err();
|
||||
assert!(err.contains("Parameters cannot be public instances"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_scale_rebase() {
|
||||
let mut args = RunArgs::default();
|
||||
args.scale_rebase_multiplier = 0;
|
||||
let err = args.validate().unwrap_err();
|
||||
assert!(err.contains("scale_rebase_multiplier must be >= 1"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_lookup_range() {
|
||||
let mut args = RunArgs::default();
|
||||
args.lookup_range = (100, -100);
|
||||
let err = args.validate().unwrap_err();
|
||||
assert!(err.contains("Invalid lookup range"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_logrows() {
|
||||
let mut args = RunArgs::default();
|
||||
args.logrows = 0;
|
||||
let err = args.validate().unwrap_err();
|
||||
assert!(err.contains("logrows must be >= 1"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_inner_cols() {
|
||||
let mut args = RunArgs::default();
|
||||
args.num_inner_cols = 0;
|
||||
let err = args.validate().unwrap_err();
|
||||
assert!(err.contains("num_inner_cols must be >= 1"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zero_batch_size() {
|
||||
let mut args = RunArgs::default();
|
||||
args.variables = vec![("batch_size".to_string(), 0)];
|
||||
let err = args.validate().unwrap_err();
|
||||
assert!(err.contains("'batch_size' cannot be 0"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_json_serialization() {
|
||||
let args = RunArgs::default();
|
||||
let json = args.as_json().unwrap();
|
||||
let deserialized = RunArgs::from_json(&json).unwrap();
|
||||
assert_eq!(args, deserialized);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multiple_validation_errors() {
|
||||
let mut args = RunArgs::default();
|
||||
args.logrows = 0;
|
||||
args.lookup_range = (100, -100);
|
||||
let err = args.validate().unwrap_err();
|
||||
// Should contain multiple error messages
|
||||
assert!(err.matches("\n").count() >= 1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -133,7 +133,6 @@ pub fn aggregate<'a>(
|
||||
.collect_vec()
|
||||
}));
|
||||
|
||||
// loader.ctx().constrain_equal(cell_0, cell_1)
|
||||
let mut transcript = PoseidonTranscript::<Rc<Halo2Loader>, _>::new(loader, snark.proof());
|
||||
let proof = PlonkSuccinctVerifier::read_proof(svk, &protocol, &instances, &mut transcript)
|
||||
.map_err(|_| plonk::Error::Synthesis)?;
|
||||
@@ -309,11 +308,11 @@ impl AggregationCircuit {
|
||||
})
|
||||
}
|
||||
|
||||
///
|
||||
/// Number of limbs used for decomposition
|
||||
pub fn num_limbs() -> usize {
|
||||
LIMBS
|
||||
}
|
||||
///
|
||||
/// Number of bits used for decomposition
|
||||
pub fn num_bits() -> usize {
|
||||
BITS
|
||||
}
|
||||
|
||||
@@ -17,16 +17,16 @@ use crate::{Commitments, EZKL_BUF_CAPACITY, EZKL_KEY_FORMAT};
|
||||
use clap::ValueEnum;
|
||||
use halo2_proofs::circuit::Value;
|
||||
use halo2_proofs::plonk::{
|
||||
create_proof, keygen_pk, keygen_vk_custom, verify_proof, Circuit, ProvingKey, VerifyingKey,
|
||||
Circuit, ProvingKey, VerifyingKey, create_proof, keygen_pk, keygen_vk_custom, verify_proof,
|
||||
};
|
||||
use halo2_proofs::poly::VerificationStrategy;
|
||||
use halo2_proofs::poly::commitment::{CommitmentScheme, Params, ParamsProver, Prover, Verifier};
|
||||
use halo2_proofs::poly::ipa::commitment::IPACommitmentScheme;
|
||||
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
|
||||
use halo2_proofs::poly::VerificationStrategy;
|
||||
use halo2_proofs::transcript::{EncodedChallenge, TranscriptReadBuffer, TranscriptWriterBuffer};
|
||||
use halo2curves::CurveAffine;
|
||||
use halo2curves::ff::{FromUniformBytes, PrimeField, WithSmallOrderMulGroup};
|
||||
use halo2curves::serde::SerdeObject;
|
||||
use halo2curves::CurveAffine;
|
||||
use instant::Instant;
|
||||
use log::{debug, info, trace};
|
||||
#[cfg(not(feature = "det-prove"))]
|
||||
@@ -51,6 +51,9 @@ use pyo3::types::PyDictMethods;
|
||||
|
||||
use halo2curves::bn256::{Bn256, Fr, G1Affine};
|
||||
|
||||
/// Converts a string to a `SerdeFormat`.
|
||||
/// # Panics
|
||||
/// Panics if the provided `s` is not a valid `SerdeFormat` (i.e. not one of "processed", "raw-bytes-unchecked", or "raw-bytes").
|
||||
fn serde_format_from_str(s: &str) -> halo2_proofs::SerdeFormat {
|
||||
match s {
|
||||
"processed" => halo2_proofs::SerdeFormat::Processed,
|
||||
@@ -321,7 +324,7 @@ where
|
||||
}
|
||||
|
||||
#[cfg(feature = "python-bindings")]
|
||||
use pyo3::{types::PyDict, PyObject, Python, ToPyObject};
|
||||
use pyo3::{PyObject, Python, ToPyObject, types::PyDict};
|
||||
#[cfg(feature = "python-bindings")]
|
||||
impl<F: PrimeField + SerdeObject + Serialize, C: CurveAffine + Serialize> ToPyObject for Snark<F, C>
|
||||
where
|
||||
@@ -345,14 +348,15 @@ where
|
||||
}
|
||||
|
||||
impl<
|
||||
F: PrimeField + SerdeObject + Serialize + FromUniformBytes<64> + DeserializeOwned,
|
||||
C: CurveAffine + Serialize + DeserializeOwned,
|
||||
> Snark<F, C>
|
||||
F: PrimeField + SerdeObject + Serialize + FromUniformBytes<64> + DeserializeOwned,
|
||||
C: CurveAffine + Serialize + DeserializeOwned,
|
||||
> Snark<F, C>
|
||||
where
|
||||
C::Scalar: Serialize + DeserializeOwned,
|
||||
C::ScalarExt: Serialize + DeserializeOwned,
|
||||
{
|
||||
/// Create a new application snark from proof and instance variables ready for aggregation
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
protocol: Option<PlonkProtocol<C>>,
|
||||
instances: Vec<Vec<F>>,
|
||||
@@ -528,7 +532,6 @@ pub fn create_keys<Scheme: CommitmentScheme, C: Circuit<Scheme::Scalar>>(
|
||||
disable_selector_compression: bool,
|
||||
) -> Result<ProvingKey<Scheme::Curve>, halo2_proofs::plonk::Error>
|
||||
where
|
||||
C: Circuit<Scheme::Scalar>,
|
||||
<Scheme as CommitmentScheme>::Scalar: FromUniformBytes<64>,
|
||||
{
|
||||
// Real proof
|
||||
@@ -794,7 +797,6 @@ pub fn load_vk<Scheme: CommitmentScheme, C: Circuit<Scheme::Scalar>>(
|
||||
params: <C as Circuit<Scheme::Scalar>>::Params,
|
||||
) -> Result<VerifyingKey<Scheme::Curve>, PfsysError>
|
||||
where
|
||||
C: Circuit<Scheme::Scalar>,
|
||||
Scheme::Curve: SerdeObject + CurveAffine,
|
||||
Scheme::Scalar: PrimeField + SerdeObject + FromUniformBytes<64>,
|
||||
{
|
||||
@@ -817,7 +819,6 @@ pub fn load_pk<Scheme: CommitmentScheme, C: Circuit<Scheme::Scalar>>(
|
||||
params: <C as Circuit<Scheme::Scalar>>::Params,
|
||||
) -> Result<ProvingKey<Scheme::Curve>, PfsysError>
|
||||
where
|
||||
C: Circuit<Scheme::Scalar>,
|
||||
Scheme::Curve: SerdeObject + CurveAffine,
|
||||
Scheme::Scalar: PrimeField + SerdeObject + FromUniformBytes<64>,
|
||||
{
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use thiserror::Error;
|
||||
|
||||
use super::ops::DecompositionError;
|
||||
use super::{ops::DecompositionError, DataFormat};
|
||||
|
||||
/// A wrapper for tensor related errors.
|
||||
#[derive(Debug, Error)]
|
||||
@@ -38,4 +38,13 @@ pub enum TensorError {
|
||||
/// Decomposition error
|
||||
#[error("decomposition error: {0}")]
|
||||
DecompositionError(#[from] DecompositionError),
|
||||
/// Invalid argument
|
||||
#[error("invalid argument: {0}")]
|
||||
InvalidArgument(String),
|
||||
/// Index out of bounds
|
||||
#[error("index {0} out of bounds for dimension {1}")]
|
||||
IndexOutOfBounds(usize, usize),
|
||||
/// Invalid data conversion
|
||||
#[error("invalid data conversion from format {0} to {1}")]
|
||||
InvalidDataConversion(DataFormat, DataFormat),
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ pub mod var;
|
||||
|
||||
pub use errors::TensorError;
|
||||
|
||||
use core::hash::Hash;
|
||||
use halo2curves::ff::PrimeField;
|
||||
use maybe_rayon::{
|
||||
prelude::{
|
||||
@@ -24,12 +25,9 @@ use std::path::PathBuf;
|
||||
pub use val::*;
|
||||
pub use var::*;
|
||||
|
||||
#[cfg(feature = "metal")]
|
||||
use instant::Instant;
|
||||
|
||||
use crate::{
|
||||
circuit::utils,
|
||||
fieldutils::{integer_rep_to_felt, IntegerRep},
|
||||
fieldutils::{IntegerRep, integer_rep_to_felt},
|
||||
graph::Visibility,
|
||||
};
|
||||
|
||||
@@ -40,8 +38,6 @@ use halo2_proofs::{
|
||||
poly::Rotation,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
#[cfg(feature = "metal")]
|
||||
use metal::{Device, MTLResourceOptions, MTLSize};
|
||||
use std::error::Error;
|
||||
use std::fmt::Debug;
|
||||
use std::io::Read;
|
||||
@@ -49,31 +45,6 @@ use std::iter::Iterator;
|
||||
use std::ops::{Add, Deref, DerefMut, Div, Mul, Neg, Range, Sub};
|
||||
use std::{cmp::max, ops::Rem};
|
||||
|
||||
#[cfg(feature = "metal")]
|
||||
use std::collections::HashMap;
|
||||
|
||||
#[cfg(feature = "metal")]
|
||||
const LIB_DATA: &[u8] = include_bytes!("metal/tensor_ops.metallib");
|
||||
|
||||
#[cfg(feature = "metal")]
|
||||
lazy_static::lazy_static! {
|
||||
static ref DEVICE: Device = Device::system_default().expect("no device found");
|
||||
|
||||
static ref LIB: metal::Library = DEVICE.new_library_with_data(LIB_DATA).unwrap();
|
||||
|
||||
static ref QUEUE: metal::CommandQueue = DEVICE.new_command_queue();
|
||||
|
||||
static ref PIPELINES: HashMap<String, metal::ComputePipelineState> = {
|
||||
let mut map = HashMap::new();
|
||||
for name in ["add", "sub", "mul"] {
|
||||
let function = LIB.get_function(name, None).unwrap();
|
||||
let pipeline = DEVICE.new_compute_pipeline_state_with_function(&function).unwrap();
|
||||
map.insert(name.to_string(), pipeline);
|
||||
}
|
||||
map
|
||||
};
|
||||
}
|
||||
|
||||
/// The (inner) type of tensor elements.
|
||||
pub trait TensorType: Clone + Debug + 'static {
|
||||
/// Returns the zero value.
|
||||
@@ -91,7 +62,7 @@ pub trait TensorType: Clone + Debug + 'static {
|
||||
}
|
||||
|
||||
macro_rules! tensor_type {
|
||||
($rust_type:ty, $tensor_type:ident, $zero:expr, $one:expr) => {
|
||||
($rust_type:ty, $tensor_type:ident, $zero:expr_2021, $one:expr_2021) => {
|
||||
impl TensorType for $rust_type {
|
||||
fn zero() -> Option<Self> {
|
||||
Some($zero)
|
||||
@@ -444,7 +415,7 @@ impl<T: Clone + TensorType + PrimeField> Tensor<T> {
|
||||
Err(_) => {
|
||||
return Err(TensorError::FileLoadError(
|
||||
"Failed to read tensor".to_string(),
|
||||
))
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -833,6 +804,12 @@ impl<T: Clone + TensorType> Tensor<T> {
|
||||
num_repeats: usize,
|
||||
initial_offset: usize,
|
||||
) -> Result<Tensor<T>, TensorError> {
|
||||
if n == 0 {
|
||||
return Err(TensorError::InvalidArgument(
|
||||
"Cannot duplicate every 0th element".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let mut inner: Vec<T> = Vec::with_capacity(self.inner.len());
|
||||
let mut offset = initial_offset;
|
||||
for (i, elem) in self.inner.clone().into_iter().enumerate() {
|
||||
@@ -862,11 +839,17 @@ impl<T: Clone + TensorType> Tensor<T> {
|
||||
num_repeats: usize,
|
||||
initial_offset: usize,
|
||||
) -> Result<Tensor<T>, TensorError> {
|
||||
if n == 0 {
|
||||
return Err(TensorError::InvalidArgument(
|
||||
"Cannot remove every 0th element".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// Pre-calculate capacity to avoid reallocations
|
||||
let estimated_size = self.inner.len() - (self.inner.len() / n) * num_repeats;
|
||||
let mut inner = Vec::with_capacity(estimated_size);
|
||||
|
||||
// Use iterator directly instead of creating intermediate collections
|
||||
// Use iterator directly instead of creating intermediate collectionsif
|
||||
let mut i = 0;
|
||||
while i < self.inner.len() {
|
||||
// Add the current element
|
||||
@@ -885,7 +868,6 @@ impl<T: Clone + TensorType> Tensor<T> {
|
||||
}
|
||||
|
||||
/// Remove indices
|
||||
/// WARN: assumes indices are in ascending order for speed
|
||||
/// ```
|
||||
/// use ezkl::tensor::Tensor;
|
||||
/// use ezkl::fieldutils::IntegerRep;
|
||||
@@ -912,7 +894,11 @@ impl<T: Clone + TensorType> Tensor<T> {
|
||||
}
|
||||
// remove indices
|
||||
for elem in indices.iter().rev() {
|
||||
inner.remove(*elem);
|
||||
if *elem < self.len() {
|
||||
inner.remove(*elem);
|
||||
} else {
|
||||
return Err(TensorError::IndexOutOfBounds(*elem, self.len()));
|
||||
}
|
||||
}
|
||||
|
||||
Tensor::new(Some(&inner), &[inner.len()])
|
||||
@@ -940,6 +926,9 @@ impl<T: Clone + TensorType> Tensor<T> {
|
||||
));
|
||||
}
|
||||
self.dims = vec![];
|
||||
}
|
||||
if self.dims() == &[0] && new_dims.iter().product::<usize>() == 1 {
|
||||
self.dims = Vec::from(new_dims);
|
||||
} else {
|
||||
let product = if new_dims != [0] {
|
||||
new_dims.iter().product::<usize>()
|
||||
@@ -1118,6 +1107,10 @@ impl<T: Clone + TensorType> Tensor<T> {
|
||||
let mut output = self.clone();
|
||||
output.reshape(shape)?;
|
||||
return Ok(output);
|
||||
} else if self.dims() == &[0] && shape.iter().product::<usize>() == 1 {
|
||||
let mut output = self.clone();
|
||||
output.reshape(shape)?;
|
||||
return Ok(output);
|
||||
}
|
||||
|
||||
if self.dims().len() > shape.len() {
|
||||
@@ -1268,7 +1261,7 @@ impl<T: Clone + TensorType> Tensor<T> {
|
||||
None => {
|
||||
return Err(TensorError::DimError(
|
||||
"Cannot get last element of empty tensor".to_string(),
|
||||
))
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -1293,7 +1286,7 @@ impl<T: Clone + TensorType> Tensor<T> {
|
||||
None => {
|
||||
return Err(TensorError::DimError(
|
||||
"Cannot get first element of empty tensor".to_string(),
|
||||
))
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -1404,10 +1397,6 @@ impl<T: TensorType + Add<Output = T> + std::marker::Send + std::marker::Sync> Ad
|
||||
let lhs = self.expand(&broadcasted_shape).unwrap();
|
||||
let rhs = rhs.expand(&broadcasted_shape).unwrap();
|
||||
|
||||
#[cfg(feature = "metal")]
|
||||
let res = metal_tensor_op(&lhs, &rhs, "add");
|
||||
|
||||
#[cfg(not(feature = "metal"))]
|
||||
let res = {
|
||||
let mut res: Tensor<T> = lhs
|
||||
.par_iter()
|
||||
@@ -1505,10 +1494,6 @@ impl<T: TensorType + Sub<Output = T> + std::marker::Send + std::marker::Sync> Su
|
||||
let lhs = self.expand(&broadcasted_shape).unwrap();
|
||||
let rhs = rhs.expand(&broadcasted_shape).unwrap();
|
||||
|
||||
#[cfg(feature = "metal")]
|
||||
let res = metal_tensor_op(&lhs, &rhs, "sub");
|
||||
|
||||
#[cfg(not(feature = "metal"))]
|
||||
let res = {
|
||||
let mut res: Tensor<T> = lhs
|
||||
.par_iter()
|
||||
@@ -1576,10 +1561,6 @@ impl<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::Sync> Mu
|
||||
let lhs = self.expand(&broadcasted_shape).unwrap();
|
||||
let rhs = rhs.expand(&broadcasted_shape).unwrap();
|
||||
|
||||
#[cfg(feature = "metal")]
|
||||
let res = metal_tensor_op(&lhs, &rhs, "mul");
|
||||
|
||||
#[cfg(not(feature = "metal"))]
|
||||
let res = {
|
||||
let mut res: Tensor<T> = lhs
|
||||
.par_iter()
|
||||
@@ -1685,7 +1666,9 @@ impl<T: TensorType + Div<Output = T> + std::marker::Send + std::marker::Sync> Di
|
||||
}
|
||||
|
||||
// implement remainder
|
||||
impl<T: TensorType + Rem<Output = T> + std::marker::Send + std::marker::Sync> Rem for Tensor<T> {
|
||||
impl<T: TensorType + Rem<Output = T> + std::marker::Send + std::marker::Sync + PartialEq> Rem
|
||||
for Tensor<T>
|
||||
{
|
||||
type Output = Result<Tensor<T>, TensorError>;
|
||||
|
||||
/// Elementwise remainder of a tensor with another tensor.
|
||||
@@ -1714,9 +1697,24 @@ impl<T: TensorType + Rem<Output = T> + std::marker::Send + std::marker::Sync> Re
|
||||
let mut lhs = self.expand(&broadcasted_shape).unwrap();
|
||||
let rhs = rhs.expand(&broadcasted_shape).unwrap();
|
||||
|
||||
lhs.par_iter_mut().zip(rhs).for_each(|(o, r)| {
|
||||
*o = o.clone() % r;
|
||||
});
|
||||
lhs.par_iter_mut()
|
||||
.zip(rhs)
|
||||
.map(|(o, r)| match T::zero() {
|
||||
Some(zero) => {
|
||||
if r != zero {
|
||||
*o = o.clone() % r;
|
||||
Ok(())
|
||||
} else {
|
||||
Err(TensorError::InvalidArgument(
|
||||
"Cannot divide by zero in remainder".to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
_ => Err(TensorError::InvalidArgument(
|
||||
"Undefined zero value".to_string(),
|
||||
)),
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
Ok(lhs)
|
||||
}
|
||||
@@ -1751,7 +1749,6 @@ impl<T: TensorType + Rem<Output = T> + std::marker::Send + std::marker::Sync> Re
|
||||
/// assert_eq!(c, vec![2, 3]);
|
||||
///
|
||||
/// ```
|
||||
|
||||
pub fn get_broadcasted_shape(
|
||||
shape_a: &[usize],
|
||||
shape_b: &[usize],
|
||||
@@ -1759,23 +1756,247 @@ pub fn get_broadcasted_shape(
|
||||
let num_dims_a = shape_a.len();
|
||||
let num_dims_b = shape_b.len();
|
||||
|
||||
match (num_dims_a, num_dims_b) {
|
||||
(a, b) if a == b => {
|
||||
let mut broadcasted_shape = Vec::with_capacity(num_dims_a);
|
||||
for (dim_a, dim_b) in shape_a.iter().zip(shape_b.iter()) {
|
||||
let max_dim = dim_a.max(dim_b);
|
||||
broadcasted_shape.push(*max_dim);
|
||||
}
|
||||
Ok(broadcasted_shape)
|
||||
if num_dims_a == num_dims_b {
|
||||
let mut broadcasted_shape = Vec::with_capacity(num_dims_a);
|
||||
for (dim_a, dim_b) in shape_a.iter().zip(shape_b.iter()) {
|
||||
let max_dim = dim_a.max(dim_b);
|
||||
broadcasted_shape.push(*max_dim);
|
||||
}
|
||||
(a, b) if a < b => Ok(shape_b.to_vec()),
|
||||
(a, b) if a > b => Ok(shape_a.to_vec()),
|
||||
_ => Err(TensorError::DimError(
|
||||
Ok(broadcasted_shape)
|
||||
} else if num_dims_a < num_dims_b {
|
||||
Ok(shape_b.to_vec())
|
||||
} else if num_dims_a > num_dims_b {
|
||||
Ok(shape_a.to_vec())
|
||||
} else {
|
||||
Err(TensorError::DimError(
|
||||
"Unknown condition for broadcasting".to_string(),
|
||||
)),
|
||||
))
|
||||
}
|
||||
}
|
||||
////////////////////////
|
||||
///
|
||||
|
||||
/// The shape of data for some operations
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Default, Copy)]
|
||||
pub enum DataFormat {
|
||||
/// NCHW
|
||||
#[default]
|
||||
NCHW,
|
||||
/// NHWC
|
||||
NHWC,
|
||||
/// CHW
|
||||
CHW,
|
||||
/// HWC
|
||||
HWC,
|
||||
}
|
||||
|
||||
// as str
|
||||
impl core::fmt::Display for DataFormat {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
DataFormat::NCHW => write!(f, "NCHW"),
|
||||
DataFormat::NHWC => write!(f, "NHWC"),
|
||||
DataFormat::CHW => write!(f, "CHW"),
|
||||
DataFormat::HWC => write!(f, "HWC"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DataFormat {
|
||||
/// Get the format's canonical form
|
||||
pub fn canonical(&self) -> DataFormat {
|
||||
match self {
|
||||
DataFormat::NHWC => DataFormat::NCHW,
|
||||
DataFormat::HWC => DataFormat::CHW,
|
||||
_ => self.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// no batch dim
|
||||
pub fn has_no_batch(&self) -> bool {
|
||||
match self {
|
||||
DataFormat::CHW | DataFormat::HWC => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert tensor to canonical format (NCHW or CHW)
|
||||
pub fn to_canonical<F: PrimeField + TensorType + PartialOrd + Hash>(
|
||||
&self,
|
||||
tensor: &mut ValTensor<F>,
|
||||
) -> Result<(), TensorError> {
|
||||
match self {
|
||||
DataFormat::NHWC => {
|
||||
// For ND: Move channels from last axis to position after batch
|
||||
let ndims = tensor.dims().len();
|
||||
if ndims > 2 {
|
||||
tensor.move_axis(ndims - 1, 1)?;
|
||||
}
|
||||
}
|
||||
DataFormat::HWC => {
|
||||
// For ND: Move channels from last axis to first position
|
||||
let ndims = tensor.dims().len();
|
||||
if ndims > 1 {
|
||||
tensor.move_axis(ndims - 1, 0)?;
|
||||
}
|
||||
}
|
||||
_ => {} // NCHW/CHW are already in canonical format
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Convert tensor from canonical format to target format
|
||||
pub fn from_canonical<F: PrimeField + TensorType + PartialOrd + Hash>(
|
||||
&self,
|
||||
tensor: &mut ValTensor<F>,
|
||||
) -> Result<(), TensorError> {
|
||||
match self {
|
||||
DataFormat::NHWC => {
|
||||
// Move channels from position 1 to end
|
||||
let ndims = tensor.dims().len();
|
||||
if ndims > 2 {
|
||||
tensor.move_axis(1, ndims - 1)?;
|
||||
}
|
||||
}
|
||||
DataFormat::HWC => {
|
||||
// Move channels from position 0 to end
|
||||
let ndims = tensor.dims().len();
|
||||
if ndims > 1 {
|
||||
tensor.move_axis(0, ndims - 1)?;
|
||||
}
|
||||
}
|
||||
_ => {} // NCHW/CHW don't need conversion
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the position of the channel dimension
|
||||
pub fn get_channel_dim(&self, ndims: usize) -> usize {
|
||||
match self {
|
||||
DataFormat::NCHW => 1,
|
||||
DataFormat::NHWC => ndims - 1,
|
||||
DataFormat::CHW => 0,
|
||||
DataFormat::HWC => ndims - 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
/// The shape of the kernel for some operations
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Default, Copy)]
|
||||
pub enum KernelFormat {
|
||||
/// HWIO
|
||||
HWIO,
|
||||
/// OIHW
|
||||
#[default]
|
||||
OIHW,
|
||||
/// OHWI
|
||||
OHWI,
|
||||
}
|
||||
|
||||
impl core::fmt::Display for KernelFormat {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
KernelFormat::HWIO => write!(f, "HWIO"),
|
||||
KernelFormat::OIHW => write!(f, "OIHW"),
|
||||
KernelFormat::OHWI => write!(f, "OHWI"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl KernelFormat {
|
||||
/// Get the format's canonical form
|
||||
pub fn canonical(&self) -> KernelFormat {
|
||||
match self {
|
||||
KernelFormat::HWIO => KernelFormat::OIHW,
|
||||
KernelFormat::OHWI => KernelFormat::OIHW,
|
||||
_ => self.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert kernel to canonical format (OIHW)
|
||||
pub fn to_canonical<F: PrimeField + TensorType + PartialOrd + Hash>(
|
||||
&self,
|
||||
kernel: &mut ValTensor<F>,
|
||||
) -> Result<(), TensorError> {
|
||||
match self {
|
||||
KernelFormat::HWIO => {
|
||||
let kdims = kernel.dims().len();
|
||||
// Move output channels from last to first
|
||||
kernel.move_axis(kdims - 1, 0)?;
|
||||
// Move input channels from new last to second position
|
||||
kernel.move_axis(kdims - 1, 1)?;
|
||||
}
|
||||
KernelFormat::OHWI => {
|
||||
let kdims = kernel.dims().len();
|
||||
// Move input channels from last to second position
|
||||
kernel.move_axis(kdims - 1, 1)?;
|
||||
}
|
||||
_ => {} // OIHW is already canonical
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Convert kernel from canonical format to target format
|
||||
pub fn from_canonical<F: PrimeField + TensorType + PartialOrd + Hash>(
|
||||
&self,
|
||||
kernel: &mut ValTensor<F>,
|
||||
) -> Result<(), TensorError> {
|
||||
match self {
|
||||
KernelFormat::HWIO => {
|
||||
let kdims = kernel.dims().len();
|
||||
// Move input channels from second position to last
|
||||
kernel.move_axis(1, kdims - 1)?;
|
||||
// Move output channels from first to last
|
||||
kernel.move_axis(0, kdims - 1)?;
|
||||
}
|
||||
KernelFormat::OHWI => {
|
||||
let kdims = kernel.dims().len();
|
||||
// Move input channels from second position to last
|
||||
kernel.move_axis(1, kdims - 1)?;
|
||||
}
|
||||
_ => {} // OIHW doesn't need conversion
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the position of input and output channel dimensions
|
||||
pub fn get_channel_dims(&self, ndims: usize) -> (usize, usize) {
|
||||
// (input_ch, output_ch)
|
||||
match self {
|
||||
KernelFormat::OIHW => (1, 0),
|
||||
KernelFormat::HWIO => (ndims - 2, ndims - 1),
|
||||
KernelFormat::OHWI => (ndims - 1, 0),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
impl From<tract_onnx::tract_hir::ops::nn::DataFormat> for DataFormat {
|
||||
fn from(fmt: tract_onnx::tract_hir::ops::nn::DataFormat) -> Self {
|
||||
match fmt {
|
||||
tract_onnx::tract_hir::ops::nn::DataFormat::NCHW => DataFormat::NCHW,
|
||||
tract_onnx::tract_hir::ops::nn::DataFormat::NHWC => DataFormat::NHWC,
|
||||
tract_onnx::tract_hir::ops::nn::DataFormat::CHW => DataFormat::CHW,
|
||||
tract_onnx::tract_hir::ops::nn::DataFormat::HWC => DataFormat::HWC,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
|
||||
impl From<tract_onnx::tract_hir::tract_core::ops::cnn::conv::KernelFormat> for KernelFormat {
|
||||
fn from(fmt: tract_onnx::tract_hir::tract_core::ops::cnn::conv::KernelFormat) -> Self {
|
||||
match fmt {
|
||||
tract_onnx::tract_hir::tract_core::ops::cnn::conv::KernelFormat::HWIO => {
|
||||
KernelFormat::HWIO
|
||||
}
|
||||
tract_onnx::tract_hir::tract_core::ops::cnn::conv::KernelFormat::OIHW => {
|
||||
KernelFormat::OIHW
|
||||
}
|
||||
tract_onnx::tract_hir::tract_core::ops::cnn::conv::KernelFormat::OHWI => {
|
||||
KernelFormat::OHWI
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
@@ -1811,66 +2032,4 @@ mod tests {
|
||||
let b = Tensor::<IntegerRep>::new(Some(&[1, 4]), &[2, 1]).unwrap();
|
||||
assert_eq!(a.get_slice(&[0..2, 0..1]).unwrap(), b);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(feature = "metal")]
|
||||
fn tensor_metal_int() {
|
||||
let a = Tensor::<i64>::new(Some(&[1, 2, 3, 4]), &[2, 2]).unwrap();
|
||||
let b = Tensor::<i64>::new(Some(&[1, 2, 3, 4]), &[2, 2]).unwrap();
|
||||
let c = metal_tensor_op(&a, &b, "add");
|
||||
assert_eq!(c, Tensor::new(Some(&[2, 4, 6, 8]), &[2, 2]).unwrap());
|
||||
|
||||
let c = metal_tensor_op(&a, &b, "sub");
|
||||
assert_eq!(c, Tensor::new(Some(&[0, 0, 0, 0]), &[2, 2]).unwrap());
|
||||
|
||||
let c = metal_tensor_op(&a, &b, "mul");
|
||||
assert_eq!(c, Tensor::new(Some(&[1, 4, 9, 16]), &[2, 2]).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(feature = "metal")]
|
||||
fn tensor_metal_felt() {
|
||||
use halo2curves::bn256::Fr;
|
||||
|
||||
let a = Tensor::<Fr>::new(
|
||||
Some(&[Fr::from(1), Fr::from(2), Fr::from(3), Fr::from(4)]),
|
||||
&[2, 2],
|
||||
)
|
||||
.unwrap();
|
||||
let b = Tensor::<Fr>::new(
|
||||
Some(&[Fr::from(1), Fr::from(2), Fr::from(3), Fr::from(4)]),
|
||||
&[2, 2],
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let c = metal_tensor_op(&a, &b, "add");
|
||||
assert_eq!(
|
||||
c,
|
||||
Tensor::<Fr>::new(
|
||||
Some(&[Fr::from(2), Fr::from(4), Fr::from(6), Fr::from(8)]),
|
||||
&[2, 2],
|
||||
)
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
let c = metal_tensor_op(&a, &b, "sub");
|
||||
assert_eq!(
|
||||
c,
|
||||
Tensor::<Fr>::new(
|
||||
Some(&[Fr::from(0), Fr::from(0), Fr::from(0), Fr::from(0)]),
|
||||
&[2, 2],
|
||||
)
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
let c = metal_tensor_op(&a, &b, "mul");
|
||||
assert_eq!(
|
||||
c,
|
||||
Tensor::<Fr>::new(
|
||||
Some(&[Fr::from(1), Fr::from(4), Fr::from(9), Fr::from(16)]),
|
||||
&[2, 2],
|
||||
)
|
||||
.unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ pub fn get_rep(
|
||||
n: usize,
|
||||
) -> Result<Vec<IntegerRep>, DecompositionError> {
|
||||
// check if x is too large
|
||||
if x.abs() > (base.pow(n as u32) as IntegerRep) - 1 {
|
||||
if (*x).abs() > ((base as i128).pow(n as u32)) - 1 {
|
||||
return Err(DecompositionError::TooLarge(*x, base, n));
|
||||
}
|
||||
let mut rep = vec![0; n + 1];
|
||||
@@ -43,8 +43,8 @@ pub fn get_rep(
|
||||
let mut x = x.abs();
|
||||
//
|
||||
for i in (1..rep.len()).rev() {
|
||||
rep[i] = x % base as i128;
|
||||
x /= base as i128;
|
||||
rep[i] = x % base as IntegerRep;
|
||||
x /= base as IntegerRep;
|
||||
}
|
||||
|
||||
Ok(rep)
|
||||
@@ -127,7 +127,7 @@ pub fn decompose(
|
||||
.flatten()
|
||||
.collect::<Vec<IntegerRep>>();
|
||||
|
||||
let output = Tensor::<i128>::new(Some(&resp), &dims)?;
|
||||
let output = Tensor::<IntegerRep>::new(Some(&resp), &dims)?;
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
@@ -385,6 +385,12 @@ pub fn resize<T: TensorType + Send + Sync>(
|
||||
pub fn add<T: TensorType + Add<Output = T> + std::marker::Send + std::marker::Sync>(
|
||||
t: &[Tensor<T>],
|
||||
) -> Result<Tensor<T>, TensorError> {
|
||||
if t.len() == 1 {
|
||||
return Ok(t[0].clone());
|
||||
} else if t.is_empty() {
|
||||
return Err(TensorError::DimMismatch("add".to_string()));
|
||||
}
|
||||
|
||||
// calculate value of output
|
||||
let mut output: Tensor<T> = t[0].clone();
|
||||
|
||||
@@ -433,6 +439,11 @@ pub fn add<T: TensorType + Add<Output = T> + std::marker::Send + std::marker::Sy
|
||||
pub fn sub<T: TensorType + Sub<Output = T> + std::marker::Send + std::marker::Sync>(
|
||||
t: &[Tensor<T>],
|
||||
) -> Result<Tensor<T>, TensorError> {
|
||||
if t.len() == 1 {
|
||||
return Ok(t[0].clone());
|
||||
} else if t.is_empty() {
|
||||
return Err(TensorError::DimMismatch("sub".to_string()));
|
||||
}
|
||||
// calculate value of output
|
||||
let mut output: Tensor<T> = t[0].clone();
|
||||
|
||||
@@ -479,6 +490,11 @@ pub fn sub<T: TensorType + Sub<Output = T> + std::marker::Send + std::marker::Sy
|
||||
pub fn mult<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::Sync>(
|
||||
t: &[Tensor<T>],
|
||||
) -> Result<Tensor<T>, TensorError> {
|
||||
if t.len() == 1 {
|
||||
return Ok(t[0].clone());
|
||||
} else if t.is_empty() {
|
||||
return Err(TensorError::DimMismatch("mult".to_string()));
|
||||
}
|
||||
// calculate value of output
|
||||
let mut output: Tensor<T> = t[0].clone();
|
||||
|
||||
@@ -519,30 +535,101 @@ pub fn mult<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::S
|
||||
/// let result = downsample(&x, 1, 2, 2).unwrap();
|
||||
/// let expected = Tensor::<IntegerRep>::new(Some(&[3, 6]), &[2, 1]).unwrap();
|
||||
/// assert_eq!(result, expected);
|
||||
/// let x = Tensor::<IntegerRep>::new(
|
||||
/// Some(&[1, 2, 3, 4, 5, 6]),
|
||||
/// &[2, 3],
|
||||
/// ).unwrap();
|
||||
///
|
||||
/// // Test case 1: Negative stride along dimension 0
|
||||
/// // This should flip the order along dimension 0
|
||||
/// let result = downsample(&x, 0, -1, 0).unwrap();
|
||||
/// let expected = Tensor::<IntegerRep>::new(
|
||||
/// Some(&[4, 5, 6, 1, 2, 3]), // Flipped order of rows
|
||||
/// &[2, 3]
|
||||
/// ).unwrap();
|
||||
/// assert_eq!(result, expected);
|
||||
///
|
||||
/// // Test case 2: Negative stride along dimension 1
|
||||
/// // This should flip the order along dimension 1
|
||||
/// let result = downsample(&x, 1, -1, 0).unwrap();
|
||||
/// let expected = Tensor::<IntegerRep>::new(
|
||||
/// Some(&[3, 2, 1, 6, 5, 4]), // Flipped order of columns
|
||||
/// &[2, 3]
|
||||
/// ).unwrap();
|
||||
/// assert_eq!(result, expected);
|
||||
///
|
||||
/// // Test case 3: Negative stride with stride magnitude > 1
|
||||
/// // This should both skip and flip
|
||||
/// let result = downsample(&x, 1, -2, 0).unwrap();
|
||||
/// let expected = Tensor::<IntegerRep>::new(
|
||||
/// Some(&[3, 1, 6, 4]), // Take every 2nd element in reverse
|
||||
/// &[2, 2]
|
||||
/// ).unwrap();
|
||||
/// assert_eq!(result, expected);
|
||||
///
|
||||
/// // Test case 4: Negative stride with non-zero modulo
|
||||
/// // This should start at (size - 1 - modulo) and reverse
|
||||
/// let result = downsample(&x, 1, -2, 1).unwrap();
|
||||
/// let expected = Tensor::<IntegerRep>::new(
|
||||
/// Some(&[2, 5]), // Start at second element from end, take every 2nd in reverse
|
||||
/// &[2, 1]
|
||||
/// ).unwrap();
|
||||
/// assert_eq!(result, expected);
|
||||
///
|
||||
/// // Create a larger test case for more complex downsampling
|
||||
/// let y = Tensor::<IntegerRep>::new(
|
||||
/// Some(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
|
||||
/// &[3, 4],
|
||||
/// ).unwrap();
|
||||
///
|
||||
/// // Test case 5: Negative stride with modulo on larger tensor
|
||||
/// let result = downsample(&y, 1, -2, 1).unwrap();
|
||||
/// let expected = Tensor::<IntegerRep>::new(
|
||||
/// Some(&[3, 1, 7, 5, 11, 9]), // Start at one after reverse, take every 2nd
|
||||
/// &[3, 2]
|
||||
/// ).unwrap();
|
||||
/// assert_eq!(result, expected);
|
||||
/// ```
|
||||
pub fn downsample<T: TensorType + Send + Sync>(
|
||||
input: &Tensor<T>,
|
||||
dim: usize,
|
||||
stride: usize,
|
||||
stride: isize, // Changed from usize to isize to support negative strides
|
||||
modulo: usize,
|
||||
) -> Result<Tensor<T>, TensorError> {
|
||||
let mut output_shape = input.dims().to_vec();
|
||||
// now downsample along axis dim offset by modulo, rounding up (+1 if remaidner is non-zero)
|
||||
let remainder = (input.dims()[dim] - modulo) % stride;
|
||||
let div = (input.dims()[dim] - modulo) / stride;
|
||||
output_shape[dim] = div + (remainder > 0) as usize;
|
||||
let mut output = Tensor::<T>::new(None, &output_shape)?;
|
||||
// Handle negative stride case
|
||||
if stride == 0 {
|
||||
return Err(TensorError::DimMismatch(
|
||||
"downsample stride cannot be zero".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if modulo > input.dims()[dim] {
|
||||
let stride_abs = stride.unsigned_abs();
|
||||
let mut output_shape = input.dims().to_vec();
|
||||
|
||||
if modulo >= input.dims()[dim] {
|
||||
return Err(TensorError::DimMismatch("downsample".to_string()));
|
||||
}
|
||||
|
||||
// now downsample along axis dim offset by modulo
|
||||
// Calculate output shape based on the absolute value of stride
|
||||
let remainder = (input.dims()[dim] - modulo) % stride_abs;
|
||||
let div = (input.dims()[dim] - modulo) / stride_abs;
|
||||
output_shape[dim] = div + (remainder > 0) as usize;
|
||||
|
||||
let mut output = Tensor::<T>::new(None, &output_shape)?;
|
||||
|
||||
// Calculate indices based on stride direction
|
||||
let indices = (0..output_shape.len())
|
||||
.map(|i| {
|
||||
if i == dim {
|
||||
let mut index = vec![0; output_shape[i]];
|
||||
for (i, idx) in index.iter_mut().enumerate() {
|
||||
*idx = i * stride + modulo;
|
||||
for (j, idx) in index.iter_mut().enumerate() {
|
||||
if stride > 0 {
|
||||
// Positive stride: move forward from modulo
|
||||
*idx = j * stride_abs + modulo;
|
||||
} else {
|
||||
// Negative stride: move backward from (size - 1 - modulo)
|
||||
*idx = (input.dims()[dim] - 1 - modulo) - j * stride_abs;
|
||||
}
|
||||
}
|
||||
index
|
||||
} else {
|
||||
@@ -1310,7 +1397,6 @@ pub fn pad<T: TensorType>(
|
||||
///
|
||||
/// # Errors
|
||||
/// Returns a TensorError if the tensors in `inputs` have incompatible dimensions for concatenation along the specified `axis`.
|
||||
|
||||
pub fn concat<T: TensorType + Send + Sync>(
|
||||
inputs: &[&Tensor<T>],
|
||||
axis: usize,
|
||||
@@ -2086,7 +2172,6 @@ pub mod nonlinearities {
|
||||
/// let expected = Tensor::<IntegerRep>::new(Some(&[4, 25, 8, 1, 1, 0]), &[2, 3]).unwrap();
|
||||
/// assert_eq!(result, expected);
|
||||
/// ```
|
||||
|
||||
pub fn tanh(a: &Tensor<IntegerRep>, scale_input: f64) -> Tensor<IntegerRep> {
|
||||
a.par_enum_map(|_, a_i| {
|
||||
let kix = (a_i as f64) / scale_input;
|
||||
@@ -2261,7 +2346,11 @@ pub mod nonlinearities {
|
||||
pub fn recip(a: &Tensor<IntegerRep>, input_scale: f64, out_scale: f64) -> Tensor<IntegerRep> {
|
||||
a.par_enum_map(|_, a_i| {
|
||||
let rescaled = (a_i as f64) / input_scale;
|
||||
let denom = (1_f64) / (rescaled + f64::EPSILON);
|
||||
let denom = if rescaled == 0_f64 {
|
||||
(1_f64) / (rescaled + f64::EPSILON)
|
||||
} else {
|
||||
(1_f64) / (rescaled)
|
||||
};
|
||||
let d_inv_x = out_scale * denom;
|
||||
Ok::<_, TensorError>(d_inv_x.round() as IntegerRep)
|
||||
})
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -2,36 +2,38 @@ use std::collections::HashSet;
|
||||
|
||||
use log::{debug, error, warn};
|
||||
|
||||
use crate::circuit::{region::ConstantsMap, CheckMode};
|
||||
use crate::circuit::{CheckMode, region::ConstantsMap};
|
||||
|
||||
use super::*;
|
||||
/// A wrapper around Halo2's `Column<Fixed>` or `Column<Advice>`.
|
||||
/// Typically assign [ValTensor]s to [VarTensor]s when laying out a circuit.
|
||||
/// A wrapper around Halo2's Column types that represents a tensor of variables in the circuit.
|
||||
/// VarTensors are used to store and manage circuit columns, typically for assigning ValTensor
|
||||
/// values during circuit layout. The tensor organizes storage into blocks of columns, where each
|
||||
/// block contains multiple columns and each column contains multiple rows.
|
||||
#[derive(Clone, Default, Debug, PartialEq, Eq)]
|
||||
pub enum VarTensor {
|
||||
/// A VarTensor for holding Advice values, which are assigned at proving time.
|
||||
Advice {
|
||||
/// Vec of Advice columns, we have [[xx][xx][xx]...] where each inner vec is xx columns
|
||||
inner: Vec<Vec<Column<Advice>>>,
|
||||
///
|
||||
/// The number of columns in each inner block
|
||||
num_inner_cols: usize,
|
||||
/// Number of rows available to be used in each column of the storage
|
||||
col_size: usize,
|
||||
},
|
||||
/// Dummy var
|
||||
/// A placeholder tensor used for testing or temporary storage
|
||||
Dummy {
|
||||
///
|
||||
/// The number of columns in each inner block
|
||||
num_inner_cols: usize,
|
||||
/// Number of rows available to be used in each column of the storage
|
||||
col_size: usize,
|
||||
},
|
||||
/// Empty var
|
||||
/// An empty tensor with no storage
|
||||
#[default]
|
||||
Empty,
|
||||
}
|
||||
|
||||
impl VarTensor {
|
||||
/// name of the tensor
|
||||
/// Returns the name of the tensor variant as a static string
|
||||
pub fn name(&self) -> &'static str {
|
||||
match self {
|
||||
VarTensor::Advice { .. } => "Advice",
|
||||
@@ -40,22 +42,35 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns true if the tensor is an Advice variant
|
||||
pub fn is_advice(&self) -> bool {
|
||||
matches!(self, VarTensor::Advice { .. })
|
||||
}
|
||||
|
||||
/// Calculates the maximum number of usable rows in the constraint system
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `cs` - The constraint system
|
||||
/// * `logrows` - Log base 2 of the total number of rows (including system and blinding rows)
|
||||
///
|
||||
/// # Returns
|
||||
/// The maximum number of usable rows after accounting for blinding factors
|
||||
pub fn max_rows<F: PrimeField>(cs: &ConstraintSystem<F>, logrows: usize) -> usize {
|
||||
let base = 2u32;
|
||||
base.pow(logrows as u32) as usize - cs.blinding_factors() - 1
|
||||
}
|
||||
|
||||
/// Create a new VarTensor::Advice that is unblinded
|
||||
/// Arguments
|
||||
/// * `cs` - The constraint system
|
||||
/// * `logrows` - log2 number of rows in the matrix, including any system and blinding rows.
|
||||
/// * `capacity` - The number of advice cells to allocate
|
||||
/// Creates a new VarTensor::Advice with unblinded columns. Unblinded columns are used when
|
||||
/// the values do not need to be hidden in the proof.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `cs` - The constraint system to create columns in
|
||||
/// * `logrows` - Log base 2 of the total number of rows
|
||||
/// * `num_inner_cols` - Number of columns in each inner block
|
||||
/// * `capacity` - Total number of advice cells to allocate
|
||||
///
|
||||
/// # Returns
|
||||
/// A new VarTensor::Advice with unblinded columns enabled for equality constraints
|
||||
pub fn new_unblinded_advice<F: PrimeField>(
|
||||
cs: &mut ConstraintSystem<F>,
|
||||
logrows: usize,
|
||||
@@ -93,11 +108,17 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new VarTensor::Advice
|
||||
/// Arguments
|
||||
/// * `cs` - The constraint system
|
||||
/// * `logrows` - log2 number of rows in the matrix, including any system and blinding rows.
|
||||
/// * `capacity` - The number of advice cells to allocate
|
||||
/// Creates a new VarTensor::Advice with standard (blinded) columns, used when
|
||||
/// the values need to be hidden in the proof.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `cs` - The constraint system to create columns in
|
||||
/// * `logrows` - Log base 2 of the total number of rows
|
||||
/// * `num_inner_cols` - Number of columns in each inner block
|
||||
/// * `capacity` - Total number of advice cells to allocate
|
||||
///
|
||||
/// # Returns
|
||||
/// A new VarTensor::Advice with blinded columns enabled for equality constraints
|
||||
pub fn new_advice<F: PrimeField>(
|
||||
cs: &mut ConstraintSystem<F>,
|
||||
logrows: usize,
|
||||
@@ -133,11 +154,17 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Initializes fixed columns to support the VarTensor::Advice
|
||||
/// Arguments
|
||||
/// * `cs` - The constraint system
|
||||
/// * `logrows` - log2 number of rows in the matrix, including any system and blinding rows.
|
||||
/// * `capacity` - The number of advice cells to allocate
|
||||
/// Initializes fixed columns in the constraint system to support the VarTensor::Advice
|
||||
/// Fixed columns are used for constant values that are known at circuit creation time.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `cs` - The constraint system to create columns in
|
||||
/// * `logrows` - Log base 2 of the total number of rows
|
||||
/// * `num_constants` - Number of constant values needed
|
||||
/// * `module_requires_fixed` - Whether the module requires at least one fixed column
|
||||
///
|
||||
/// # Returns
|
||||
/// The number of fixed columns created
|
||||
pub fn constant_cols<F: PrimeField>(
|
||||
cs: &mut ConstraintSystem<F>,
|
||||
logrows: usize,
|
||||
@@ -169,7 +196,14 @@ impl VarTensor {
|
||||
modulo
|
||||
}
|
||||
|
||||
/// Create a new VarTensor::Dummy
|
||||
/// Creates a new dummy VarTensor for testing or temporary storage
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `logrows` - Log base 2 of the total number of rows
|
||||
/// * `num_inner_cols` - Number of columns in each inner block
|
||||
///
|
||||
/// # Returns
|
||||
/// A new VarTensor::Dummy with the specified dimensions
|
||||
pub fn dummy(logrows: usize, num_inner_cols: usize) -> Self {
|
||||
let base = 2u32;
|
||||
let max_rows = base.pow(logrows as u32) as usize - 6;
|
||||
@@ -179,7 +213,7 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the dims of the object the VarTensor represents
|
||||
/// Returns the number of blocks in the tensor
|
||||
pub fn num_blocks(&self) -> usize {
|
||||
match self {
|
||||
VarTensor::Advice { inner, .. } => inner.len(),
|
||||
@@ -187,7 +221,7 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Num inner cols
|
||||
/// Returns the number of columns in each inner block
|
||||
pub fn num_inner_cols(&self) -> usize {
|
||||
match self {
|
||||
VarTensor::Advice { num_inner_cols, .. } | VarTensor::Dummy { num_inner_cols, .. } => {
|
||||
@@ -197,7 +231,7 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Total number of columns
|
||||
/// Returns the total number of columns across all blocks
|
||||
pub fn num_cols(&self) -> usize {
|
||||
match self {
|
||||
VarTensor::Advice { inner, .. } => inner[0].len() * inner.len(),
|
||||
@@ -205,7 +239,7 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the size of each column
|
||||
/// Returns the maximum number of rows in each column
|
||||
pub fn col_size(&self) -> usize {
|
||||
match self {
|
||||
VarTensor::Advice { col_size, .. } | VarTensor::Dummy { col_size, .. } => *col_size,
|
||||
@@ -213,7 +247,7 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the size of each column
|
||||
/// Returns the total size of each block (num_inner_cols * col_size)
|
||||
pub fn block_size(&self) -> usize {
|
||||
match self {
|
||||
VarTensor::Advice {
|
||||
@@ -230,7 +264,13 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Take a linear coordinate and output the (column, row) position in the storage block.
|
||||
/// Converts a linear coordinate to (block, column, row) coordinates in the storage
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `linear_coord` - The linear index to convert
|
||||
///
|
||||
/// # Returns
|
||||
/// A tuple of (block_index, column_index, row_index)
|
||||
pub fn cartesian_coord(&self, linear_coord: usize) -> (usize, usize, usize) {
|
||||
// x indexes over blocks of size num_inner_cols
|
||||
let x = linear_coord / self.block_size();
|
||||
@@ -243,7 +283,17 @@ impl VarTensor {
|
||||
}
|
||||
|
||||
impl VarTensor {
|
||||
/// Retrieve the value of a specific cell in the tensor.
|
||||
/// Queries a range of cells in the tensor during circuit synthesis
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `meta` - Virtual cells accessor
|
||||
/// * `x` - Block index
|
||||
/// * `y` - Column index within block
|
||||
/// * `z` - Starting row offset
|
||||
/// * `rng` - Number of consecutive rows to query
|
||||
///
|
||||
/// # Returns
|
||||
/// A tensor of expressions representing the queried cells
|
||||
pub fn query_rng<F: PrimeField>(
|
||||
&self,
|
||||
meta: &mut VirtualCells<'_, F>,
|
||||
@@ -268,7 +318,16 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieve the value of a specific block at an offset in the tensor.
|
||||
/// Queries an entire block of cells at a given offset
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `meta` - Virtual cells accessor
|
||||
/// * `x` - Block index
|
||||
/// * `z` - Row offset
|
||||
/// * `rng` - Number of consecutive rows to query
|
||||
///
|
||||
/// # Returns
|
||||
/// A tensor of expressions representing the queried block
|
||||
pub fn query_whole_block<F: PrimeField>(
|
||||
&self,
|
||||
meta: &mut VirtualCells<'_, F>,
|
||||
@@ -293,7 +352,16 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Assigns a constant value to a specific cell in the tensor.
|
||||
/// Assigns a constant value to a specific cell in the tensor
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `region` - The region to assign values in
|
||||
/// * `offset` - Base offset for the assignment
|
||||
/// * `coord` - Coordinate within the tensor
|
||||
/// * `constant` - The constant value to assign
|
||||
///
|
||||
/// # Returns
|
||||
/// The assigned cell or an error if assignment fails
|
||||
pub fn assign_constant<F: PrimeField + TensorType + PartialOrd>(
|
||||
&self,
|
||||
region: &mut Region<F>,
|
||||
@@ -313,7 +381,17 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Assigns [ValTensor] to the columns of the inner tensor.
|
||||
/// Assigns values from a ValTensor to this tensor, excluding specified positions
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `region` - The region to assign values in
|
||||
/// * `offset` - Base offset for assignments
|
||||
/// * `values` - The ValTensor containing values to assign
|
||||
/// * `omissions` - Set of positions to skip during assignment
|
||||
/// * `constants` - Map for tracking constant assignments
|
||||
///
|
||||
/// # Returns
|
||||
/// The assigned ValTensor or an error if assignment fails
|
||||
pub fn assign_with_omissions<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
|
||||
&self,
|
||||
region: &mut Region<F>,
|
||||
@@ -325,7 +403,10 @@ impl VarTensor {
|
||||
let mut assigned_coord = 0;
|
||||
let mut res: ValTensor<F> = match values {
|
||||
ValTensor::Instance { .. } => {
|
||||
unimplemented!("cannot assign instance to advice columns with omissions")
|
||||
error!(
|
||||
"assignment with omissions is not supported on instance columns. increase K if you require more rows."
|
||||
);
|
||||
Err(halo2_proofs::plonk::Error::Synthesis)
|
||||
}
|
||||
ValTensor::Value { inner: v, .. } => Ok::<ValTensor<F>, halo2_proofs::plonk::Error>(
|
||||
v.enum_map(|coord, k| {
|
||||
@@ -344,7 +425,16 @@ impl VarTensor {
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Assigns [ValTensor] to the columns of the inner tensor.
|
||||
/// Assigns values from a ValTensor to this tensor
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `region` - The region to assign values in
|
||||
/// * `offset` - Base offset for assignments
|
||||
/// * `values` - The ValTensor containing values to assign
|
||||
/// * `constants` - Map for tracking constant assignments
|
||||
///
|
||||
/// # Returns
|
||||
/// The assigned ValTensor or an error if assignment fails
|
||||
pub fn assign<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
|
||||
&self,
|
||||
region: &mut Region<F>,
|
||||
@@ -396,14 +486,23 @@ impl VarTensor {
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Helper function to get the remaining size of the column
|
||||
/// Returns the remaining available space in a column for assignments
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `offset` - Current offset in the column
|
||||
/// * `values` - The ValTensor to check space for
|
||||
///
|
||||
/// # Returns
|
||||
/// The number of rows that need to be flushed or an error if space is insufficient
|
||||
pub fn get_column_flush<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
|
||||
&self,
|
||||
offset: usize,
|
||||
values: &ValTensor<F>,
|
||||
) -> Result<usize, halo2_proofs::plonk::Error> {
|
||||
if values.len() > self.col_size() {
|
||||
error!("Values are too large for the column");
|
||||
error!(
|
||||
"There are too many values to flush for this column size, try setting the logrows to a higher value (eg. --logrows 22 on the cli)"
|
||||
);
|
||||
return Err(halo2_proofs::plonk::Error::Synthesis);
|
||||
}
|
||||
|
||||
@@ -427,8 +526,16 @@ impl VarTensor {
|
||||
Ok(flush_len)
|
||||
}
|
||||
|
||||
/// Assigns [ValTensor] to the columns of the inner tensor. Whereby the values are assigned to a single column, without overflowing.
|
||||
/// So for instance if we are assigning 10 values and we are at index 18 of the column, and the columns are of length 20, we skip the last 2 values of current column and start from the beginning of the next column.
|
||||
/// Assigns values to a single column, avoiding column overflow by flushing to the next column if needed
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `region` - The region to assign values in
|
||||
/// * `offset` - Base offset for assignments
|
||||
/// * `values` - The ValTensor containing values to assign
|
||||
/// * `constants` - Map for tracking constant assignments
|
||||
///
|
||||
/// # Returns
|
||||
/// A tuple of (assigned ValTensor, number of rows flushed) or an error if assignment fails
|
||||
pub fn assign_exact_column<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
|
||||
&self,
|
||||
region: &mut Region<F>,
|
||||
@@ -443,8 +550,17 @@ impl VarTensor {
|
||||
Ok((assigned_vals, flush_len))
|
||||
}
|
||||
|
||||
/// Assigns specific values (`ValTensor`) to the columns of the inner tensor but allows for column wrapping for accumulated operations.
|
||||
/// Duplication occurs by copying the last cell of the column to the first cell next column and creating a copy constraint between the two.
|
||||
/// Assigns values with duplication in dummy mode, used for testing and simulation
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `row` - Starting row for assignment
|
||||
/// * `offset` - Base offset for assignments
|
||||
/// * `values` - The ValTensor containing values to assign
|
||||
/// * `single_inner_col` - Whether to treat as a single column
|
||||
/// * `constants` - Map for tracking constant assignments
|
||||
///
|
||||
/// # Returns
|
||||
/// A tuple of (assigned ValTensor, total length used) or an error if assignment fails
|
||||
pub fn dummy_assign_with_duplication<
|
||||
F: PrimeField + TensorType + PartialOrd + std::hash::Hash,
|
||||
>(
|
||||
@@ -456,8 +572,13 @@ impl VarTensor {
|
||||
constants: &mut ConstantsMap<F>,
|
||||
) -> Result<(ValTensor<F>, usize), halo2_proofs::plonk::Error> {
|
||||
match values {
|
||||
ValTensor::Instance { .. } => unimplemented!("duplication is not supported on instance columns. increase K if you require more rows."),
|
||||
ValTensor::Value { inner: v, dims , ..} => {
|
||||
ValTensor::Instance { .. } => {
|
||||
error!(
|
||||
"duplication is not supported on instance columns. increase K if you require more rows."
|
||||
);
|
||||
Err(halo2_proofs::plonk::Error::Synthesis)
|
||||
}
|
||||
ValTensor::Value { inner: v, dims, .. } => {
|
||||
let duplication_freq = if single_inner_col {
|
||||
self.col_size()
|
||||
} else {
|
||||
@@ -470,21 +591,20 @@ impl VarTensor {
|
||||
self.num_inner_cols()
|
||||
};
|
||||
|
||||
let duplication_offset = if single_inner_col {
|
||||
row
|
||||
} else {
|
||||
offset
|
||||
};
|
||||
|
||||
let duplication_offset = if single_inner_col { row } else { offset };
|
||||
|
||||
// duplicates every nth element to adjust for column overflow
|
||||
let mut res: ValTensor<F> = v.duplicate_every_n(duplication_freq, num_repeats, duplication_offset).unwrap().into();
|
||||
let mut res: ValTensor<F> = v
|
||||
.duplicate_every_n(duplication_freq, num_repeats, duplication_offset)
|
||||
.unwrap()
|
||||
.into();
|
||||
|
||||
let constants_map = res.create_constants_map();
|
||||
constants.extend(constants_map);
|
||||
|
||||
let total_used_len = res.len();
|
||||
res.remove_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
|
||||
res.remove_every_n(duplication_freq, num_repeats, duplication_offset)
|
||||
.unwrap();
|
||||
|
||||
res.reshape(dims).unwrap();
|
||||
res.set_scale(values.scale());
|
||||
@@ -494,7 +614,16 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Assigns specific values (`ValTensor`) to the columns of the inner tensor but allows for column wrapping for accumulated operations.
|
||||
/// Assigns values with duplication but without enforcing constraints between duplicated values
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `region` - The region to assign values in
|
||||
/// * `offset` - Base offset for assignments
|
||||
/// * `values` - The ValTensor containing values to assign
|
||||
/// * `constants` - Map for tracking constant assignments
|
||||
///
|
||||
/// # Returns
|
||||
/// A tuple of (assigned ValTensor, total length used) or an error if assignment fails
|
||||
pub fn assign_with_duplication_unconstrained<
|
||||
F: PrimeField + TensorType + PartialOrd + std::hash::Hash,
|
||||
>(
|
||||
@@ -505,9 +634,13 @@ impl VarTensor {
|
||||
constants: &mut ConstantsMap<F>,
|
||||
) -> Result<(ValTensor<F>, usize), halo2_proofs::plonk::Error> {
|
||||
match values {
|
||||
ValTensor::Instance { .. } => unimplemented!("duplication is not supported on instance columns. increase K if you require more rows."),
|
||||
ValTensor::Value { inner: v, dims , ..} => {
|
||||
|
||||
ValTensor::Instance { .. } => {
|
||||
error!(
|
||||
"duplication is not supported on instance columns. increase K if you require more rows."
|
||||
);
|
||||
Err(halo2_proofs::plonk::Error::Synthesis)
|
||||
}
|
||||
ValTensor::Value { inner: v, dims, .. } => {
|
||||
let duplication_freq = self.block_size();
|
||||
|
||||
let num_repeats = self.num_inner_cols();
|
||||
@@ -515,17 +648,31 @@ impl VarTensor {
|
||||
let duplication_offset = offset;
|
||||
|
||||
// duplicates every nth element to adjust for column overflow
|
||||
let v = v.duplicate_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
|
||||
let v = v
|
||||
.duplicate_every_n(duplication_freq, num_repeats, duplication_offset)
|
||||
.map_err(|e| {
|
||||
error!("Error duplicating values: {:?}", e);
|
||||
halo2_proofs::plonk::Error::Synthesis
|
||||
})?;
|
||||
let mut res: ValTensor<F> = {
|
||||
v.enum_map(|coord, k| {
|
||||
let cell = self.assign_value(region, offset, k.clone(), coord, constants)?;
|
||||
Ok::<_, halo2_proofs::plonk::Error>(cell)
|
||||
|
||||
})?.into()};
|
||||
let cell =
|
||||
self.assign_value(region, offset, k.clone(), coord, constants)?;
|
||||
Ok::<_, halo2_proofs::plonk::Error>(cell)
|
||||
})?
|
||||
.into()
|
||||
};
|
||||
let total_used_len = res.len();
|
||||
res.remove_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
|
||||
res.remove_every_n(duplication_freq, num_repeats, duplication_offset)
|
||||
.map_err(|e| {
|
||||
error!("Error duplicating values: {:?}", e);
|
||||
halo2_proofs::plonk::Error::Synthesis
|
||||
})?;
|
||||
|
||||
res.reshape(dims).unwrap();
|
||||
res.reshape(dims).map_err(|e| {
|
||||
error!("Error duplicating values: {:?}", e);
|
||||
halo2_proofs::plonk::Error::Synthesis
|
||||
})?;
|
||||
res.set_scale(values.scale());
|
||||
|
||||
Ok((res, total_used_len))
|
||||
@@ -533,8 +680,18 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Assigns specific values (`ValTensor`) to the columns of the inner tensor but allows for column wrapping for accumulated operations.
|
||||
/// Duplication occurs by copying the last cell of the column to the first cell next column and creating a copy constraint between the two.
|
||||
/// Assigns values with duplication and enforces equality constraints between duplicated values
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `region` - The region to assign values in
|
||||
/// * `row` - Starting row for assignment
|
||||
/// * `offset` - Base offset for assignments
|
||||
/// * `values` - The ValTensor containing values to assign
|
||||
/// * `check_mode` - Mode for checking equality constraints
|
||||
/// * `constants` - Map for tracking constant assignments
|
||||
///
|
||||
/// # Returns
|
||||
/// A tuple of (assigned ValTensor, total length used) or an error if assignment fails
|
||||
pub fn assign_with_duplication_constrained<
|
||||
F: PrimeField + TensorType + PartialOrd + std::hash::Hash,
|
||||
>(
|
||||
@@ -549,61 +706,71 @@ impl VarTensor {
|
||||
let mut prev_cell = None;
|
||||
|
||||
match values {
|
||||
ValTensor::Instance { .. } => unimplemented!("duplication is not supported on instance columns. increase K if you require more rows."),
|
||||
ValTensor::Value { inner: v, dims , ..} => {
|
||||
|
||||
ValTensor::Instance { .. } => {
|
||||
error!(
|
||||
"duplication is not supported on instance columns. increase K if you require more rows."
|
||||
);
|
||||
Err(halo2_proofs::plonk::Error::Synthesis)
|
||||
}
|
||||
ValTensor::Value { inner: v, dims, .. } => {
|
||||
let duplication_freq = self.col_size();
|
||||
let num_repeats = 1;
|
||||
let duplication_offset = row;
|
||||
|
||||
// duplicates every nth element to adjust for column overflow
|
||||
let v = v.duplicate_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
|
||||
let mut res: ValTensor<F> =
|
||||
v.enum_map(|coord, k| {
|
||||
let v = v
|
||||
.duplicate_every_n(duplication_freq, num_repeats, duplication_offset)
|
||||
.unwrap();
|
||||
let mut res: ValTensor<F> = v
|
||||
.enum_map(|coord, k| {
|
||||
let step = self.num_inner_cols();
|
||||
|
||||
let step = self.num_inner_cols();
|
||||
let (x, y, z) = self.cartesian_coord(offset + coord * step);
|
||||
if matches!(check_mode, CheckMode::SAFE) && coord > 0 && z == 0 && y == 0 {
|
||||
// assert that duplication occurred correctly
|
||||
assert_eq!(
|
||||
Into::<IntegerRep>::into(k.clone()),
|
||||
Into::<IntegerRep>::into(v[coord - 1].clone())
|
||||
);
|
||||
};
|
||||
|
||||
let (x, y, z) = self.cartesian_coord(offset + coord * step);
|
||||
if matches!(check_mode, CheckMode::SAFE) && coord > 0 && z == 0 && y == 0 {
|
||||
// assert that duplication occurred correctly
|
||||
assert_eq!(Into::<IntegerRep>::into(k.clone()), Into::<IntegerRep>::into(v[coord - 1].clone()));
|
||||
};
|
||||
let cell =
|
||||
self.assign_value(region, offset, k.clone(), coord * step, constants)?;
|
||||
|
||||
let cell = self.assign_value(region, offset, k.clone(), coord * step, constants)?;
|
||||
let at_end_of_column = z == duplication_freq - 1;
|
||||
let at_beginning_of_column = z == 0;
|
||||
|
||||
let at_end_of_column = z == duplication_freq - 1;
|
||||
let at_beginning_of_column = z == 0;
|
||||
|
||||
if at_end_of_column {
|
||||
// if we are at the end of the column, we need to copy the cell to the next column
|
||||
prev_cell = Some(cell.clone());
|
||||
} else if coord > 0 && at_beginning_of_column {
|
||||
if let Some(prev_cell) = prev_cell.as_ref() {
|
||||
let cell = if let Some(cell) = cell.cell() {
|
||||
cell
|
||||
if at_end_of_column {
|
||||
// if we are at the end of the column, we need to copy the cell to the next column
|
||||
prev_cell = Some(cell.clone());
|
||||
} else if coord > 0 && at_beginning_of_column {
|
||||
if let Some(prev_cell) = prev_cell.as_ref() {
|
||||
let cell = if let Some(cell) = cell.cell() {
|
||||
cell
|
||||
} else {
|
||||
error!("Error getting cell: {:?}", (x, y));
|
||||
return Err(halo2_proofs::plonk::Error::Synthesis);
|
||||
};
|
||||
let prev_cell = if let Some(prev_cell) = prev_cell.cell() {
|
||||
prev_cell
|
||||
} else {
|
||||
error!("Error getting prev cell: {:?}", (x, y));
|
||||
return Err(halo2_proofs::plonk::Error::Synthesis);
|
||||
};
|
||||
region.constrain_equal(prev_cell, cell)?;
|
||||
} else {
|
||||
error!("Error getting cell: {:?}", (x,y));
|
||||
error!("Previous cell was not set");
|
||||
return Err(halo2_proofs::plonk::Error::Synthesis);
|
||||
};
|
||||
let prev_cell = if let Some(prev_cell) = prev_cell.cell() {
|
||||
prev_cell
|
||||
} else {
|
||||
error!("Error getting prev cell: {:?}", (x,y));
|
||||
return Err(halo2_proofs::plonk::Error::Synthesis);
|
||||
};
|
||||
region.constrain_equal(prev_cell,cell)?;
|
||||
} else {
|
||||
error!("Previous cell was not set");
|
||||
return Err(halo2_proofs::plonk::Error::Synthesis);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(cell)
|
||||
|
||||
})?.into();
|
||||
Ok(cell)
|
||||
})?
|
||||
.into();
|
||||
|
||||
let total_used_len = res.len();
|
||||
res.remove_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
|
||||
res.remove_every_n(duplication_freq, num_repeats, duplication_offset)
|
||||
.unwrap();
|
||||
|
||||
res.reshape(dims).unwrap();
|
||||
res.set_scale(values.scale());
|
||||
@@ -613,6 +780,17 @@ impl VarTensor {
|
||||
}
|
||||
}
|
||||
|
||||
/// Assigns a single value to the tensor. This is a helper function used by other assignment methods.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `region` - The region to assign values in
|
||||
/// * `offset` - Base offset for the assignment
|
||||
/// * `k` - The value to assign
|
||||
/// * `coord` - The coordinate where to assign the value
|
||||
/// * `constants` - Map for tracking constant assignments
|
||||
///
|
||||
/// # Returns
|
||||
/// The assigned value or an error if assignment fails
|
||||
fn assign_value<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
|
||||
&self,
|
||||
region: &mut Region<F>,
|
||||
@@ -623,32 +801,49 @@ impl VarTensor {
|
||||
) -> Result<ValType<F>, halo2_proofs::plonk::Error> {
|
||||
let (x, y, z) = self.cartesian_coord(offset + coord);
|
||||
let res = match k {
|
||||
// Handle direct value assignment
|
||||
ValType::Value(v) => match &self {
|
||||
VarTensor::Advice { inner: advices, .. } => {
|
||||
ValType::PrevAssigned(region.assign_advice(|| "k", advices[x][y], z, || v)?)
|
||||
}
|
||||
_ => unimplemented!(),
|
||||
_ => {
|
||||
error!("VarTensor was not initialized");
|
||||
return Err(halo2_proofs::plonk::Error::Synthesis);
|
||||
}
|
||||
},
|
||||
// Handle copying previously assigned value
|
||||
ValType::PrevAssigned(v) => match &self {
|
||||
VarTensor::Advice { inner: advices, .. } => {
|
||||
ValType::PrevAssigned(v.copy_advice(|| "k", region, advices[x][y], z)?)
|
||||
}
|
||||
_ => unimplemented!(),
|
||||
_ => {
|
||||
error!("VarTensor was not initialized");
|
||||
return Err(halo2_proofs::plonk::Error::Synthesis);
|
||||
}
|
||||
},
|
||||
// Handle copying previously assigned constant
|
||||
ValType::AssignedConstant(v, val) => match &self {
|
||||
VarTensor::Advice { inner: advices, .. } => {
|
||||
ValType::AssignedConstant(v.copy_advice(|| "k", region, advices[x][y], z)?, val)
|
||||
}
|
||||
_ => unimplemented!(),
|
||||
_ => {
|
||||
error!("VarTensor was not initialized");
|
||||
return Err(halo2_proofs::plonk::Error::Synthesis);
|
||||
}
|
||||
},
|
||||
// Handle assigning evaluated value
|
||||
ValType::AssignedValue(v) => match &self {
|
||||
VarTensor::Advice { inner: advices, .. } => ValType::PrevAssigned(
|
||||
region
|
||||
.assign_advice(|| "k", advices[x][y], z, || v)?
|
||||
.evaluate(),
|
||||
),
|
||||
_ => unimplemented!(),
|
||||
_ => {
|
||||
error!("VarTensor was not initialized");
|
||||
return Err(halo2_proofs::plonk::Error::Synthesis);
|
||||
}
|
||||
},
|
||||
// Handle constant value assignment with caching
|
||||
ValType::Constant(v) => {
|
||||
if let std::collections::hash_map::Entry::Vacant(e) = constants.entry(v) {
|
||||
let value = ValType::AssignedConstant(
|
||||
|
||||
Binary file not shown.
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -28,11 +28,12 @@
|
||||
"commitment": "KZG",
|
||||
"decomp_base": 128,
|
||||
"decomp_legs": 2,
|
||||
"bounded_log_lookup": false
|
||||
"bounded_log_lookup": false,
|
||||
"ignore_range_check_inputs_outputs": false
|
||||
},
|
||||
"num_rows": 46,
|
||||
"total_assignments": 92,
|
||||
"total_const_size": 3,
|
||||
"num_rows": 236,
|
||||
"total_assignments": 472,
|
||||
"total_const_size": 4,
|
||||
"total_dynamic_col_size": 0,
|
||||
"max_dynamic_input_len": 0,
|
||||
"num_dynamic_lookups": 0,
|
||||
|
||||
Binary file not shown.
14
tests/foundry/.gitignore
vendored
Normal file
14
tests/foundry/.gitignore
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
# Compiler files
|
||||
cache/
|
||||
out/
|
||||
|
||||
# Ignores development broadcast logs
|
||||
!/broadcast
|
||||
/broadcast/*/31337/
|
||||
/broadcast/**/dry-run/
|
||||
|
||||
# Docs
|
||||
docs/
|
||||
|
||||
# Dotenv file
|
||||
.env
|
||||
66
tests/foundry/README.md
Normal file
66
tests/foundry/README.md
Normal file
@@ -0,0 +1,66 @@
|
||||
## Foundry
|
||||
|
||||
**Foundry is a blazing fast, portable and modular toolkit for Ethereum application development written in Rust.**
|
||||
|
||||
Foundry consists of:
|
||||
|
||||
- **Forge**: Ethereum testing framework (like Truffle, Hardhat and DappTools).
|
||||
- **Cast**: Swiss army knife for interacting with EVM smart contracts, sending transactions and getting chain data.
|
||||
- **Anvil**: Local Ethereum node, akin to Ganache, Hardhat Network.
|
||||
- **Chisel**: Fast, utilitarian, and verbose solidity REPL.
|
||||
|
||||
## Documentation
|
||||
|
||||
https://book.getfoundry.sh/
|
||||
|
||||
## Usage
|
||||
|
||||
### Build
|
||||
|
||||
```shell
|
||||
$ forge build
|
||||
```
|
||||
|
||||
### Test
|
||||
|
||||
```shell
|
||||
$ forge test
|
||||
```
|
||||
|
||||
### Format
|
||||
|
||||
```shell
|
||||
$ forge fmt
|
||||
```
|
||||
|
||||
### Gas Snapshots
|
||||
|
||||
```shell
|
||||
$ forge snapshot
|
||||
```
|
||||
|
||||
### Anvil
|
||||
|
||||
```shell
|
||||
$ anvil
|
||||
```
|
||||
|
||||
### Deploy
|
||||
|
||||
```shell
|
||||
$ forge script script/Counter.s.sol:CounterScript --rpc-url <your_rpc_url> --private-key <your_private_key>
|
||||
```
|
||||
|
||||
### Cast
|
||||
|
||||
```shell
|
||||
$ cast <subcommand>
|
||||
```
|
||||
|
||||
### Help
|
||||
|
||||
```shell
|
||||
$ forge --help
|
||||
$ anvil --help
|
||||
$ cast --help
|
||||
```
|
||||
6
tests/foundry/foundry.toml
Normal file
6
tests/foundry/foundry.toml
Normal file
@@ -0,0 +1,6 @@
|
||||
[profile.default]
|
||||
src = "../../contracts"
|
||||
out = "out"
|
||||
libs = ["lib"]
|
||||
|
||||
# See more config options https://github.com/foundry-rs/foundry/blob/master/crates/config/README.md#all-options
|
||||
1
tests/foundry/remappings.txt
Normal file
1
tests/foundry/remappings.txt
Normal file
@@ -0,0 +1 @@
|
||||
contracts/=../../contracts/
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user