Mirror of https://github.com/zkonduit/ezkl.git, synced 2026-01-13 16:27:59 -05:00
Compare commits

19 Commits:
c1ce8c88d0
876a9584a1
7d7f049cc4
96f3fd94b2
6263510c56
f5b8ae3213
b2e4e414f0
0b0199e2b7
5e169bdd17
64cbcb3f7e
ee17f0ff9a
ee55e7dc19
5df83886c7
061ae89c01
0fc1c3eecd
85302453d9
523c77c912
948e5cd4b9
00155e585f
@@ -2,3 +2,16 @@
runner = 'wasm-bindgen-test-runner'
rustflags = ["-C", "target-feature=+atomics,+bulk-memory,+mutable-globals","-C",
"link-arg=--max-memory=4294967296"]
+
+[target.x86_64-apple-darwin]
+rustflags = [
+    "-C", "link-arg=-undefined",
+    "-C", "link-arg=dynamic_lookup",
+]
+
+[target.aarch64-apple-darwin]
+rustflags = [
+    "-C", "link-arg=-undefined",
+    "-C", "link-arg=dynamic_lookup",
+]
.github/workflows/pypi-gpu.yml (vendored, 4 lines changed)
@@ -98,14 +98,14 @@ jobs:
      # publishes to PyPI
      - name: Publish package distributions to PyPI
        continue-on-error: true
-        uses: pypa/gh-action-pypi-publish@release/v1
+        uses: pypa/gh-action-pypi-publish@unstable/v1
        with:
          packages-dir: ./

      # publishes to TestPyPI
      - name: Publish package distribution to TestPyPI
        continue-on-error: true
-        uses: pypa/gh-action-pypi-publish@release/v1
+        uses: pypa/gh-action-pypi-publish@unstable/v1
        with:
          repository-url: https://test.pypi.org/legacy/
          packages-dir: ./
.github/workflows/pypi.yml (vendored, 10 lines changed)
@@ -168,7 +168,7 @@ jobs:
          name: wheels
          path: dist

-  # TODO: There's a problem with the maturin-action toolchain for arm arch leading to failed builds
+  # There's a problem with the maturin-action toolchain for arm arch leading to failed builds
  # linux-cross:
  #   runs-on: ubuntu-latest
  #   strategy:
@@ -283,8 +283,6 @@ jobs:
        platform:
-          - target: aarch64-unknown-linux-musl
-            arch: aarch64
          - target: armv7-unknown-linux-musleabihf
            arch: armv7
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
@@ -308,7 +306,7 @@ jobs:
          manylinux: musllinux_1_2
          args: --release --out dist --features python-bindings

-      - uses: uraimo/run-on-arch-action@v2.5.0
+      - uses: uraimo/run-on-arch-action@v2.8.1
        name: Install built wheel
        with:
          arch: ${{ matrix.platform.arch }}
@@ -350,14 +348,14 @@ jobs:
      # publishes to PyPI
      - name: Publish package distributions to PyPI
        continue-on-error: true
-        uses: pypa/gh-action-pypi-publish@release/v1
+        uses: pypa/gh-action-pypi-publish@unstable/v1
        with:
          packages-dir: ./

      # publishes to TestPyPI
      - name: Publish package distribution to TestPyPI
        continue-on-error: true
-        uses: pypa/gh-action-pypi-publish@release/v1
+        uses: pypa/gh-action-pypi-publish@unstable/v1
        with:
          repository-url: https://test.pypi.org/legacy/
          packages-dir: ./
.github/workflows/rust.yml (vendored, 59 lines changed)
@@ -207,23 +207,6 @@ jobs:
          # AR=/opt/homebrew/opt/llvm/bin/llvm-ar CC=/opt/homebrew/opt/llvm/bin/clang wasm-pack test --firefox --headless -- -Z build-std="panic_abort,std" --features web
        run: wasm-pack test --chrome --headless -- -Z build-std="panic_abort,std" --features web

-  tutorial:
-    runs-on: ubuntu-latest
-    needs: [build, library-tests, docs, python-tests, python-integration-tests]
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions-rs/toolchain@v1
-        with:
-          toolchain: nightly-2024-07-18
-          override: true
-          components: rustfmt, clippy
-      - uses: baptiste0928/cargo-install@v1
-        with:
-          crate: cargo-nextest
-          locked: true
-      - name: Circuit Render
-        run: cargo nextest run --release --verbose tests::tutorial_
-
  mock-proving-tests:
    runs-on: non-gpu
    needs: [build, library-tests, docs, python-tests, python-integration-tests]
@@ -240,6 +223,8 @@ jobs:
          locked: true
      # - name: The Worm Mock
      #   run: cargo nextest run --release --verbose tests::large_mock_::large_tests_5_expects -- --include-ignored
+      - name: public outputs and bounded lookup log
+        run: cargo nextest run --release --verbose tests::mock_bounded_lookup_log --test-threads 32
      - name: public outputs and tolerance > 0
        run: cargo nextest run --release --verbose tests::mock_tolerance_public_outputs_ --test-threads 32
      - name: public outputs + batch size == 10
@@ -492,23 +477,23 @@ jobs:
      - name: Mock aggr tests (KZG)
        run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_mock_prove_and_verify_ --test-threads 8

-  prove-and-verify-aggr-tests-gpu:
-    runs-on: GPU
-    env:
-      ENABLE_ICICLE_GPU: true
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions-rs/toolchain@v1
-        with:
-          toolchain: nightly-2024-07-18
-          override: true
-          components: rustfmt, clippy
-      - uses: baptiste0928/cargo-install@v1
-        with:
-          crate: cargo-nextest
-          locked: true
-      - name: KZG tests
-        run: cargo nextest run --verbose tests_aggr::kzg_aggr_prove_and_verify_ --features icicle --test-threads 1 -- --include-ignored
+  # prove-and-verify-aggr-tests-gpu:
+  #   runs-on: GPU
+  #   env:
+  #     ENABLE_ICICLE_GPU: true
+  #   steps:
+  #     - uses: actions/checkout@v4
+  #     - uses: actions-rs/toolchain@v1
+  #       with:
+  #         toolchain: nightly-2024-07-18
+  #         override: true
+  #         components: rustfmt, clippy
+  #     - uses: baptiste0928/cargo-install@v1
+  #       with:
+  #         crate: cargo-nextest
+  #         locked: true
+  #     - name: KZG tests
+  #       run: cargo nextest run --verbose tests_aggr::kzg_aggr_prove_and_verify_ --features icicle --test-threads 1 -- --include-ignored

  prove-and-verify-aggr-tests:
    runs-on: large-self-hosted
@@ -612,8 +597,6 @@ jobs:
        run: python -m venv .env --clear; source .env/bin/activate; pip install -r requirements.txt;
      - name: Build python ezkl
        run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
-      - name: Div rebase
-        run: source .env/bin/activate; cargo nextest run --release --verbose tests::accuracy_measurement_div_rebase_
      - name: Public inputs
        run: source .env/bin/activate; cargo nextest run --release --verbose tests::accuracy_measurement_public_inputs_
      - name: fixed params
@@ -667,6 +650,10 @@ jobs:
        run: python -m venv .env --clear; source .env/bin/activate; pip install -r requirements.txt; python -m ensurepip --upgrade
      - name: Build python ezkl
        run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
+      - name: Neural bow
+        run: source .env/bin/activate; cargo nextest run py_tests::tests::neural_bag_of_words_ --no-capture
+      - name: Felt conversion
+        run: source .env/bin/activate; cargo nextest run py_tests::tests::felt_conversion_test_ --no-capture
      - name: Postgres tutorials
        run: source .env/bin/activate; cargo nextest run py_tests::tests::postgres_ --no-capture
      - name: Tictactoe tutorials
.github/workflows/update-ios-package.yml (vendored, 12 lines changed)
@@ -36,6 +36,15 @@ jobs:
          rm -rf ezkl-swift-package/Sources/EzklCoreBindings
          cp -r build/EzklCoreBindings ezkl-swift-package/Sources/

+      - name: Copy Test Files
+        run: |
+          rm -rf ezkl-swift-package/Tests/EzklAssets/*
+
+          cp tests/assets/kzg ezkl-swift-package/Tests/EzklAssets/kzg.srs
+          cp tests/assets/input.json ezkl-swift-package/Tests/EzklAssets/input.json
+          cp tests/assets/model.compiled ezkl-swift-package/Tests/EzklAssets/network.ezkl
+          cp tests/assets/settings.json ezkl-swift-package/Tests/EzklAssets/settings.json
+
      - name: Set up Xcode environment
        run: |
          sudo xcode-select -s /Applications/Xcode.app/Contents/Developer
@@ -66,10 +75,11 @@ jobs:
          git config user.name "GitHub Action"
          git config user.email "action@github.com"
          git add Sources/EzklCoreBindings
+          git add Tests/EzklAssets
          git commit -m "Automatically updated EzklCoreBindings for EZKL"
          git tag ${{ github.event.inputs.tag }}
          git remote set-url origin https://zkonduit:${EZKL_PORTER_TOKEN}@github.com/zkonduit/ezkl-swift-package.git
          git push origin
-          git push origin --tags
+          git push origin tag ${{ github.event.inputs.tag }}
        env:
          EZKL_PORTER_TOKEN: ${{ secrets.EZKL_PORTER_TOKEN }}
.gitignore (vendored, 3 lines changed)
@@ -27,7 +27,6 @@ __pycache__/
*.pyc
*.pyo
*.py[cod]
-bin/
build/
develop-eggs/
dist/
@@ -49,4 +48,4 @@ timingData.json
!tests/assets/pk.key
!tests/assets/vk.key
docs/python/build
-!tests/assets/vk_aggr.key
+!tests/assets/vk_aggr.key
Cargo.lock (generated, 717 lines changed; diff suppressed because it is too large)
Cargo.toml (77 lines changed)
@@ -16,11 +16,13 @@ crate-type = ["cdylib", "rlib", "staticlib"]


[dependencies]
-halo2_gadgets = { git = "https://github.com/zkonduit/halo2", branch = "ac/optional-selector-poly" }
+halo2_gadgets = { git = "https://github.com/zkonduit/halo2" }
halo2curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", rev = "b753a832e92d5c86c5c997327a9cf9de86a18851", features = [
    "derive_serde",
] }
-halo2_proofs = { git = "https://github.com/zkonduit/halo2", package = "halo2_proofs", branch = "ac/cache-lookup-commitments", features = ["circuit-params"] }
+halo2_proofs = { git = "https://github.com/zkonduit/halo2", package = "halo2_proofs", features = [
+    "circuit-params",
+] }
rand = { version = "0.8", default-features = false }
itertools = { version = "0.10.3", default-features = false }
clap = { version = "4.5.3", features = ["derive"], optional = true }
@@ -33,7 +35,7 @@ halo2_wrong_ecc = { git = "https://github.com/zkonduit/halo2wrong", branch = "ac
snark-verifier = { git = "https://github.com/zkonduit/snark-verifier", branch = "ac/chunked-mv-lookup", features = [
    "derive_serde",
] }
-halo2_solidity_verifier = { git = "https://github.com/alexander-camuto/halo2-solidity-verifier", branch = "ac/update-h2-curves", optional = true }
+halo2_solidity_verifier = { git = "https://github.com/alexander-camuto/halo2-solidity-verifier", optional = true }
maybe-rayon = { version = "0.1.1", default-features = false }
bincode = { version = "1.3.3", default-features = false }
unzip-n = "0.1.2"
@@ -43,10 +45,7 @@ tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package
semver = { version = "1.0.22", optional = true }

[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
-serde_json = { version = "1.0.97", features = [
-    "float_roundtrip",
-    "raw_value",
-] }
+serde_json = { version = "1.0.97", features = ["float_roundtrip", "raw_value"] }

# evm related deps
alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev = "5fbf57bac99edef9d8475190109a7ea9fb7e5e83", features = [
@@ -56,29 +55,46 @@ alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev = "5
    "rpc-types-eth",
    "signer-wallet",
    "node-bindings",
], optional = true }
-foundry-compilers = { version = "0.4.1", features = [
-    "svm-solc",
-], optional = true }
+foundry-compilers = { version = "0.4.1", features = ["svm-solc"], optional = true }
ethabi = { version = "18", optional = true }
indicatif = { version = "0.17.5", features = ["rayon"], optional = true }
gag = { version = "1.0.0", default-features = false, optional = true }
instant = { version = "0.1" }
-reqwest = { version = "0.12.4", default-features = false, features = ["default-tls", "multipart", "stream"], optional = true }
+reqwest = { version = "0.12.4", default-features = false, features = [
+    "default-tls",
+    "multipart",
+    "stream",
+], optional = true }
openssl = { version = "0.10.55", features = ["vendored"], optional = true }
tokio-postgres = { version = "0.7.10", optional = true }
pg_bigdecimal = { version = "0.1.5", optional = true }
lazy_static = { version = "1.4.0", optional = true }
colored_json = { version = "3.0.1", default-features = false, optional = true }
regex = { version = "1", default-features = false, optional = true }
-tokio = { version = "1.35.0", default-features = false, features = ["macros", "rt-multi-thread"], optional = true }
-pyo3 = { version = "0.21.2", features = ["extension-module", "abi3-py37", "macros"], default-features = false, optional = true }
-pyo3-asyncio = { git = "https://github.com/jopemachine/pyo3-asyncio/", branch="migration-pyo3-0.21", features = ["attributes", "tokio-runtime"], default-features = false, optional = true }
-pyo3-log = { version = "0.10.0", default-features = false, optional = true }
-tract-onnx = { git = "https://github.com/sonos/tract/", rev = "40c64319291184814d9fea5fdf4fa16f5a4f7116", default-features = false, optional = true }
+tokio = { version = "1.35.0", default-features = false, features = [
+    "macros",
+    "rt-multi-thread",
+], optional = true }
+pyo3 = { version = "0.23.2", features = [
+    "extension-module",
+    "abi3-py37",
+    "macros",
+], default-features = false, optional = true }
+pyo3-async-runtimes = { git = "https://github.com/PyO3/pyo3-async-runtimes", version = "0.23.0", features = [
+    "attributes",
+    "tokio-runtime",
+], default-features = false, optional = true }
+pyo3-log = { version = "0.12.0", default-features = false, optional = true }
+tract-onnx = { git = "https://github.com/sonos/tract/", rev = "37132e0397d0a73e5bd3a8615d932dabe44f6736", default-features = false, optional = true }
tabled = { version = "0.12.0", optional = true }
metal = { git = "https://github.com/gfx-rs/metal-rs", optional = true }
objc = { version = "0.2.4", optional = true }
mimalloc = { version = "0.1", optional = true }
+pyo3-stub-gen = { version = "0.6.0", optional = true }

# universal bindings
uniffi = { version = "=0.28.0", optional = true }
@@ -195,11 +211,21 @@ required-features = ["ezkl"]
name = "ios_gen_bindings"
required-features = ["ios-bindings", "uuid", "camino", "uniffi_bindgen"]

+[[bin]]
+name = "py_stub_gen"
+required-features = ["python-bindings"]
+
[features]
web = ["wasm-bindgen-rayon"]
-default = ["ezkl", "mv-lookup", "precompute-coset", "no-banner", "parallel-poly-read"]
+default = [
+    "ezkl",
+    "mv-lookup",
+    "precompute-coset",
+    "no-banner",
+    "parallel-poly-read",
+]
onnx = ["dep:tract-onnx"]
-python-bindings = ["pyo3", "pyo3-log", "pyo3-asyncio"]
+python-bindings = ["pyo3", "pyo3-log", "pyo3-async-runtimes", "pyo3-stub-gen"]
ios-bindings = ["mv-lookup", "precompute-coset", "parallel-poly-read", "uniffi"]
ios-bindings-test = ["ios-bindings", "uniffi/bindgen-tests"]
ezkl = [
@@ -231,7 +257,10 @@ ezkl = [
    "dep:clap",
    "dep:tosubcommand",
]
-parallel-poly-read = ["halo2_proofs/circuit-params", "halo2_proofs/parallel-poly-read"]
+parallel-poly-read = [
+    "halo2_proofs/circuit-params",
+    "halo2_proofs/parallel-poly-read",
+]
mv-lookup = [
    "halo2_proofs/mv-lookup",
    "snark-verifier/mv-lookup",
@@ -245,12 +274,9 @@ empty-cmd = []
no-banner = []
no-update = []

-# icicle patch to 0.1.0 if feature icicle is enabled
-[patch.'https://github.com/ingonyama-zk/icicle']
-icicle = { git = "https://github.com/ingonyama-zk/icicle?rev=45b00fb", package = "icicle", branch = "fix/vhnat/ezkl-build-fix" }
-
[patch.'https://github.com/zkonduit/halo2']
-halo2_proofs = { git = "https://github.com/zkonduit/halo2?branch=ac/cache-lookup-commitments#8b13a0d2a7a34d8daab010dadb2c47dfa47d37d0", package = "halo2_proofs", branch = "ac/cache-lookup-commitments" }
+halo2_proofs = { git = "https://github.com/zkonduit/halo2#0654e92bdf725fd44d849bfef3643870a8c7d50b", package = "halo2_proofs" }

[patch.crates-io]
uniffi_testing = { git = "https://github.com/ElusAegis/uniffi-rs", branch = "feat/testing-feature-build-fix" }
@@ -261,3 +287,10 @@ lto = "fat"
codegen-units = 1
# panic = "abort"

+
+[package.metadata.wasm-pack.profile.release]
+wasm-opt = [
+    "-O4",
+    "--flexible-inline-max-function-size",
+    "4294967295",
+]
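With the pyo3 bump to 0.23 the crate swaps pyo3-asyncio for pyo3-async-runtimes, but the Python-facing surface stays coroutine-based. A minimal sketch of driving the async entry points (both calls appear verbatim in examples/notebooks/neural_bow.ipynb below), assuming a local build via `maturin develop --features python-bindings --release` as used in the workflows above:

import asyncio

import ezkl  # built from this crate with the python-bindings feature


async def main():
    # async entry points exposed through the pyo3 async runtime
    await ezkl.get_srs()
    await ezkl.gen_witness()


asyncio.run(main())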
abis/DataAttestationSingle.json (new file, 147 lines)
@@ -0,0 +1,147 @@
[
  {
    "inputs": [
      {
        "internalType": "address",
        "name": "_contractAddresses",
        "type": "address"
      },
      {
        "internalType": "bytes",
        "name": "_callData",
        "type": "bytes"
      },
      {
        "internalType": "uint256",
        "name": "_decimals",
        "type": "uint256"
      },
      {
        "internalType": "uint256[]",
        "name": "_scales",
        "type": "uint256[]"
      },
      {
        "internalType": "uint8",
        "name": "_instanceOffset",
        "type": "uint8"
      },
      {
        "internalType": "address",
        "name": "_admin",
        "type": "address"
      }
    ],
    "stateMutability": "nonpayable",
    "type": "constructor"
  },
  {
    "inputs": [],
    "name": "accountCall",
    "outputs": [
      {
        "internalType": "address",
        "name": "contractAddress",
        "type": "address"
      },
      {
        "internalType": "bytes",
        "name": "callData",
        "type": "bytes"
      },
      {
        "internalType": "uint256",
        "name": "decimals",
        "type": "uint256"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "admin",
    "outputs": [
      {
        "internalType": "address",
        "name": "",
        "type": "address"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "instanceOffset",
    "outputs": [
      {
        "internalType": "uint8",
        "name": "",
        "type": "uint8"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "address",
        "name": "_contractAddresses",
        "type": "address"
      },
      {
        "internalType": "bytes",
        "name": "_callData",
        "type": "bytes"
      },
      {
        "internalType": "uint256",
        "name": "_decimals",
        "type": "uint256"
      }
    ],
    "name": "updateAccountCalls",
    "outputs": [],
    "stateMutability": "nonpayable",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "address",
        "name": "_admin",
        "type": "address"
      }
    ],
    "name": "updateAdmin",
    "outputs": [],
    "stateMutability": "nonpayable",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "address",
        "name": "verifier",
        "type": "address"
      },
      {
        "internalType": "bytes",
        "name": "encoded",
        "type": "bytes"
      }
    ],
    "name": "verifyWithDataAttestation",
    "outputs": [
      {
        "internalType": "bool",
        "name": "",
        "type": "bool"
      }
    ],
    "stateMutability": "view",
    "type": "function"
  }
]
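For reference, a hedged sketch of exercising this new ABI from Python with web3.py; the RPC endpoint and contract addresses below are placeholders, not values from this changeset:

import json

from web3 import Web3

w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))  # placeholder RPC endpoint

with open("abis/DataAttestationSingle.json") as f:
    abi = json.load(f)

# placeholder address of a deployed DataAttestationSingle instance
attestor = w3.eth.contract(
    address=Web3.to_checksum_address("0x" + "11" * 20), abi=abi
)

# read back the configured account call (contractAddress, callData, decimals)
contract_address, call_data, decimals = attestor.functions.accountCall().call()

# verify a proof against the attested on-chain data; `verifier` is the deployed
# verifier contract and `encoded` its calldata (placeholders again)
verifier = Web3.to_checksum_address("0x" + "22" * 20)
encoded = b""
ok = attestor.functions.verifyWithDataAttestation(verifier, encoded).call()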
@@ -1,4 +1,23 @@
[
+  {
+    "inputs": [
+      {
+        "internalType": "int256[]",
+        "name": "quantized_data",
+        "type": "int256[]"
+      }
+    ],
+    "name": "check_is_valid_field_element",
+    "outputs": [
+      {
+        "internalType": "uint256[]",
+        "name": "output",
+        "type": "uint256[]"
+      }
+    ],
+    "stateMutability": "pure",
+    "type": "function"
+  },
  {
    "inputs": [
      {
@@ -17,12 +36,41 @@
        "type": "uint256[]"
      }
    ],
-    "name": "quantize_data",
+    "name": "quantize_data_multi",
    "outputs": [
      {
-        "internalType": "int64[]",
+        "internalType": "int256[]",
        "name": "quantized_data",
-        "type": "int64[]"
+        "type": "int256[]"
      }
    ],
    "stateMutability": "pure",
    "type": "function"
  },
+  {
+    "inputs": [
+      {
+        "internalType": "bytes",
+        "name": "data",
+        "type": "bytes"
+      },
+      {
+        "internalType": "uint256",
+        "name": "decimals",
+        "type": "uint256"
+      },
+      {
+        "internalType": "uint256[]",
+        "name": "scales",
+        "type": "uint256[]"
+      }
+    ],
+    "name": "quantize_data_single",
+    "outputs": [
+      {
+        "internalType": "int256[]",
+        "name": "quantized_data",
+        "type": "int256[]"
+      }
+    ],
+    "stateMutability": "pure",
@@ -163,6 +163,253 @@ contract SwapProofCommitments {
    } /// end checkKzgCommits
}

contract DataAttestationSingle is LoadInstances, SwapProofCommitments {
    /**
     * @notice Struct used to make a view only call to the account to fetch the data that EZKL reads from.
     * @param the address of the account to make calls to
     * @param the abi encoded function calls to make to the `contractAddress`
     */
    struct AccountCall {
        address contractAddress;
        bytes callData;
        uint256 decimals;
    }
    AccountCall public accountCall;

    uint[] scales;

    address public admin;

    /**
     * @notice EZKL P value
     * @dev In order to prevent the verifier from accepting two versions of the same pubInput, n and the quantity (n + P), where n + P <= 2^256, we require that all instances are strictly less than P.
     * @dev The reason for this is that the assembly code of the verifier performs all arithmetic operations modulo P and as a consequence can't distinguish between n and n + P.
     */
    uint256 constant ORDER =
        uint256(
            0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
        );

    uint256 constant INPUT_LEN = 0;

    uint256 constant OUTPUT_LEN = 0;

    uint8 public instanceOffset;

    /**
     * @dev Initialize the contract with account calls the EZKL model will read from.
     * @param _contractAddresses - The calls to all the contracts EZKL reads storage from.
     * @param _callData - The abi encoded function calls to make to the `contractAddress` that EZKL reads storage from.
     */
    constructor(
        address _contractAddresses,
        bytes memory _callData,
        uint256 _decimals,
        uint[] memory _scales,
        uint8 _instanceOffset,
        address _admin
    ) {
        admin = _admin;
        for (uint i; i < _scales.length; i++) {
            scales.push(1 << _scales[i]);
        }
        populateAccountCalls(_contractAddresses, _callData, _decimals);
        instanceOffset = _instanceOffset;
    }

    function updateAdmin(address _admin) external {
        require(msg.sender == admin, "Only admin can update admin");
        if (_admin == address(0)) {
            revert();
        }
        admin = _admin;
    }

    function updateAccountCalls(
        address _contractAddresses,
        bytes memory _callData,
        uint256 _decimals
    ) external {
        require(msg.sender == admin, "Only admin can update account calls");
        populateAccountCalls(_contractAddresses, _callData, _decimals);
    }

    function populateAccountCalls(
        address _contractAddresses,
        bytes memory _callData,
        uint256 _decimals
    ) internal {
        AccountCall memory _accountCall = accountCall;
        _accountCall.contractAddress = _contractAddresses;
        _accountCall.callData = _callData;
        _accountCall.decimals = 10 ** _decimals;
        accountCall = _accountCall;
    }

    function mulDiv(
        uint256 x,
        uint256 y,
        uint256 denominator
    ) internal pure returns (uint256 result) {
        unchecked {
            uint256 prod0;
            uint256 prod1;
            assembly {
                let mm := mulmod(x, y, not(0))
                prod0 := mul(x, y)
                prod1 := sub(sub(mm, prod0), lt(mm, prod0))
            }

            if (prod1 == 0) {
                return prod0 / denominator;
            }

            require(denominator > prod1, "Math: mulDiv overflow");

            uint256 remainder;
            assembly {
                remainder := mulmod(x, y, denominator)
                prod1 := sub(prod1, gt(remainder, prod0))
                prod0 := sub(prod0, remainder)
            }

            uint256 twos = denominator & (~denominator + 1);
            assembly {
                denominator := div(denominator, twos)
                prod0 := div(prod0, twos)
                twos := add(div(sub(0, twos), twos), 1)
            }

            prod0 |= prod1 * twos;

            uint256 inverse = (3 * denominator) ^ 2;

            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;
            inverse *= 2 - denominator * inverse;

            result = prod0 * inverse;
            return result;
        }
    }
    /**
     * @dev Quantize the data returned from the account calls to the scale used by the EZKL model.
     * @param x - One of the elements of the data returned from the account calls
     * @param _decimals - Number of base 10 decimals to scale the data by.
     * @param _scale - The base 2 scale used to convert the floating point value into a fixed point value.
     */
    function quantizeData(
        int x,
        uint256 _decimals,
        uint256 _scale
    ) internal pure returns (int256 quantized_data) {
        bool neg = x < 0;
        if (neg) x = -x;
        uint output = mulDiv(uint256(x), _scale, _decimals);
        if (mulmod(uint256(x), _scale, _decimals) * 2 >= _decimals) {
            output += 1;
        }
        quantized_data = neg ? -int256(output) : int256(output);
    }
    /**
     * @dev Make a static call to the account to fetch the data that EZKL reads from.
     * @param target - The address of the account to make calls to.
     * @param data - The abi encoded function calls to make to the `contractAddress` that EZKL reads storage from.
     * @return The data returned from the account calls. (Must come from either a view or pure function. Will throw an error otherwise)
     */
    function staticCall(
        address target,
        bytes memory data
    ) internal view returns (bytes memory) {
        (bool success, bytes memory returndata) = target.staticcall(data);
        if (success) {
            if (returndata.length == 0) {
                require(
                    target.code.length > 0,
                    "Address: call to non-contract"
                );
            }
            return returndata;
        } else {
            revert("Address: low-level call failed");
        }
    }
    /**
     * @dev Convert the fixed point quantized data into a field element.
     * @param x - The quantized data.
     * @return field_element - The field element.
     */
    function toFieldElement(
        int256 x
    ) internal pure returns (uint256 field_element) {
        // The casting down to uint256 is safe because the order is about 2^254, and the value
        // of x ranges from -2^127 to 2^127, so x + int(ORDER) is always positive.
        return uint256(x + int(ORDER)) % ORDER;
    }

    /**
     * @dev Make the account calls to fetch the data that EZKL reads from and attest to the data.
     * @param instances - The public instances to the proof (the data in the proof that is publicly accessible to the verifier).
     */
    function attestData(uint256[] memory instances) internal view {
        require(
            instances.length >= INPUT_LEN + OUTPUT_LEN,
            "Invalid public inputs length"
        );
        AccountCall memory _accountCall = accountCall;
        uint[] memory _scales = scales;
        bytes memory returnData = staticCall(
            _accountCall.contractAddress,
            _accountCall.callData
        );
        int256[] memory x = abi.decode(returnData, (int256[]));
        uint _offset;
        int output = quantizeData(x[0], _accountCall.decimals, _scales[0]);
        uint field_element = toFieldElement(output);
        for (uint i = 0; i < x.length; i++) {
            if (field_element != instances[i + instanceOffset]) {
                _offset += 1;
            } else {
                break;
            }
        }
        uint length = x.length - _offset;
        for (uint i = 1; i < length; i++) {
            output = quantizeData(x[i], _accountCall.decimals, _scales[i]);
            field_element = toFieldElement(output);
            require(
                field_element == instances[i + instanceOffset + _offset],
                "Public input does not match"
            );
        }
    }

    /**
     * @dev Verify the proof with the data attestation.
     * @param verifier - The address of the verifier contract.
     * @param encoded - The verifier calldata.
     */
    function verifyWithDataAttestation(
        address verifier,
        bytes calldata encoded
    ) public view returns (bool) {
        require(verifier.code.length > 0, "Address: call to non-contract");
        attestData(getInstancesCalldata(encoded));
        // static call the verifier contract to verify the proof
        (bool success, bytes memory returndata) = verifier.staticcall(encoded);

        if (success) {
            return abi.decode(returndata, (bool));
        } else {
            revert("low-level call to verifier failed");
        }
    }
}
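To make the fixed-point arithmetic above easier to audit, here is a small Python mirror of `quantizeData` and `toFieldElement` (Python's arbitrary-precision ints stand in for `mulDiv`); the last assertion illustrates the doc comment's point that n and n + P are indistinguishable modulo P, which is why all instances must be strictly less than P:

# BN254 scalar field order, matching the contract's ORDER constant
P = 0x30644E72E131A029B85045B68181585D2833E84879B9709143E1F593F0000001


def quantize_data(x: int, decimals: int, scale: int) -> int:
    # mirror of quantizeData: decimals = 10**d, scale = 1 << s
    neg = x < 0
    if neg:
        x = -x
    output = (x * scale) // decimals  # mulDiv via Python big ints
    if (x * scale) % decimals * 2 >= decimals:
        output += 1  # round half away from zero, as the contract does
    return -output if neg else output


def to_field_element(x: int) -> int:
    # mirror of toFieldElement: map a signed quantized value into [0, P)
    return (x + P) % P


assert quantize_data(1_500_000, 10**6, 1 << 7) == 192  # 1.5 at scale 2**7
assert to_field_element(-1) == P - 1
n = 42
assert to_field_element(n) == to_field_element(n + P)  # n and n + P collide mod P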
// This contract serves as a Data Attestation Verifier for the EZKL model.
// It is designed to read and attest to instances of proofs generated from a specified circuit.
// It is particularly constructed to read only int256 data from specified on-chain contracts' view functions.
@@ -173,11 +420,11 @@ contract SwapProofCommitments {
// 3. Static Calls: Makes static calls to fetch data from other contracts. See the `staticCall` method.
// 4. Field Element Conversion: The fixed-point representation is then converted into a field element modulo P using the `toFieldElement` method.
// 5. Data Attestation: The `attestData` method validates that the public instances match the data fetched and processed by the contract.
-// 6. Proof Verification: The `verifyWithDataAttestation` method parses the instances out of the encoded calldata and calls the `attestData` method to validate the public instances,
+// 6. Proof Verification: The `verifyWithDataAttestationMulti` method parses the instances out of the encoded calldata and calls the `attestData` method to validate the public instances,
// 6b. Optional KZG Commitment Verification: It also checks the KZG commitments in the proof against the expected commitments using the `checkKzgCommits` method.
//     then calls the `verifyProof` method to verify the proof on the verifier.

-contract DataAttestation is LoadInstances, SwapProofCommitments {
+contract DataAttestationMulti is LoadInstances, SwapProofCommitments {
    /**
     * @notice Struct used to make view only calls to accounts to fetch the data that EZKL reads from.
     * @param the address of the account to make calls to
@@ -1,4 +1,4 @@
-ezkl==0.0.0
+ezkl
sphinx
sphinx-rtd-theme
sphinxcontrib-napoleon
@@ -592,7 +592,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.12.2"
+   "version": "3.12.5"
  },
  "orig_nbformat": 4
 },
@@ -648,10 +648,10 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.9.15"
+   "version": "3.12.7"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
}
examples/notebooks/felt_conversion_test.ipynb (new file, 130 lines; diff suppressed because one or more lines are too long)
@@ -271,7 +271,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.12.2"
+   "version": "3.12.7"
  }
 },
 "nbformat": 4,
examples/notebooks/neural_bow.ipynb (new file, 766 lines)
@@ -0,0 +1,766 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "This is a zk version of the tutorial found [here](https://github.com/bentrevett/pytorch-sentiment-analysis/blob/main/1%20-%20Neural%20Bag%20of%20Words.ipynb). The original tutorial is part of the PyTorch Sentiment Analysis series by Ben Trevett.\n",
    "\n",
    "1 - NBoW\n",
    "\n",
    "In this series we'll be building a machine learning model to perform sentiment analysis -- a subset of text classification where the task is to detect if a given sentence is positive or negative -- using PyTorch and torchtext. The dataset used will be movie reviews from the IMDb dataset, which we'll obtain using the datasets library.\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "Preparing Data\n",
    "\n",
    "Before we can implement our NBoW model, we first have to perform quite a few steps to get our data ready to use. NLP usually requires quite a lot of data wrangling beforehand, though libraries such as datasets and torchtext handle most of this for us.\n",
    "\n",
    "The steps to take are:\n",
    "\n",
    "    1. importing modules\n",
    "    2. loading data\n",
    "    3. tokenizing data\n",
    "    4. creating data splits\n",
    "    5. creating a vocabulary\n",
    "    6. numericalizing data\n",
    "    7. creating the data loaders\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "! pip install torchtext datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import collections\n",
    "\n",
    "import datasets\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import torchtext\n",
    "import tqdm\n",
    "\n",
    "# It is usually good practice to run your experiments multiple times with different random seeds -- both to measure the variance of your model and also to avoid having results only calculated with either \"good\" or \"bad\" seeds, i.e. being very lucky or unlucky with the randomness in the training process.\n",
    "\n",
    "seed = 1234\n",
    "\n",
    "np.random.seed(seed)\n",
    "torch.manual_seed(seed)\n",
    "torch.cuda.manual_seed(seed)\n",
    "torch.backends.cudnn.deterministic = True\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data, test_data = datasets.load_dataset(\"imdb\", split=[\"train\", \"test\"])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We can check the features attribute of a split to get more information about the features. We can see that text is a Value of dtype=string -- in other words, it's a string -- and that label is a ClassLabel. A ClassLabel means the feature is an integer representation of which class the example belongs to. num_classes=2 means that our labels are one of two values, 0 or 1, and names=['neg', 'pos'] gives us the human-readable versions of those values. Thus, a label of 0 means the example is a negative review and a label of 1 means the example is a positive review."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data.features\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data[0]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "One of the first things we need to do to our data is tokenize it. Machine learning models aren't designed to handle strings, they're designed to handle numbers. So what we need to do is break down our string into individual tokens, and then convert these tokens to numbers. We'll get to the conversion later, but first we'll look at tokenization.\n",
    "\n",
    "Tokenization involves using a tokenizer to process the strings in our dataset. A tokenizer is a function that goes from a string to a list of strings. There are many types of tokenizers available, but we're going to use a relatively simple one provided by torchtext called the basic_english tokenizer. We load our tokenizer as such:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer = torchtext.data.utils.get_tokenizer(\"basic_english\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def tokenize_example(example, tokenizer, max_length):\n",
    "    tokens = tokenizer(example[\"text\"])[:max_length]\n",
    "    return {\"tokens\": tokens}\n",
    "\n",
    "\n",
    "max_length = 256\n",
    "\n",
    "train_data = train_data.map(\n",
    "    tokenize_example, fn_kwargs={\"tokenizer\": tokenizer, \"max_length\": max_length}\n",
    ")\n",
    "test_data = test_data.map(\n",
    "    tokenize_example, fn_kwargs={\"tokenizer\": tokenizer, \"max_length\": max_length}\n",
    ")\n",
    "\n",
    "\n",
    "# create validation data\n",
    "# Why have both a validation set and a test set? Your test set represents the real world data that you'd see if you actually deployed this model. You won't be able to see what data your model will be fed once deployed, and your test set is supposed to reflect that. Every time we tune our model hyperparameters or training set-up to make it do a bit better on the test set, we leak information from the test set into the training process. If we do this too often then we begin to overfit on the test set. Hence, we need some data which can act as a \"proxy\" test set which we can look at more frequently in order to evaluate how well our model actually does on unseen data -- this is the validation set.\n",
    "\n",
    "test_size = 0.25\n",
    "\n",
    "train_valid_data = train_data.train_test_split(test_size=test_size)\n",
    "train_data = train_valid_data[\"train\"]\n",
    "valid_data = train_valid_data[\"test\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Next, we have to build a vocabulary. This is a look-up table where every unique token in your dataset has a corresponding index (an integer).\n",
    "\n",
    "We do this as machine learning models cannot operate on strings, only numerical values. Each index is used to construct a one-hot vector for each token. A one-hot vector is a vector where all the elements are 0, except one, which is 1, and the dimensionality is the total number of unique tokens in your vocabulary, commonly denoted by V."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "min_freq = 5\n",
    "special_tokens = [\"<unk>\", \"<pad>\"]\n",
    "\n",
    "vocab = torchtext.vocab.build_vocab_from_iterator(\n",
    "    train_data[\"tokens\"],\n",
    "    min_freq=min_freq,\n",
    "    specials=special_tokens,\n",
    ")\n",
    "\n",
    "# We store the indices of the unknown and padding tokens (zero and one, respectively) in variables, as we'll use these further on in this notebook.\n",
    "\n",
    "unk_index = vocab[\"<unk>\"]\n",
    "pad_index = vocab[\"<pad>\"]\n",
    "\n",
    "\n",
    "vocab.set_default_index(unk_index)\n",
    "\n",
    "# To look-up a list of tokens, we can use the vocabulary's lookup_indices method.\n",
    "vocab.lookup_indices([\"hello\", \"world\", \"some_token\", \"<pad>\"])"
   ]
  },
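  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a quick added illustration (not part of the original tutorial): each vocabulary index corresponds to a one-hot vector of dimensionality V, as described above."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn.functional as F\n",
    "\n",
    "ids = vocab.lookup_indices([\"hello\", \"world\"])\n",
    "one_hot = F.one_hot(torch.tensor(ids), num_classes=len(vocab))\n",
    "one_hot.shape  # [2, V]\n"
   ]
  },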
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now we have our vocabulary, we can numericalize our data. This involves converting the tokens within our dataset into indices. Similar to how we tokenized our data using the Dataset.map method, we'll define a function that takes an example and our vocabulary, gets the index for each token in each example and then creates an ids field which contains the numericalized tokens."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def numericalize_example(example, vocab):\n",
    "    ids = vocab.lookup_indices(example[\"tokens\"])\n",
    "    return {\"ids\": ids}\n",
    "\n",
    "train_data = train_data.map(numericalize_example, fn_kwargs={\"vocab\": vocab})\n",
    "valid_data = valid_data.map(numericalize_example, fn_kwargs={\"vocab\": vocab})\n",
    "test_data = test_data.map(numericalize_example, fn_kwargs={\"vocab\": vocab})\n",
    "\n",
    "train_data = train_data.with_format(type=\"torch\", columns=[\"ids\", \"label\"])\n",
    "valid_data = valid_data.with_format(type=\"torch\", columns=[\"ids\", \"label\"])\n",
    "test_data = test_data.with_format(type=\"torch\", columns=[\"ids\", \"label\"])\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The final step of preparing the data is creating the data loaders. We can iterate over a data loader to retrieve batches of examples. This is also where we will perform any padding that is necessary.\n",
    "\n",
    "We first need to define a function to collate a batch, consisting of a list of examples, into what we want our data loader to output.\n",
    "\n",
    "Here, our desired output from the data loader is a dictionary with keys of \"ids\" and \"label\".\n",
    "\n",
    "The value of batch[\"ids\"] should be a tensor of shape [batch size, length], where length is the length of the longest sentence (in terms of tokens) within the batch, and all sentences shorter than this should be padded to that length.\n",
    "\n",
    "The value of batch[\"label\"] should be a tensor of shape [batch size] consisting of the label for each sentence in the batch.\n",
    "\n",
    "We define a function, get_collate_fn, which is passed the pad token index and returns the actual collate function. Within the actual collate function, collate_fn, we get a list of \"ids\" tensors for each example in the batch, and then use the pad_sequence function, which converts the list of tensors into the desired [batch size, length] shaped tensor and performs padding using the specified pad_index. By default, pad_sequence will return a [length, batch size] shaped tensor, but by setting batch_first=True, these two dimensions are switched. We get a list of \"label\" tensors and convert the list of tensors into a single [batch size] shaped tensor. A sanity check on one batch follows after the cell below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_collate_fn(pad_index):\n",
    "    def collate_fn(batch):\n",
    "        batch_ids = [i[\"ids\"] for i in batch]\n",
    "        batch_ids = nn.utils.rnn.pad_sequence(\n",
    "            batch_ids, padding_value=pad_index, batch_first=True\n",
    "        )\n",
    "        batch_label = [i[\"label\"] for i in batch]\n",
    "        batch_label = torch.stack(batch_label)\n",
    "        batch = {\"ids\": batch_ids, \"label\": batch_label}\n",
    "        return batch\n",
    "\n",
    "    return collate_fn\n",
    "\n",
    "def get_data_loader(dataset, batch_size, pad_index, shuffle=False):\n",
    "    collate_fn = get_collate_fn(pad_index)\n",
    "    data_loader = torch.utils.data.DataLoader(\n",
    "        dataset=dataset,\n",
    "        batch_size=batch_size,\n",
    "        collate_fn=collate_fn,\n",
    "        shuffle=shuffle,\n",
    "    )\n",
    "    return data_loader\n",
    "\n",
    "\n",
    "batch_size = 512\n",
    "\n",
    "train_data_loader = get_data_loader(train_data, batch_size, pad_index, shuffle=True)\n",
    "valid_data_loader = get_data_loader(valid_data, batch_size, pad_index)\n",
    "test_data_loader = get_data_loader(test_data, batch_size, pad_index)"
   ]
  },
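  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A small added sanity check (not in the original tutorial): pull one batch from the loader to confirm the [batch size, length] padding described above."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch = next(iter(train_data_loader))\n",
    "batch[\"ids\"].shape, batch[\"label\"].shape\n"
   ]
  },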
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"class NBoW(nn.Module):\n",
|
||||
" def __init__(self, vocab_size, embedding_dim, output_dim, pad_index):\n",
|
||||
" super().__init__()\n",
|
||||
" self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_index)\n",
|
||||
" self.fc = nn.Linear(embedding_dim, output_dim)\n",
|
||||
"\n",
|
||||
" def forward(self, ids):\n",
|
||||
" # ids = [batch size, seq len]\n",
|
||||
" embedded = self.embedding(ids)\n",
|
||||
" # embedded = [batch size, seq len, embedding dim]\n",
|
||||
" pooled = embedded.mean(dim=1)\n",
|
||||
" # pooled = [batch size, embedding dim]\n",
|
||||
" prediction = self.fc(pooled)\n",
|
||||
" # prediction = [batch size, output dim]\n",
|
||||
" return prediction\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"vocab_size = len(vocab)\n",
|
||||
"embedding_dim = 300\n",
|
||||
"output_dim = len(train_data.unique(\"label\"))\n",
|
||||
"\n",
|
||||
"model = NBoW(vocab_size, embedding_dim, output_dim, pad_index)\n",
|
||||
"\n",
|
||||
"def count_parameters(model):\n",
|
||||
" return sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print(f\"The model has {count_parameters(model):,} trainable parameters\")\n",
|
||||
"\n",
|
||||
"vectors = torchtext.vocab.GloVe()\n",
|
||||
"\n",
|
||||
"pretrained_embedding = vectors.get_vecs_by_tokens(vocab.get_itos())\n",
|
||||
"\n",
|
||||
"optimizer = optim.Adam(model.parameters())\n",
|
||||
"\n",
|
||||
"criterion = nn.CrossEntropyLoss()\n",
|
||||
"\n",
|
||||
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
|
||||
"\n",
|
||||
"model = model.to(device)\n",
|
||||
"criterion = criterion.to(device)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def train(data_loader, model, criterion, optimizer, device):\n",
|
||||
" model.train()\n",
|
||||
" epoch_losses = []\n",
|
||||
" epoch_accs = []\n",
|
||||
" for batch in tqdm.tqdm(data_loader, desc=\"training...\"):\n",
|
||||
" ids = batch[\"ids\"].to(device)\n",
|
||||
" label = batch[\"label\"].to(device)\n",
|
||||
" prediction = model(ids)\n",
|
||||
" loss = criterion(prediction, label)\n",
|
||||
" accuracy = get_accuracy(prediction, label)\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
" loss.backward()\n",
|
||||
" optimizer.step()\n",
|
||||
" epoch_losses.append(loss.item())\n",
|
||||
" epoch_accs.append(accuracy.item())\n",
|
||||
" return np.mean(epoch_losses), np.mean(epoch_accs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def evaluate(data_loader, model, criterion, device):\n",
|
||||
" model.eval()\n",
|
||||
" epoch_losses = []\n",
|
||||
" epoch_accs = []\n",
|
||||
" with torch.no_grad():\n",
|
||||
" for batch in tqdm.tqdm(data_loader, desc=\"evaluating...\"):\n",
|
||||
" ids = batch[\"ids\"].to(device)\n",
|
||||
" label = batch[\"label\"].to(device)\n",
|
||||
" prediction = model(ids)\n",
|
||||
" loss = criterion(prediction, label)\n",
|
||||
" accuracy = get_accuracy(prediction, label)\n",
|
||||
" epoch_losses.append(loss.item())\n",
|
||||
" epoch_accs.append(accuracy.item())\n",
|
||||
" return np.mean(epoch_losses), np.mean(epoch_accs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_accuracy(prediction, label):\n",
|
||||
" batch_size, _ = prediction.shape\n",
|
||||
" predicted_classes = prediction.argmax(dim=-1)\n",
|
||||
" correct_predictions = predicted_classes.eq(label).sum()\n",
|
||||
" accuracy = correct_predictions / batch_size\n",
|
||||
" return accuracy"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"n_epochs = 10\n",
|
||||
"best_valid_loss = float(\"inf\")\n",
|
||||
"\n",
|
||||
"metrics = collections.defaultdict(list)\n",
|
||||
"\n",
|
||||
"for epoch in range(n_epochs):\n",
|
||||
" train_loss, train_acc = train(\n",
|
||||
" train_data_loader, model, criterion, optimizer, device\n",
|
||||
" )\n",
|
||||
" valid_loss, valid_acc = evaluate(valid_data_loader, model, criterion, device)\n",
|
||||
" metrics[\"train_losses\"].append(train_loss)\n",
|
||||
" metrics[\"train_accs\"].append(train_acc)\n",
|
||||
" metrics[\"valid_losses\"].append(valid_loss)\n",
|
||||
" metrics[\"valid_accs\"].append(valid_acc)\n",
|
||||
" if valid_loss < best_valid_loss:\n",
|
||||
" best_valid_loss = valid_loss\n",
|
||||
" torch.save(model.state_dict(), \"nbow.pt\")\n",
|
||||
" print(f\"epoch: {epoch}\")\n",
|
||||
" print(f\"train_loss: {train_loss:.3f}, train_acc: {train_acc:.3f}\")\n",
|
||||
" print(f\"valid_loss: {valid_loss:.3f}, valid_acc: {valid_acc:.3f}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fig = plt.figure(figsize=(10, 6))\n",
|
||||
"ax = fig.add_subplot(1, 1, 1)\n",
|
||||
"ax.plot(metrics[\"train_losses\"], label=\"train loss\")\n",
|
||||
"ax.plot(metrics[\"valid_losses\"], label=\"valid loss\")\n",
|
||||
"ax.set_xlabel(\"epoch\")\n",
|
||||
"ax.set_ylabel(\"loss\")\n",
|
||||
"ax.set_xticks(range(n_epochs))\n",
|
||||
"ax.legend()\n",
|
||||
"ax.grid()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fig = plt.figure(figsize=(10, 6))\n",
|
||||
"ax = fig.add_subplot(1, 1, 1)\n",
|
||||
"ax.plot(metrics[\"train_accs\"], label=\"train accuracy\")\n",
|
||||
"ax.plot(metrics[\"valid_accs\"], label=\"valid accuracy\")\n",
|
||||
"ax.set_xlabel(\"epoch\")\n",
|
||||
"ax.set_ylabel(\"loss\")\n",
|
||||
"ax.set_xticks(range(n_epochs))\n",
|
||||
"ax.legend()\n",
|
||||
"ax.grid()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model.load_state_dict(torch.load(\"nbow.pt\"))\n",
|
||||
"\n",
|
||||
"test_loss, test_acc = evaluate(test_data_loader, model, criterion, device)\n",
|
||||
"\n",
|
||||
"print(f\"test_loss: {test_loss:.3f}, test_acc: {test_acc:.3f}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def predict_sentiment(text, model, tokenizer, vocab, device):\n",
|
||||
" tokens = tokenizer(text)\n",
|
||||
" ids = vocab.lookup_indices(tokens)\n",
|
||||
" tensor = torch.LongTensor(ids).unsqueeze(dim=0).to(device)\n",
|
||||
" prediction = model(tensor).squeeze(dim=0)\n",
|
||||
" probability = torch.softmax(prediction, dim=-1)\n",
|
||||
" predicted_class = prediction.argmax(dim=-1).item()\n",
|
||||
" predicted_probability = probability[predicted_class].item()\n",
|
||||
" return predicted_class, predicted_probability"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This film is terrible!\"\n",
|
||||
"\n",
|
||||
"predict_sentiment(text, model, tokenizer, vocab, device)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This film is great!\"\n",
|
||||
"\n",
|
||||
"predict_sentiment(text, model, tokenizer, vocab, device)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This film is not terrible, it's great!\"\n",
|
||||
"\n",
|
||||
"predict_sentiment(text, model, tokenizer, vocab, device)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"This film is not great, it's terrible!\"\n",
|
||||
"\n",
|
||||
"predict_sentiment(text, model, tokenizer, vocab, device)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def text_to_tensor(text, tokenizer, vocab, device):\n",
|
||||
" tokens = tokenizer(text)\n",
|
||||
" ids = vocab.lookup_indices(tokens)\n",
|
||||
" tensor = torch.LongTensor(ids).unsqueeze(dim=0).to(device)\n",
|
||||
" return tensor\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we do onnx stuff to get the data ready for the zk-circuit."
|
||||
]
|
||||
},
|
||||
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"import json\n",
"\n",
"text = \"This film is terrible!\"\n",
"x = text_to_tensor(text, tokenizer, vocab, device)\n",
"\n",
"# Flips the neural net into inference mode\n",
"model.eval()\n",
"model.to('cpu')\n",
"\n",
"model_path = \"network.onnx\"\n",
"data_path = \"input.json\"\n",
"\n",
"# Export the model\n",
"torch.onnx.export(model, # model being run\n",
" x, # model input (or a tuple for multiple inputs)\n",
" model_path, # where to save the model (can be a file or file-like object)\n",
" export_params=True, # store the trained parameter weights inside the model file\n",
" opset_version=10, # the ONNX version to export the model to\n",
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
" input_names = ['input'], # the model's input names\n",
" output_names = ['output'], # the model's output names\n",
" dynamic_axes={'input' : {0 : 'batch_size'}, # variable length axes\n",
" 'output' : {0 : 'batch_size'}})\n",
"\n",
"\n",
"data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
"\n",
"data_json = dict(input_data = [data_array])\n",
"\n",
"print(data_json)\n",
"\n",
"# Serialize data into file:\n",
"json.dump(data_json, open(data_path, 'w'))\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import ezkl\n",
"\n",
"run_args = ezkl.PyRunArgs()\n",
"run_args.logrows = 23\n",
"run_args.scale_rebase_multiplier = 10\n",
"# inputs should be auditable by all\n",
"run_args.input_visibility = \"public\"\n",
"# same with outputs\n",
"run_args.output_visibility = \"public\"\n",
"# for simplicity, we'll just use the fixed model visibility: i.e it is public and can't be changed by the prover\n",
"run_args.param_visibility = \"fixed\"\n",
"\n",
"\n",
"# TODO: Dictionary outputs\n",
"res = ezkl.gen_settings(py_run_args=run_args)\n",
"assert res == True\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"res = ezkl.compile_circuit()\n",
"assert res == True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# srs path\n",
"res = await ezkl.get_srs()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# now generate the witness file\n",
"res = await ezkl.gen_witness()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"res = ezkl.mock()\n",
"assert res == True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"# HERE WE SETUP THE CIRCUIT PARAMS\n",
"# WE GOT KEYS\n",
"# WE GOT CIRCUIT PARAMETERS\n",
"# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
"\n",
"res = ezkl.setup()\n",
"\n",
"assert res == True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# GENERATE A PROOF\n",
"res = ezkl.prove(proof_path=\"proof.json\")\n",
"\n",
"print(res)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# VERIFY IT\n",
"res = ezkl.verify()\n",
"\n",
"assert res == True\n",
"print(\"verified\")\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can also verify it on chain by creating an onchain verifier"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# check if notebook is in colab\n",
"try:\n",
"    import google.colab\n",
"    import subprocess\n",
"    import sys\n",
"    subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"solc-select\"])\n",
"    !solc-select install 0.8.20\n",
"    !solc-select use 0.8.20\n",
"    !solc --version\n",
"    import os\n",
"\n",
"# rely on local installation if the notebook is not in colab\n",
"except:\n",
"    import os\n",
"    pass"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"res = await ezkl.create_evm_verifier()\n",
"assert res == True\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You should see a `Verifier.sol`. Right-click and save it locally.\n",
"\n",
"Now go to [https://remix.ethereum.org](https://remix.ethereum.org).\n",
"\n",
"Create a new file within Remix and copy the verifier code over.\n",
"\n",
"Finally, compile the code and deploy. For the demo you can deploy to the test environment within Remix.\n",
"\n",
"If everything works, you will have deployed your verifier onchain! Copy the values in the cell above to the respective fields to test if the verifier is working."
]
},
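{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a rough sketch of how you might obtain that calldata programmatically rather than by hand (this assumes your ezkl build exposes the `encode_evm_calldata` binding; the file names are the ones used above):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: encode the proof into ABI-encoded calldata for the deployed verifier.\n",
"# encode_evm_calldata reads the proof file and writes the raw calldata bytes.\n",
"calldata = ezkl.encode_evm_calldata(\"proof.json\", \"calldata.bytes\")\n",
"print(bytes(calldata).hex())"
]
}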
],
"metadata": {
"kernelspec": {
"display_name": ".env",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -171,7 +171,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -328,7 +328,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 26,
"id": "171702d3",
"metadata": {},
"outputs": [],
@@ -348,7 +348,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 27,
"id": "671dfdd5",
"metadata": {},
"outputs": [],
@@ -364,7 +364,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 28,
"id": "50eba2f4",
"metadata": {},
"outputs": [],
@@ -399,9 +399,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.12.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

763
examples/notebooks/univ3-da.ipynb
Normal file
@@ -0,0 +1,763 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# univ3-da-ezkl\n",
"\n",
"Here's an example leveraging EZKL whereby the inputs to the model are read and attested to from an on-chain source. For this setup we make a single call to a view function that returns an array of UniV3 historical TWAP price data that we will attest to on-chain. \n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"First we import the necessary dependencies and set up logging to be as informative as possible. "
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# check if notebook is in colab\n",
"try:\n",
"    # install ezkl\n",
"    import google.colab\n",
"    import subprocess\n",
"    import sys\n",
"    subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
"    subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
"\n",
"# rely on local installation of ezkl if the notebook is not in colab\n",
"except:\n",
"    pass\n",
"\n",
"\n",
"from torch import nn\n",
"import ezkl\n",
"import os\n",
"import json\n",
"import logging\n",
"\n",
"# uncomment for more descriptive logging \n",
"FORMAT = '%(levelname)s %(name)s %(asctime)-15s %(filename)s:%(lineno)d %(message)s'\n",
"logging.basicConfig(format=FORMAT)\n",
"logging.getLogger().setLevel(logging.DEBUG)\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we define our model. It is a very simple PyTorch model that has just one layer, an average pooling 2D layer. "
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"# Defines the model\n",
"\n",
"class MyModel(nn.Module):\n",
"    def __init__(self):\n",
"        super(MyModel, self).__init__()\n",
"        self.layer = nn.AvgPool2d(2, 1, (1, 1))\n",
"\n",
"    def forward(self, x):\n",
"        return self.layer(x)[0]\n",
"\n",
"\n",
"circuit = MyModel()\n",
"\n",
"# this is where you'd train your model"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"We omit training for purposes of this demonstration. We've marked where training would happen in the cell above. \n",
"Now we export the model to onnx and create a corresponding (randomly generated) input. This input data will eventually be stored on chain and read from according to the call_data field in the graph input.\n",
"\n",
"You can replace the random `x` with real data if you so wish. "
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"x = 0.1*torch.rand(1,*[3, 2, 2], requires_grad=True)\n",
"\n",
"# Flips the neural net into inference mode\n",
"circuit.eval()\n",
"\n",
"# Export the model\n",
"torch.onnx.export(circuit, # model being run\n",
" x, # model input (or a tuple for multiple inputs)\n",
" \"network.onnx\", # where to save the model (can be a file or file-like object)\n",
" export_params=True, # store the trained parameter weights inside the model file\n",
" opset_version=10, # the ONNX version to export the model to\n",
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
" input_names = ['input'], # the model's input names\n",
" output_names = ['output'], # the model's output names\n",
" dynamic_axes={'input' : {0 : 'batch_size'}, # variable length axes\n",
" 'output' : {0 : 'batch_size'}})\n",
"\n",
"data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
"\n",
"data = dict(input_data = [data_array])\n",
"\n",
"# Serialize data into file:\n",
"json.dump(data, open(\"input.json\", 'w'))\n",
"\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"We now define a function that will create a new anvil instance to which we will deploy our test contract. This contract will contain in its storage the data that we will read from and attest to. In production you would not need to set up a local anvil instance. Instead you would replace RPC_URL with the actual RPC endpoint of the chain you are deploying your verifiers to, reading the data on that chain."
]
},
{
"cell_type": "code",
"execution_count": 44,
"metadata": {},
"outputs": [],
"source": [
"import subprocess\n",
"import time\n",
"import threading\n",
"\n",
"# make sure anvil is running locally\n",
"# $ anvil -p 3030\n",
"\n",
"RPC_URL = \"http://localhost:3030\"\n",
"\n",
"# Save process globally\n",
"anvil_process = None\n",
"\n",
"def start_anvil():\n",
"    global anvil_process\n",
"    if anvil_process is None:\n",
"        anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--fork-url\", \"https://arb1.arbitrum.io/rpc\", \"--code-size-limit=41943040\"])\n",
"        if anvil_process.returncode is not None:\n",
"            raise Exception(\"failed to start anvil process\")\n",
"        time.sleep(3)\n",
"\n",
"def stop_anvil():\n",
"    global anvil_process\n",
"    if anvil_process is not None:\n",
"        anvil_process.terminate()\n",
"        anvil_process = None\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"We define our `PyRunArgs` object, which contains the visibility parameters for our model. \n",
"- `input_visibility` defines the visibility of the model inputs\n",
"- `param_visibility` defines the visibility of the model weights, constants, and parameters \n",
"- `output_visibility` defines the visibility of the model outputs\n",
"\n",
"Here we create the following setup:\n",
"- `input_visibility`: \"public\"\n",
"- `param_visibility`: \"private\"\n",
"- `output_visibility`: \"public\"\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"import ezkl\n",
"\n",
"model_path = os.path.join('network.onnx')\n",
"compiled_model_path = os.path.join('network.compiled')\n",
"pk_path = os.path.join('test.pk')\n",
"vk_path = os.path.join('test.vk')\n",
"settings_path = os.path.join('settings.json')\n",
"srs_path = os.path.join('kzg.srs')\n",
"data_path = os.path.join('input.json')\n",
"\n",
"run_args = ezkl.PyRunArgs()\n",
"run_args.input_visibility = \"public\"\n",
"run_args.param_visibility = \"private\"\n",
"run_args.output_visibility = \"public\"\n",
"run_args.num_inner_cols = 1\n",
"run_args.variables = [(\"batch_size\", 1)]"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we generate a settings file. This file basically instantiates a bunch of parameters that determine the circuit's shape, size, etc. Because of the way we represent nonlinearities in the circuit (using Halo2's [lookup tables](https://zcash.github.io/halo2/design/proving-system/lookup.html)), it is often best to _calibrate_ this settings file as some data can fall out of range of these lookups.\n",
"\n",
"You can pass a dataset for calibration that will be representative of real inputs you might find if and when you deploy the prover. Here we create a dummy calibration dataset for demonstration purposes. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# TODO: Dictionary outputs\n",
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
"assert res == True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# generate a bunch of dummy calibration data\n",
"cal_data = {\n",
"    \"input_data\": [(0.1*torch.rand(2, *[3, 2, 2])).flatten().tolist()],\n",
"}\n",
"\n",
"cal_path = os.path.join('val_data.json')\n",
"# save as json file\n",
"with open(cal_path, \"w\") as f:\n",
"    json.dump(cal_data, f)\n",
"\n",
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
"assert res == True"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The graph input for on-chain data sources is formatted completely differently compared to file based data sources.\n",
"\n",
"- For file data sources, we store the raw floating point values that eventually get quantized, converted into field elements, and stored in `witness.json` to be consumed by the circuit. The output data contains the expected floating point values returned as outputs from running your vanilla pytorch model on the given inputs.\n",
"- For on chain data sources, the input_data field contains all the data necessary to read and format the on chain data into something digestible by EZKL (aka field elements :-D). \n",
"\n",
"Here is what the schema for an on-chain data source graph input file should look like for a single call data source:\n",
" \n",
"```json\n",
"{\n",
"    \"input_data\": {\n",
"        \"rpc\": \"http://localhost:3030\", // The rpc endpoint of the chain you are deploying your verifier to\n",
"        \"calls\": {\n",
"            \"call_data\": \"1f3be514000000000000000000000000c6962004f452be9203591991d15f6b388e09e8d00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000500000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000\", // The abi encoded call data to a view function that returns an array of on-chain data points we are attesting to. \n",
"            \"decimals\": 0, // The number of decimal places of the large uint256 value. This is our way of representing large wei values as floating points on chain, since the evm only natively supports integer values.\n",
"            \"address\": \"9A213F53334279C128C37DA962E5472eCD90554f\", // The address of the contract that we are calling to get the data. \n",
"            \"len\": 12 // The number of data points returned by the view function (the length of the array)\n",
"        }\n",
"    }\n",
"}\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from web3 import Web3, HTTPProvider\n",
"from solcx import compile_standard\n",
"from decimal import Decimal\n",
"import json\n",
"import os\n",
"import torch\n",
"import requests\n",
"\n",
"# This function counts the decimal places of a floating point number\n",
"def count_decimal_places(num):\n",
"    num_str = str(num)\n",
"    if '.' in num_str:\n",
"        return len(num_str) - 1 - num_str.index('.')\n",
"    else:\n",
"        return 0\n",
"\n",
"# setup web3 instance\n",
"w3 = Web3(HTTPProvider(RPC_URL))\n",
"\n",
"def set_next_block_timestamp(anvil_url, timestamp):\n",
"    # Send the JSON-RPC request to Anvil\n",
"    payload = {\n",
"        \"jsonrpc\": \"2.0\",\n",
"        \"id\": 1,\n",
"        \"method\": \"evm_setNextBlockTimestamp\",\n",
"        \"params\": [timestamp]\n",
"    }\n",
"    response = requests.post(anvil_url, json=payload)\n",
"    if response.status_code == 200:\n",
"        print(f\"Next block timestamp set to: {timestamp}\")\n",
"    else:\n",
"        print(f\"Failed to set next block timestamp: {response.text}\")\n",
"\n",
"def on_chain_data(tensor):\n",
"    # Step 0: Convert the tensor to a flat list\n",
"    data = tensor.view(-1).tolist()\n",
"\n",
"    # Step 1: Prepare the calldata\n",
"    secondsAgo = [len(data) - 1 - i for i in range(len(data))]\n",
"\n",
"    # Step 2: Prepare and compile the UniTickAttestor contract\n",
"    contract_source_code = '''\n",
"    // SPDX-License-Identifier: MIT\n",
"    pragma solidity ^0.8.20;\n",
"\n",
"    /// @title Pool state that is not stored\n",
"    /// @notice Contains view functions to provide information about the pool that is computed rather than stored on the\n",
"    /// blockchain. The functions here may have variable gas costs.\n",
"    interface IUniswapV3PoolDerivedState {\n",
"        /// @notice Returns the cumulative tick and liquidity as of each timestamp `secondsAgo` from the current block timestamp\n",
"        /// @dev To get a time weighted average tick or liquidity-in-range, you must call this with two values, one representing\n",
"        /// the beginning of the period and another for the end of the period. E.g., to get the last hour time-weighted average tick,\n",
"        /// you must call it with secondsAgos = [3600, 0].\n",
"        /// @dev The time weighted average tick represents the geometric time weighted average price of the pool, in\n",
"        /// log base sqrt(1.0001) of token1 / token0. The TickMath library can be used to go from a tick value to a ratio.\n",
"        /// @param secondsAgos From how long ago each cumulative tick and liquidity value should be returned\n",
"        /// @return tickCumulatives Cumulative tick values as of each `secondsAgos` from the current block timestamp\n",
"        /// @return secondsPerLiquidityCumulativeX128s Cumulative seconds per liquidity-in-range value as of each `secondsAgos` from the current block\n",
"        /// timestamp\n",
"        function observe(\n",
"            uint32[] calldata secondsAgos\n",
"        )\n",
"            external\n",
"            view\n",
"            returns (\n",
"                int56[] memory tickCumulatives,\n",
"                uint160[] memory secondsPerLiquidityCumulativeX128s\n",
"            );\n",
"    }\n",
"\n",
"    /// @title Uniswap Wrapper around `pool.observe` that stores the parameters for fetching and then attesting to historical data\n",
"    /// @notice Provides functions to integrate with V3 pool oracle\n",
"    contract UniTickAttestor {\n",
"        /**\n",
"         * @notice Calculates time-weighted means of tick and liquidity for a given Uniswap V3 pool\n",
"         * @param pool Address of the pool that we want to observe\n",
"         * @param secondsAgo Number of seconds in the past from which to calculate the time-weighted means\n",
"         * @return tickCumulatives The cumulative tick values as of each `secondsAgo` from the current block timestamp\n",
"         */\n",
"        function consult(\n",
"            IUniswapV3PoolDerivedState pool,\n",
"            uint32[] memory secondsAgo\n",
"        ) public view returns (int256[] memory tickCumulatives) {\n",
"            tickCumulatives = new int256[](secondsAgo.length);\n",
"            (int56[] memory _ticks,) = pool.observe(secondsAgo);\n",
"            for (uint256 i = 0; i < secondsAgo.length; i++) {\n",
"                tickCumulatives[i] = int256(_ticks[i]);\n",
"            }\n",
"        }\n",
"    }\n",
"    '''\n",
"\n",
"    compiled_sol = compile_standard({\n",
"        \"language\": \"Solidity\",\n",
"        \"sources\": {\"UniTickAttestor.sol\": {\"content\": contract_source_code}},\n",
"        \"settings\": {\"outputSelection\": {\"*\": {\"*\": [\"metadata\", \"evm.bytecode\", \"abi\"]}}}\n",
"    })\n",
"\n",
"    # Get bytecode\n",
"    bytecode = compiled_sol['contracts']['UniTickAttestor.sol']['UniTickAttestor']['evm']['bytecode']['object']\n",
"\n",
"    # Get ABI\n",
"    # In production if you are reading from really large contracts you can just use\n",
"    # a stripped down version of the ABI of the contract you are calling, containing only the view functions you will fetch data from.\n",
"    abi = json.loads(compiled_sol['contracts']['UniTickAttestor.sol']['UniTickAttestor']['metadata'])['output']['abi']\n",
"\n",
"    # Step 3: Deploy the contract\n",
"    UniTickAttestor = w3.eth.contract(abi=abi, bytecode=bytecode)\n",
"    tx_hash = UniTickAttestor.constructor().transact()\n",
"    tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)\n",
"    # If you are deploying to production you can skip the 3 lines of code above and just instantiate the contract like this,\n",
"    # passing the address and abi of the contract you are fetching data from.\n",
"    contract = w3.eth.contract(address=tx_receipt['contractAddress'], abi=abi)\n",
"\n",
"    # Step 4: Interact with the contract\n",
"    call = contract.functions.consult(\n",
"        # Address of the UniV3 usdc-weth pool 0.005 fee\n",
"        \"0xC6962004f452bE9203591991D15f6b388e09E8D0\",\n",
"        secondsAgo\n",
"    ).build_transaction()\n",
"    result = contract.functions.consult(\n",
"        # Address of the UniV3 usdc-weth pool 0.005 fee\n",
"        \"0xC6962004f452bE9203591991D15f6b388e09E8D0\",\n",
"        secondsAgo\n",
"    ).call()\n",
"\n",
"    print(f'result: {result}')\n",
"    calldata = call['data'][2:]\n",
"\n",
"    time_stamp = w3.eth.get_block('latest')['timestamp']\n",
"\n",
"    print(f'time_stamp: {time_stamp}')\n",
"\n",
"    # Set the next block timestamp using the fetched time_stamp\n",
"    set_next_block_timestamp(RPC_URL, time_stamp)\n",
"\n",
"    # Prepare the calls_to_account object\n",
"    # If you were calling view functions across multiple contracts,\n",
"    # you would have multiple entries in the calls_to_account array,\n",
"    # one for each contract.\n",
"    call_to_account = {\n",
"        'call_data': calldata,\n",
"        'decimals': 0,\n",
"        'address': contract.address[2:], # remove the '0x' prefix\n",
"        'len': len(data),\n",
"    }\n",
"\n",
"    print(f'call_to_account: {call_to_account}')\n",
"\n",
"    return call_to_account\n",
"\n",
"# Now let's start the Anvil process. You don't need to do this if you are deploying to a non-local chain.\n",
"start_anvil()\n",
"\n",
"# Now let's call our function, passing in the same input tensor we used to export the model 2 cells above.\n",
"calls_to_account = on_chain_data(x)\n",
"\n",
"data = dict(input_data = {'rpc': RPC_URL, 'calls': calls_to_account })\n",
"\n",
"# Serialize on-chain data into file:\n",
"json.dump(data, open(\"input.json\", 'w'))"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"As we use Halo2 with KZG-commitments we need an SRS string from (preferably) a multi-party trusted setup ceremony. For an overview of the procedures for such a ceremony check out [this page](https://blog.ethereum.org/2023/01/16/announcing-kzg-ceremony). The `get_srs` command retrieves a correctly sized SRS given the calibrated settings file from [here](https://github.com/han0110/halo2-kzg-srs). \n",
"\n",
"These SRS were generated with [this](https://github.com/privacy-scaling-explorations/perpetualpowersoftau) ceremony. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"res = await ezkl.get_srs(settings_path)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"We now need to generate the circuit witness. These are the model outputs (and any hashes) that are generated when feeding the previously generated `input.json` through the circuit / model. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# !export RUST_BACKTRACE=1\n",
"\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Here we setup verifying and proving keys for the circuit. As the name suggests the proving key is needed for ... proving and the verifying key is needed for ... verifying. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# HERE WE SETUP THE CIRCUIT PARAMS\n",
"# WE GOT KEYS\n",
"# WE GOT CIRCUIT PARAMETERS\n",
"# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
"res = ezkl.setup(\n",
"        compiled_model_path,\n",
"        vk_path,\n",
"        pk_path,\n",
"    )\n",
"\n",
"assert res == True\n",
"assert os.path.isfile(vk_path)\n",
"assert os.path.isfile(pk_path)\n",
"assert os.path.isfile(settings_path)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we generate a full proof. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# GENERATE A PROOF\n",
"\n",
"proof_path = os.path.join('test.pf')\n",
"\n",
"res = ezkl.prove(\n",
"        witness_path,\n",
"        compiled_model_path,\n",
"        pk_path,\n",
"        proof_path,\n",
"        \"single\",\n",
"    )\n",
"\n",
"print(res)\n",
"assert os.path.isfile(proof_path)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"And verify it as a sanity check. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# VERIFY IT\n",
"\n",
"res = ezkl.verify(\n",
"        proof_path,\n",
"        settings_path,\n",
"        vk_path,\n",
"    )\n",
"\n",
"assert res == True\n",
"print(\"verified\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can now create and then deploy a vanilla evm verifier."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"abi_path = 'test.abi'\n",
"sol_code_path = 'test.sol'\n",
"\n",
"res = await ezkl.create_evm_verifier(\n",
"        vk_path,\n",
"        settings_path,\n",
"        sol_code_path,\n",
"        abi_path,\n",
"    )\n",
"assert res == True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"addr_path_verifier = \"addr_verifier.txt\"\n",
"\n",
"res = await ezkl.deploy_evm(\n",
"    addr_path_verifier,\n",
"    sol_code_path,\n",
"    'http://127.0.0.1:3030'\n",
")\n",
"\n",
"assert res == True"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"With the vanilla verifier deployed, we can now create the data attestation contract, which will read in the instances from the calldata to the verifier, attest to them, call the verifier and then return the result. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"abi_path = 'test.abi'\n",
"sol_code_path = 'test.sol'\n",
"input_path = 'input.json'\n",
"\n",
"res = await ezkl.create_evm_data_attestation(\n",
"        input_path,\n",
"        settings_path,\n",
"        sol_code_path,\n",
"        abi_path,\n",
"    )"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we can deploy the data attestation verifier contract. For security reasons, this binding will only deploy to a local anvil instance, using accounts generated by anvil, so it should only be used for testing purposes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"addr_path_da = \"addr_da.txt\"\n",
"\n",
"res = await ezkl.deploy_da_evm(\n",
"    addr_path_da,\n",
"    input_path,\n",
"    settings_path,\n",
"    sol_code_path,\n",
"    RPC_URL,\n",
"    )\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Here we regenerate the witness, prove, and verify all within the same cell. This reduces the latency between reading on-chain state and verifying it on-chain, which matters because the attested input values read from the oracle are time sensitive (their values are derived from computing on block.timestamp) and can change between the time of reading and the time of verifying.\n",
"\n",
"We call the view-only verify method on the contract to verify the proof. Since it is a view function this is safe to use in production, as you don't have to pass your private key."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# !export RUST_BACKTRACE=1\n",
"\n",
"calls_to_account = on_chain_data(x)\n",
"\n",
"data = dict(input_data = {'rpc': RPC_URL, 'calls': calls_to_account })\n",
"\n",
"# Serialize on-chain data into file:\n",
"json.dump(data, open(\"input.json\", 'w'))\n",
"\n",
"# setup web3 instance\n",
"w3 = Web3(HTTPProvider(RPC_URL))\n",
"\n",
"time_stamp = w3.eth.get_block('latest')['timestamp']\n",
"\n",
"print(f'time_stamp: {time_stamp}')\n",
"\n",
"\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"\n",
"res = ezkl.prove(\n",
"        witness_path,\n",
"        compiled_model_path,\n",
"        pk_path,\n",
"        proof_path,\n",
"        \"single\",\n",
"    )\n",
"\n",
"print(res)\n",
"assert os.path.isfile(proof_path)\n",
"# read the verifier address\n",
"addr_verifier = None\n",
"with open(addr_path_verifier, 'r') as f:\n",
"    addr = f.read()\n",
"# read the data attestation address\n",
"addr_da = None\n",
"with open(addr_path_da, 'r') as f:\n",
"    addr_da = f.read()\n",
"\n",
"res = await ezkl.verify_evm(\n",
"    addr,\n",
"    proof_path,\n",
"    RPC_URL,\n",
"    addr_da,\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".env",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
42
examples/onnx/exp/gen.py
Normal file
@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.exp(x)

        return m


circuit = MyModel()

x = torch.empty(1, 8).uniform_(0, 1)

out = circuit(x)

print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,  # store the trained parameter weights inside the model file
                  opset_version=17,  # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],  # the model's input names
                  output_names=['output'],  # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},  # variable length axes
                                'output': {0: 'batch_size'}})


d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
1
examples/onnx/exp/input.json
Normal file
@@ -0,0 +1 @@
{"input_data": [[0.5801457762718201, 0.6019012331962585, 0.8695418238639832, 0.17170941829681396, 0.500616729259491, 0.353726327419281, 0.6726185083389282, 0.5936906337738037]]}
BIN
examples/onnx/exp/network.onnx
Normal file
Binary file not shown.
41
examples/onnx/general_exp/gen.py
Normal file
@@ -0,0 +1,41 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = 10**x
        return m


circuit = MyModel()

x = torch.empty(1, 8).uniform_(0, 1)

out = circuit(x)

print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,  # store the trained parameter weights inside the model file
                  opset_version=17,  # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],  # the model's input names
                  output_names=['output'],  # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},  # variable length axes
                                'output': {0: 'batch_size'}})


d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
1
examples/onnx/general_exp/input.json
Normal file
@@ -0,0 +1 @@
{"input_data": [[0.9837989807128906, 0.026381194591522217, 0.3403851389884949, 0.14531707763671875, 0.24652725458145142, 0.7945117354393005, 0.4076554775238037, 0.23064672946929932]]}
BIN
examples/onnx/general_exp/network.onnx
Normal file
Binary file not shown.
42
examples/onnx/log/gen.py
Normal file
@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.log(x)

        return m


circuit = MyModel()

x = torch.empty(1, 8).uniform_(0, 3)

out = circuit(x)

print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,  # store the trained parameter weights inside the model file
                  opset_version=17,  # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],  # the model's input names
                  output_names=['output'],  # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},  # variable length axes
                                'output': {0: 'batch_size'}})


d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
1
examples/onnx/log/input.json
Normal file
@@ -0,0 +1 @@
{"input_data": [[1.9252371788024902, 1.8418371677398682, 0.8400403261184692, 2.083845853805542, 0.9760497808456421, 0.6940176486968994, 0.015579521656036377, 2.2689192295074463]]}
BIN
examples/onnx/log/network.onnx
Normal file
Binary file not shown.
@@ -1,75 +1,52 @@
import torch
import torch.nn as nn
import sys
from torch import nn
import json

sys.path.append("..")

class Model(nn.Module):
    """
    Just one Linear layer
    """
    def __init__(self, configs):
        super(Model, self).__init__()
        self.seq_len = configs.seq_len
        self.pred_len = configs.pred_len

        # Use this line if you want to visualize the weights
        # self.Linear.weight = nn.Parameter((1/self.seq_len)*torch.ones([self.pred_len,self.seq_len]))
        self.channels = configs.enc_in
        self.individual = configs.individual
        if self.individual:
            self.Linear = nn.ModuleList()
            for i in range(self.channels):
                self.Linear.append(nn.Linear(self.seq_len,self.pred_len))
        else:
            self.Linear = nn.Linear(self.seq_len, self.pred_len)

    def forward(self, x):
        # x: [Batch, Input length, Channel]
        if self.individual:
            output = torch.zeros([x.size(0),self.pred_len,x.size(2)],dtype=x.dtype).to(x.device)
            for i in range(self.channels):
                output[:,:,i] = self.Linear[i](x[:,:,i])
            x = output
        else:
            x = self.Linear(x.permute(0,2,1)).permute(0,2,1)
        return x # [Batch, Output length, Channel]

class Configs:
    def __init__(self, seq_len, pred_len, enc_in=321, individual=True):
        self.seq_len = seq_len
        self.pred_len = pred_len
        self.enc_in = enc_in
        self.individual = individual

model = 'Linear'
seq_len = 10
pred_len = 4
enc_in = 3

configs = Configs(seq_len, pred_len, enc_in, True)
circuit = Model(configs)

x = torch.randn(1, seq_len, pred_len)
import numpy as np
import tf2onnx


torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,  # store the trained parameter weights inside the model file
                  opset_version=15,  # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  # the model's input names
                  input_names=['input'],
                  output_names=['output'],  # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},  # variable length axes
                                'output': {0: 'batch_size'}})
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model


d1 = ((x).detach().numpy()).reshape([-1]).tolist()
# gather_nd in tf then export to onnx
x = in1 = Input((4, 1), dtype=tf.int32)
w = in2 = Input((4, ), dtype=tf.int32)

class MyLayer(Layer):
    def call(self, x, w):
        shape = tf.constant([8])
        return tf.scatter_nd(x, w, shape)

x = MyLayer()(x, w)



tm = Model((in1, in2), x)
tm.summary()
tm.compile(optimizer='adam', loss='mse')

shape = [1, 4, 1]
index_shape = [1, 4]
# After training, export to onnx (network.onnx) and create a data file (input.json)
x = np.random.randint(0, 4, shape)
# w = random int tensor
w = np.random.randint(0, 4, index_shape)

spec = tf.TensorSpec(shape, tf.int32, name='input_0')
index_spec = tf.TensorSpec(index_shape, tf.int32, name='input_1')

model_path = "network.onnx"

tf2onnx.convert.from_keras(tm, input_signature=[spec, index_spec], inputs_as_nchw=['input_0', 'input_1'], opset=12, output_path=model_path)


d = x.reshape([-1]).tolist()
d1 = w.reshape([-1]).tolist()


data = dict(
    input_data=[d1],
    input_data=[d, d1],
)

# Serialize data into file:

@@ -1 +1,16 @@
{"input_data": [[0.1874287724494934, 1.0498261451721191, 0.22384068369865417, 1.048445224761963, -0.5670360326766968, -0.38653188943862915, 0.12878702580928802, -2.3675858974456787, 0.5800458192825317, -0.43653929233551025, -0.2511898875236511, 0.3324051797389984, 0.27960312366485596, 0.4763695001602173, 0.3796705901622772, 1.1334782838821411, -0.87981778383255, -1.2451434135437012, 0.7672272324562073, -0.24404007196426392, -0.6875824928283691, 0.3619358539581299, -0.10131897777318954, 0.7169521450996399, 1.6585893630981445, -0.5451845526695251, 0.429487019777298, 0.7426952123641968, -0.2543637454509735, 0.06546942889690399, 0.7939824461936951, 0.1579471379518509, -0.043604474514722824, -0.8621711730957031, -0.5344759821891785, -0.05880478024482727, -0.17351101338863373, 0.5095029473304749, -0.7864817976951599, -0.449171245098114]]}
{
    "input_data": [
        [
            0,
            1,
            2,
            3
        ],
        [
            1,
            0,
            2,
            1
        ]
    ]
}
Binary file not shown.
851
ezkl.pyi
Normal file
@@ -0,0 +1,851 @@
# This file is automatically generated by pyo3_stub_gen
# ruff: noqa: E501, F401

import os
import pathlib
import typing
from enum import Enum, auto

class PyG1:
    r"""
    pyclass containing the struct used for G1, this is mostly a helper class
    """
    ...

class PyG1Affine:
    r"""
    pyclass containing the struct used for G1
    """
    ...

class PyRunArgs:
    r"""
    Python class containing the struct used for run_args

    Returns
    -------
    PyRunArgs
    """
    ...

class PyCommitments(Enum):
    r"""
    pyclass representing an enum, denoting the type of commitment
    """
    KZG = auto()
    IPA = auto()

class PyInputType(Enum):
    Bool = auto()
    F16 = auto()
    F32 = auto()
    F64 = auto()
    Int = auto()
    TDim = auto()

class PyTestDataSource(Enum):
    r"""
    pyclass representing an enum
    """
    File = auto()
    OnChain = auto()

def aggregate(aggregation_snarks:typing.Sequence[str | os.PathLike | pathlib.Path],proof_path:str | os.PathLike | pathlib.Path,vk_path:str | os.PathLike | pathlib.Path,transcript:str,logrows:int,check_mode:str,split_proofs:bool,srs_path:typing.Optional[str | os.PathLike | pathlib.Path],commitment:PyCommitments) -> bool:
    r"""
    Creates an aggregated proof

    Arguments
    ---------
    aggregation_snarks: list[str]
        List of paths to the various proofs

    proof_path: str
        Path to output the aggregated proof

    vk_path: str
        Path to the VK file

    transcript:
        Proof transcript type to be used. `evm` used by default. `poseidon` is also supported

    logrows:
        Logrows used for aggregation circuit

    check_mode: str
        Run sanity checks during calculations. Accepts `safe` or `unsafe`

    split_proofs: bool
        Whether the accumulated proofs are segments of a larger circuit

    srs_path: str
        Path to the SRS used

    commitment: str
        Accepts "kzg" or "ipa"

    Returns
    -------
    bool
    """
    ...

def buffer_to_felts(buffer:typing.Sequence[int]) -> list[str]:
    r"""
    Converts a buffer to vector of field elements

    Arguments
    -------
    buffer: list[int]
        List of integers representing a buffer

    Returns
    -------
    list[str]
        List of field elements represented as strings
    """
    ...
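
# Example (sketch, not part of the generated stub): packing the bytes of a
# short message into field elements; the result is a list of hex strings.
#
#   felts = buffer_to_felts(list(b"hello ezkl"))
#   print(felts)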

def calibrate_settings(data:str | os.PathLike | pathlib.Path,model:str | os.PathLike | pathlib.Path,settings:str | os.PathLike | pathlib.Path,target:str,lookup_safety_margin:float,scales:typing.Optional[typing.Sequence[int]],scale_rebase_multiplier:typing.Sequence[int],max_logrows:typing.Optional[int]) -> typing.Any:
    r"""
    Calibrates the circuit settings

    Arguments
    ---------
    data: str
        Path to the calibration data

    model: str
        Path to the onnx file

    settings: str
        Path to the settings file

    target: str
        Calibration target; accepts `resources` or `accuracy`

    lookup_safety_margin: int
        the lookup safety margin to use for calibration. if the max lookup is 2^k, then the max lookup will be 2^k * lookup_safety_margin. larger = safer but slower

    scales: list[int]
        Optional scales to specifically try for calibration

    scale_rebase_multiplier: list[int]
        Optional scale rebase multipliers to specifically try for calibration. This is the multiplier at which we divide to return to the input scale.

    max_logrows: int
        Optional max logrows to use for calibration

    Returns
    -------
    bool
    """
    ...

def compile_circuit(model:str | os.PathLike | pathlib.Path,compiled_circuit:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path) -> bool:
    r"""
    Compiles the circuit for use in other steps

    Arguments
    ---------
    model: str
        Path to the onnx model file

    compiled_circuit: str
        Path to output the compiled circuit

    settings_path: str
        Path to the settings files

    Returns
    -------
    bool
    """
    ...
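
# Example (sketch): the usual pipeline order, using the file names from the
# example notebooks; gen_settings (declared below) writes settings.json, which
# compile_circuit then uses to lower the ONNX graph into a circuit.
#
#   gen_settings("network.onnx", "settings.json", None)
#   compile_circuit("network.onnx", "network.compiled", "settings.json")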
|
||||
|
||||
def create_evm_data_attestation(input_data:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,abi_path:str | os.PathLike | pathlib.Path,witness_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> typing.Any:
|
||||
r"""
|
||||
Creates an EVM compatible data attestation verifier, you will need solc installed in your environment to run this
|
||||
|
||||
Arguments
|
||||
---------
|
||||
input_data: str
|
||||
The path to the .json data file, which should contain the necessary calldata and account addresses needed to read from all the on-chain view functions that return the data that the network ingests as inputs
|
||||
|
||||
settings_path: str
|
||||
The path to the settings file
|
||||
|
||||
sol_code_path: str
|
||||
The path to the create the solidity verifier
|
||||
|
||||
abi_path: str
|
||||
The path to create the ABI for the solidity verifier
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
"""
|
||||
...
|
||||
|
||||
def create_evm_verifier(vk_path:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,abi_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path],reusable:bool) -> typing.Any:
|
||||
r"""
|
||||
Creates an EVM compatible verifier, you will need solc installed in your environment to run this
|
||||
|
||||
Arguments
|
||||
---------
|
||||
vk_path: str
|
||||
The path to the verification key file
|
||||
|
||||
settings_path: str
|
||||
The path to the settings file
|
||||
|
||||
sol_code_path: str
|
||||
The path to the create the solidity verifier
|
||||
|
||||
abi_path: str
|
||||
The path to create the ABI for the solidity verifier
|
||||
|
||||
srs_path: str
|
||||
The path to the SRS file
|
||||
|
||||
reusable: bool
|
||||
Whether the verifier should be rendered as a reusable contract. If so, then you will need to deploy the VK artifact separately which you can generate using the create_evm_vka command
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
"""
|
||||
...
|
||||
|
||||
def create_evm_verifier_aggr(aggregation_settings:typing.Sequence[str | os.PathLike | pathlib.Path],vk_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,abi_path:str | os.PathLike | pathlib.Path,logrows:int,srs_path:typing.Optional[str | os.PathLike | pathlib.Path],reusable:bool) -> typing.Any:
|
||||
r"""
|
||||
Creates an evm compatible aggregate verifier, you will need solc installed in your environment to run this
|
||||
|
||||
Arguments
|
||||
---------
|
||||
aggregation_settings: str
|
||||
path to the settings file
|
||||
|
||||
vk_path: str
|
||||
The path to load the desired verification key file
|
||||
|
||||
sol_code_path: str
|
||||
The path to the Solidity code
|
||||
|
||||
abi_path: str
|
||||
The path to output the Solidity verifier ABI
|
||||
|
||||
logrows: int
|
||||
Number of logrows used during aggregated setup
|
||||
|
||||
srs_path: str
|
||||
The path to the SRS file
|
||||
|
||||
reusable: bool
|
||||
Whether the verifier should be rendered as a reusable contract. If so, then you will need to deploy the VK artifact separately which you can generate using the create_evm_vka command
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
"""
|
||||
...
|
||||
|
||||
def create_evm_vka(vk_path:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,abi_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> typing.Any:
|
||||
r"""
|
||||
Creates an Evm VK artifact. This command generated a VK with circuit specific meta data encoding in memory for use by the reusable H2 verifier.
|
||||
This is useful for deploying verifier that were otherwise too big to fit on chain and required aggregation.
|
||||
|
||||
Arguments
|
||||
---------
|
||||
vk_path: str
|
||||
The path to the verification key file
|
||||
|
||||
settings_path: str
|
||||
The path to the settings file
|
||||
|
||||
sol_code_path: str
|
||||
The path to the create the solidity verifying key.
|
||||
|
||||
abi_path: str
|
||||
The path to create the ABI for the solidity verifier
|
||||
|
||||
srs_path: str
|
||||
The path to the SRS file
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
"""
|
||||
...
|
||||
|
||||
def deploy_da_evm(addr_path:str | os.PathLike | pathlib.Path,input_data:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,rpc_url:typing.Optional[str],optimizer_runs:int,private_key:typing.Optional[str]) -> typing.Any:
    r"""
    Deploys the Solidity data attestation verifier
    """
    ...


def deploy_evm(addr_path:str | os.PathLike | pathlib.Path,sol_code_path:str | os.PathLike | pathlib.Path,rpc_url:typing.Optional[str],contract_type:str,optimizer_runs:int,private_key:typing.Optional[str]) -> typing.Any:
    r"""
    Deploys the Solidity verifier
    """
    ...

def encode_evm_calldata(proof:str | os.PathLike | pathlib.Path,calldata:str | os.PathLike | pathlib.Path,addr_vk:typing.Optional[str]) -> list[int]:
    r"""
    Creates encoded EVM calldata from a proof file

    Arguments
    ---------
    proof: str
        Path to the proof file

    calldata: str
        Path to the calldata file to save

    addr_vk: str
        The address of the verification key contract (if the verifier key is to be rendered as a separate contract)

    Returns
    -------
    list[int]
        The encoded calldata
    """
    ...

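# Editorial usage sketch (assumed paths): encode calldata for an already-deployed verifier.
#
#   calldata = ezkl.encode_evm_calldata("proof.json", "calldata.bytes", None)
#   # pass an addr_vk only when the verifying key was rendered as a separate contract
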
def felt_to_big_endian(felt:str) -> str:
    r"""
    Converts a field element hex string to big endian

    Arguments
    ---------
    felt: str
        The field element represented as a string

    Returns
    -------
    str
        The field element represented as a big-endian string
    """
    ...

def felt_to_float(felt:str,scale:int) -> float:
    r"""
    Converts a field element hex string to a floating point number

    Arguments
    ---------
    felt: str
        The field element represented as a string

    scale: int
        The scaling factor used to convert the field element into a floating point representation

    Returns
    -------
    float
    """
    ...

def felt_to_int(felt:str) -> int:
    r"""
    Converts a field element hex string to an integer

    Arguments
    ---------
    felt: str
        The field element represented as a string

    Returns
    -------
    int
    """
    ...

def float_to_felt(input:float,scale:int,input_type:PyInputType) -> str:
    r"""
    Converts a floating point element to a field element hex string

    Arguments
    ---------
    input: float
        The floating point number to quantize

    scale: int
        The scaling factor used to quantize the float into a field element

    input_type: PyInputType
        The type of the input

    Returns
    -------
    str
        The field element represented as a string
    """
    ...

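# Editorial round-trip sketch (values are illustrative assumptions; scale is the
# quantization exponent used by ezkl):
#
#   felt = ezkl.float_to_felt(1.5, 7, ezkl.PyInputType.F64)  # quantize at scale 7
#   back = ezkl.felt_to_float(felt, 7)                       # recover the float
#   as_int = ezkl.felt_to_int(felt)                          # integer representation
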
def gen_settings(model:str | os.PathLike | pathlib.Path,output:str | os.PathLike | pathlib.Path,py_run_args:typing.Optional[PyRunArgs]) -> bool:
    r"""
    Generates the circuit settings

    Arguments
    ---------
    model: str
        Path to the onnx file

    output: str
        Path to create the settings file

    py_run_args: PyRunArgs
        PyRunArgs object to initialize the settings

    Returns
    -------
    bool
    """
    ...

def gen_srs(srs_path:str | os.PathLike | pathlib.Path,logrows:int) -> None:
    r"""
    Generates the Structured Reference String (SRS); use this only for testing purposes

    Arguments
    ---------
    srs_path: str
        The path at which to create the SRS file

    logrows: int
        The number of logrows for the SRS file
    """
    ...

def gen_vk_from_pk_aggr(path_to_pk:str | os.PathLike | pathlib.Path,vk_output_path:str | os.PathLike | pathlib.Path) -> bool:
    r"""
    Generates a vk from a pk for an aggregate circuit and saves it to a file

    Arguments
    ---------
    path_to_pk: str
        Path to the proving key

    vk_output_path: str
        Path to create the vk file

    Returns
    -------
    bool
    """
    ...

def gen_vk_from_pk_single(path_to_pk:str | os.PathLike | pathlib.Path,circuit_settings_path:str | os.PathLike | pathlib.Path,vk_output_path:str | os.PathLike | pathlib.Path) -> bool:
    r"""
    Generates a vk from a pk for a model circuit and saves it to a file

    Arguments
    ---------
    path_to_pk: str
        Path to the proving key

    circuit_settings_path: str
        Path to the circuit settings file

    vk_output_path: str
        Path to create the vk file

    Returns
    -------
    bool
    """
    ...

def gen_witness(data:str | os.PathLike | pathlib.Path,model:str | os.PathLike | pathlib.Path,output:typing.Optional[str | os.PathLike | pathlib.Path],vk_path:typing.Optional[str | os.PathLike | pathlib.Path],srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> typing.Any:
    r"""
    Runs the forward pass operation to generate a witness

    Arguments
    ---------
    data: str
        Path to the data file

    model: str
        Path to the compiled model file

    output: str
        Path to create the witness file

    vk_path: str
        Path to the verification key

    srs_path: str
        Path to the SRS file

    Returns
    -------
    dict
        Python object containing the witness values
    """
    ...

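# Editorial pipeline sketch (file names are assumptions; gen_witness is async, and
# the model is assumed to have been compiled beforehand with the compile-circuit step):
#
#   ezkl.gen_settings("model.onnx", "settings.json")
#   witness = await ezkl.gen_witness("input.json", "model.compiled", "witness.json")
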
def get_srs(settings_path:typing.Optional[str | os.PathLike | pathlib.Path],logrows:typing.Optional[int],srs_path:typing.Optional[str | os.PathLike | pathlib.Path],commitment:typing.Optional[PyCommitments]) -> typing.Any:
    r"""
    Gets a public srs

    Arguments
    ---------
    settings_path: str
        Path to the settings file

    logrows: int
        The number of logrows for the SRS file

    srs_path: str
        The path at which to create the SRS file

    commitment: str
        Specify the commitment used ("kzg", "ipa")

    Returns
    -------
    bool
    """
    ...

def ipa_commit(message:typing.Sequence[str],vk_path:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> list[PyG1Affine]:
    r"""
    Generate an ipa commitment.

    Arguments
    ---------
    message: list[str]
        List of field elements represented as strings

    vk_path: str
        Path to the verification key

    settings_path: str
        Path to the settings file

    srs_path: str
        Path to the Structured Reference String (SRS) file

    Returns
    -------
    list[PyG1Affine]
    """
    ...

def kzg_commit(message:typing.Sequence[str],vk_path:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> list[PyG1Affine]:
    r"""
    Generate a kzg commitment.

    Arguments
    ---------
    message: list[str]
        List of field elements represented as strings

    vk_path: str
        Path to the verification key

    settings_path: str
        Path to the settings file

    srs_path: str
        Path to the Structured Reference String (SRS) file

    Returns
    -------
    list[PyG1Affine]
    """
    ...

def mock(witness:str | os.PathLike | pathlib.Path,model:str | os.PathLike | pathlib.Path) -> bool:
    r"""
    Mocks the prover

    Arguments
    ---------
    witness: str
        Path to the witness file

    model: str
        Path to the compiled model file

    Returns
    -------
    bool
    """
    ...

def mock_aggregate(aggregation_snarks:typing.Sequence[str | os.PathLike | pathlib.Path],logrows:int,split_proofs:bool) -> bool:
    r"""
    Mocks the aggregate prover

    Arguments
    ---------
    aggregation_snarks: list[str]
        List of paths to the relevant proof files

    logrows: int
        Number of logrows to use for the aggregation circuit

    split_proofs: bool
        Indicates whether the accumulated proofs are segments of a larger proof

    Returns
    -------
    bool
    """
    ...

def poseidon_hash(message:typing.Sequence[str]) -> list[str]:
    r"""
    Generate a poseidon hash.

    Arguments
    ---------
    message: list[str]
        List of field elements represented as strings

    Returns
    -------
    list[str]
        List of field elements represented as strings
    """
    ...

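# Editorial sketch (inputs are illustrative assumptions):
#
#   hashed = ezkl.poseidon_hash(["0x1", "0x2"])                 # field elements as hex strings
#   comms = ezkl.kzg_commit(hashed, "vk.key", "settings.json")  # commitment points as PyG1Affine
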
def prove(witness:str | os.PathLike | pathlib.Path,model:str | os.PathLike | pathlib.Path,pk_path:str | os.PathLike | pathlib.Path,proof_path:typing.Optional[str | os.PathLike | pathlib.Path],proof_type:str,srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> typing.Any:
    r"""
    Runs the prover on a set of inputs

    Arguments
    ---------
    witness: str
        Path to the witness file

    model: str
        Path to the compiled model file

    pk_path: str
        Path to the proving key file

    proof_path: str
        Path to create the proof file

    proof_type: str
        Accepts `single`, `for-aggr`

    srs_path: str
        Path to the SRS file

    Returns
    -------
    bool
    """
    ...

def setup(model:str | os.PathLike | pathlib.Path,vk_path:str | os.PathLike | pathlib.Path,pk_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path],witness_path:typing.Optional[str | os.PathLike | pathlib.Path],disable_selector_compression:bool) -> bool:
    r"""
    Runs the setup process

    Arguments
    ---------
    model: str
        Path to the compiled model file

    vk_path: str
        Path to create the verification key file

    pk_path: str
        Path to create the proving key file

    srs_path: str
        Path to the SRS file

    witness_path: str
        Path to the witness file

    disable_selector_compression: bool
        Whether to disable selector compression

    Returns
    -------
    bool
    """
    ...

def setup_aggregate(sample_snarks:typing.Sequence[str | os.PathLike | pathlib.Path],vk_path:str | os.PathLike | pathlib.Path,pk_path:str | os.PathLike | pathlib.Path,logrows:int,split_proofs:bool,srs_path:typing.Optional[str | os.PathLike | pathlib.Path],disable_selector_compression:bool,commitment:PyCommitments) -> bool:
    r"""
    Runs the setup process for an aggregate setup

    Arguments
    ---------
    sample_snarks: list[str]
        List of paths to the various proofs

    vk_path: str
        Path to create the aggregated VK

    pk_path: str
        Path to create the aggregated PK

    logrows: int
        Number of logrows to use

    split_proofs: bool
        Whether the accumulated proofs are segments of a larger proof

    srs_path: str
        Path to the SRS file

    disable_selector_compression: bool
        Whether to disable selector compression

    commitment: str
        Accepts `kzg`, `ipa`

    Returns
    -------
    bool
    """
    ...

def setup_test_evm_witness(data_path:str | os.PathLike | pathlib.Path,compiled_circuit_path:str | os.PathLike | pathlib.Path,test_data:str | os.PathLike | pathlib.Path,input_source:PyTestDataSource,output_source:PyTestDataSource,rpc_url:typing.Optional[str]) -> typing.Any:
    r"""
    Setup test evm witness

    Arguments
    ---------
    data_path: str
        The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)

    compiled_circuit_path: str
        The path to the compiled model file (generated using the compile-circuit command)

    test_data: str
        For testing purposes only. The optional path to the .json data file that will be generated and that contains the on-chain data storage information derived from the file information in the data .json file. Should include both the network input (possibly private) and the network output (public input to the proof)

    input_source: str
        Where the input data comes from

    output_source: str
        Where the output data comes from

    rpc_url: str
        RPC URL for an EVM compatible node; if None, uses Anvil as a local RPC node

    Returns
    -------
    bool
    """
    ...

def swap_proof_commitments(proof_path:str | os.PathLike | pathlib.Path,witness_path:str | os.PathLike | pathlib.Path) -> None:
    r"""
    Swap the commitments in a proof

    Arguments
    ---------
    proof_path: str
        Path to the proof file

    witness_path: str
        Path to the witness file
    """
    ...

def table(model:str | os.PathLike | pathlib.Path,py_run_args:typing.Optional[PyRunArgs]) -> str:
    r"""
    Displays the table as a string in python

    Arguments
    ---------
    model: str
        Path to the onnx file

    py_run_args: PyRunArgs
        PyRunArgs object to initialize the settings

    Returns
    -------
    str
        Table of the nodes in the onnx file
    """
    ...

def verify(proof_path:str | os.PathLike | pathlib.Path,settings_path:str | os.PathLike | pathlib.Path,vk_path:str | os.PathLike | pathlib.Path,srs_path:typing.Optional[str | os.PathLike | pathlib.Path],reduced_srs:bool) -> bool:
    r"""
    Verifies a given proof

    Arguments
    ---------
    proof_path: str
        Path to the proof file

    settings_path: str
        Path to the settings file

    vk_path: str
        Path to the verification key file

    srs_path: str
        Path to the SRS file

    reduced_srs: bool
        Whether to reduce the number of SRS logrows to the number of instances rather than the number of logrows used for proofs (only works if the srs were generated in the same ceremony)

    Returns
    -------
    bool
    """
    ...

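# Editorial end-to-end sketch (paths are assumptions; see the stubs above for details):
#
#   ezkl.setup("model.compiled", "vk.key", "pk.key")
#   ezkl.prove("witness.json", "model.compiled", "pk.key", "proof.json", "single")
#   assert ezkl.verify("proof.json", "settings.json", "vk.key")
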
def verify_aggr(proof_path:str | os.PathLike | pathlib.Path,vk_path:str | os.PathLike | pathlib.Path,logrows:int,commitment:PyCommitments,reduced_srs:bool,srs_path:typing.Optional[str | os.PathLike | pathlib.Path]) -> bool:
    r"""
    Verifies an aggregate proof

    Arguments
    ---------
    proof_path: str
        The path to the proof file

    vk_path: str
        The path to the verification key file

    logrows: int
        Logrows used for the aggregation circuit

    commitment: str
        Accepts "kzg" or "ipa"

    reduced_srs: bool
        Whether to reduce the number of SRS logrows to the number of instances rather than the number of logrows used for proofs (only works if the srs were generated in the same ceremony)

    srs_path: str
        The path to the SRS file

    Returns
    -------
    bool
    """
    ...

def verify_evm(addr_verifier:str,proof_path:str | os.PathLike | pathlib.Path,rpc_url:typing.Optional[str],addr_da:typing.Optional[str],addr_vk:typing.Optional[str]) -> typing.Any:
    r"""
    Verifies an EVM-compatible proof; you will need solc installed in your environment to run this

    Arguments
    ---------
    addr_verifier: str
        The verifier contract's address as a hex string

    proof_path: str
        The path to the proof file (generated using the prove command)

    rpc_url: str
        RPC URL for an Ethereum node; if None, uses Anvil but WON'T persist state

    addr_da: str
        The address of the data attestation contract (if the verifier uses data attestation)

    addr_vk: str
        The address of the separate VK contract (if the verifier key is rendered as a separate contract)

    Returns
    -------
    bool
    """
    ...

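# Editorial on-chain sketch (paths, argument values, and the contract_type value are
# assumptions; deploy_evm and verify_evm are async):
#
#   await ezkl.deploy_evm("addr.txt", "verifier.sol", None, "verifier", 1, None)
#   addr_verifier = open("addr.txt").read().strip()
#   assert await ezkl.verify_evm(addr_verifier, "proof.json", None, None, None)
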
9
src/bin/py_stub_gen.rs
Normal file
@@ -0,0 +1,9 @@
use pyo3_stub_gen::Result;

fn main() -> Result<()> {
    // `stub_info` is a function defined by the `define_stub_info_gatherer!` macro.
    env_logger::Builder::from_env(env_logger::Env::default().filter_or("RUST_LOG", "info")).init();
    let stub = ezkl::bindings::python::stub_info()?;
    stub.generate()?;
    Ok(())
}

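// Editorial note (not part of the diff): the generator above is run as a cargo binary,
// e.g. `cargo run --bin py_stub_gen --features python-bindings` (the exact feature flag
// is an assumption), and it emits the .pyi stubs shown earlier in this comparison.
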
@@ -4,6 +4,7 @@ use crate::circuit::modules::poseidon::{
    PoseidonChip,
};
use crate::circuit::modules::Module;
use crate::circuit::InputType;
use crate::circuit::{CheckMode, Tolerance};
use crate::commands::*;
use crate::fieldutils::{felt_to_integer_rep, integer_rep_to_felt, IntegerRep};
@@ -26,7 +27,12 @@ use pyo3::exceptions::{PyIOError, PyRuntimeError};
use pyo3::prelude::*;
use pyo3::wrap_pyfunction;
use pyo3_log;
use pyo3_stub_gen::{
    define_stub_info_gatherer, derive::gen_stub_pyclass, derive::gen_stub_pyclass_enum,
    derive::gen_stub_pyfunction, TypeInfo,
};
use snark_verifier::util::arithmetic::PrimeField;
use std::collections::HashSet;
use std::str::FromStr;
use std::{fs::File, path::PathBuf};

@@ -35,6 +41,7 @@ type PyFelt = String;
/// pyclass representing an enum
#[pyclass]
#[derive(Debug, Clone)]
#[gen_stub_pyclass_enum]
enum PyTestDataSource {
    /// The data is loaded from a file
    File,
@@ -54,6 +61,7 @@ impl From<PyTestDataSource> for TestDataSource {
/// pyclass containing the struct used for G1, this is mostly a helper class
#[pyclass]
#[derive(Debug, Clone)]
#[gen_stub_pyclass]
struct PyG1 {
    #[pyo3(get, set)]
    /// Field Element representing x
@@ -100,6 +108,7 @@ impl pyo3::ToPyObject for PyG1 {
/// pyclass containing the struct used for G1
#[pyclass]
#[derive(Debug, Clone)]
#[gen_stub_pyclass]
pub struct PyG1Affine {
    #[pyo3(get, set)]
    ///
@@ -145,6 +154,7 @@ impl pyo3::ToPyObject for PyG1Affine {
///
#[pyclass]
#[derive(Clone)]
#[gen_stub_pyclass]
struct PyRunArgs {
    #[pyo3(get, set)]
    /// float: The tolerance for error on model outputs
@@ -180,9 +190,6 @@ struct PyRunArgs {
    /// list[tuple[str, int]]: Hand-written parser for graph variables, eg. batch_size=1
    pub variables: Vec<(String, usize)>,
    #[pyo3(get, set)]
    /// bool: Rebase the scale using lookup table for division instead of using a range check
    pub div_rebasing: bool,
    #[pyo3(get, set)]
    /// bool: Should constants with 0.0 fraction be rebased to scale 0
    pub rebase_frac_zero_constants: bool,
    #[pyo3(get, set)]
@@ -197,6 +204,9 @@ struct PyRunArgs {
    /// int: The number of legs used for decomposition
    #[pyo3(get, set)]
    pub decomp_legs: usize,
    /// bool: Should the circuit use unbounded lookups for log
    #[pyo3(get, set)]
    pub bounded_log_lookup: bool,
}

/// default instantiation of PyRunArgs
@@ -212,6 +222,7 @@ impl PyRunArgs {
impl From<PyRunArgs> for RunArgs {
    fn from(py_run_args: PyRunArgs) -> Self {
        RunArgs {
            bounded_log_lookup: py_run_args.bounded_log_lookup,
            tolerance: Tolerance::from(py_run_args.tolerance),
            input_scale: py_run_args.input_scale,
            param_scale: py_run_args.param_scale,
@@ -223,7 +234,6 @@ impl From<PyRunArgs> for RunArgs {
            output_visibility: py_run_args.output_visibility,
            param_visibility: py_run_args.param_visibility,
            variables: py_run_args.variables,
            div_rebasing: py_run_args.div_rebasing,
            rebase_frac_zero_constants: py_run_args.rebase_frac_zero_constants,
            check_mode: py_run_args.check_mode,
            commitment: Some(py_run_args.commitment.into()),
@@ -236,6 +246,7 @@ impl From<PyRunArgs> for RunArgs {
impl Into<PyRunArgs> for RunArgs {
    fn into(self) -> PyRunArgs {
        PyRunArgs {
            bounded_log_lookup: self.bounded_log_lookup,
            tolerance: self.tolerance.val,
            input_scale: self.input_scale,
            param_scale: self.param_scale,
@@ -247,7 +258,6 @@ impl Into<PyRunArgs> for RunArgs {
            output_visibility: self.output_visibility,
            param_visibility: self.param_visibility,
            variables: self.variables,
            div_rebasing: self.div_rebasing,
            rebase_frac_zero_constants: self.rebase_frac_zero_constants,
            check_mode: self.check_mode,
            commitment: self.commitment.into(),
@@ -259,6 +269,7 @@ impl Into<PyRunArgs> for RunArgs {

#[pyclass]
#[derive(Debug, Clone)]
#[gen_stub_pyclass_enum]
/// pyclass representing an enum, denoting the type of commitment
pub enum PyCommitments {
    /// KZG commitment
@@ -306,6 +317,65 @@ impl FromStr for PyCommitments {
    }
}

#[pyclass]
#[derive(Debug, Clone)]
#[gen_stub_pyclass_enum]
enum PyInputType {
    ///
    Bool,
    ///
    F16,
    ///
    F32,
    ///
    F64,
    ///
    Int,
    ///
    TDim,
}

impl From<InputType> for PyInputType {
    fn from(input_type: InputType) -> Self {
        match input_type {
            InputType::Bool => PyInputType::Bool,
            InputType::F16 => PyInputType::F16,
            InputType::F32 => PyInputType::F32,
            InputType::F64 => PyInputType::F64,
            InputType::Int => PyInputType::Int,
            InputType::TDim => PyInputType::TDim,
        }
    }
}

impl From<PyInputType> for InputType {
    fn from(py_input_type: PyInputType) -> Self {
        match py_input_type {
            PyInputType::Bool => InputType::Bool,
            PyInputType::F16 => InputType::F16,
            PyInputType::F32 => InputType::F32,
            PyInputType::F64 => InputType::F64,
            PyInputType::Int => InputType::Int,
            PyInputType::TDim => InputType::TDim,
        }
    }
}

impl FromStr for PyInputType {
    type Err = String;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "bool" => Ok(PyInputType::Bool),
            "f16" => Ok(PyInputType::F16),
            "f32" => Ok(PyInputType::F32),
            "f64" => Ok(PyInputType::F64),
            "int" => Ok(PyInputType::Int),
            "tdim" => Ok(PyInputType::TDim),
            _ => Err("Invalid value for InputType".to_string()),
        }
    }
}

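// Editorial note (not part of the diff): from Python, the new enum is exposed as
// ezkl.PyInputType, so a call such as ezkl.float_to_felt(1.5, 7, ezkl.PyInputType.F32)
// (values are illustrative assumptions) selects the f32 rounding behaviour via
// InputType::roundtrip before quantization.
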
/// Converts a field element hex string to big endian
///
/// Arguments
@@ -322,6 +392,7 @@ impl FromStr for PyCommitments {
#[pyfunction(signature = (
    felt,
))]
#[gen_stub_pyfunction]
fn felt_to_big_endian(felt: PyFelt) -> PyResult<String> {
    let felt = crate::pfsys::string_to_field::<Fr>(&felt);
    Ok(format!("{:?}", felt))
@@ -341,6 +412,7 @@ fn felt_to_big_endian(felt: PyFelt) -> PyResult<String> {
#[pyfunction(signature = (
    felt,
))]
#[gen_stub_pyfunction]
fn felt_to_int(felt: PyFelt) -> PyResult<IntegerRep> {
    let felt = crate::pfsys::string_to_field::<Fr>(&felt);
    let int_rep = felt_to_integer_rep(felt);
@@ -365,6 +437,7 @@ fn felt_to_int(felt: PyFelt) -> PyResult<IntegerRep> {
    felt,
    scale
))]
#[gen_stub_pyfunction]
fn felt_to_float(felt: PyFelt, scale: crate::Scale) -> PyResult<f64> {
    let felt = crate::pfsys::string_to_field::<Fr>(&felt);
    let int_rep = felt_to_integer_rep(felt);
@@ -383,6 +456,9 @@ fn felt_to_float(felt: PyFelt, scale: crate::Scale) -> PyResult<f64> {
/// scale: float
/// The scaling factor used to quantize the float into a field element
///
/// input_type: PyInputType
/// The type of the input
///
/// Returns
/// -------
/// str
@@ -390,9 +466,12 @@ fn felt_to_float(felt: PyFelt, scale: crate::Scale) -> PyResult<f64> {
///
#[pyfunction(signature = (
    input,
    scale
    scale,
    input_type=PyInputType::F64
))]
fn float_to_felt(input: f64, scale: crate::Scale) -> PyResult<PyFelt> {
#[gen_stub_pyfunction]
fn float_to_felt(mut input: f64, scale: crate::Scale, input_type: PyInputType) -> PyResult<PyFelt> {
    InputType::roundtrip(&input_type.into(), &mut input);
    let int_rep = quantize_float(&input, 0.0, scale)
        .map_err(|_| PyIOError::new_err("Failed to quantize input"))?;
    let felt = integer_rep_to_felt(int_rep);
@@ -414,6 +493,7 @@ fn float_to_felt(input: f64, scale: crate::Scale) -> PyResult<PyFelt> {
#[pyfunction(signature = (
    buffer
))]
#[gen_stub_pyfunction]
fn buffer_to_felts(buffer: Vec<u8>) -> PyResult<Vec<String>> {
    fn u8_array_to_u128_le(arr: [u8; 16]) -> u128 {
        let mut n: u128 = 0;
@@ -486,6 +566,7 @@ fn buffer_to_felts(buffer: Vec<u8>) -> PyResult<Vec<String>> {
#[pyfunction(signature = (
    message,
))]
#[gen_stub_pyfunction]
fn poseidon_hash(message: Vec<PyFelt>) -> PyResult<Vec<PyFelt>> {
    let message: Vec<Fr> = message
        .iter()
@@ -531,6 +612,7 @@ fn poseidon_hash(message: Vec<PyFelt>) -> PyResult<Vec<PyFelt>> {
    settings_path=PathBuf::from(DEFAULT_SETTINGS),
    srs_path=None
))]
#[gen_stub_pyfunction]
fn kzg_commit(
    message: Vec<PyFelt>,
    vk_path: PathBuf,
@@ -589,6 +671,7 @@ fn kzg_commit(
    settings_path=PathBuf::from(DEFAULT_SETTINGS),
    srs_path=None
))]
#[gen_stub_pyfunction]
fn ipa_commit(
    message: Vec<PyFelt>,
    vk_path: PathBuf,
@@ -635,6 +718,7 @@ fn ipa_commit(
    proof_path=PathBuf::from(DEFAULT_PROOF),
    witness_path=PathBuf::from(DEFAULT_WITNESS),
))]
#[gen_stub_pyfunction]
fn swap_proof_commitments(proof_path: PathBuf, witness_path: PathBuf) -> PyResult<()> {
    crate::execute::swap_proof_commitments_cmd(proof_path, witness_path)
        .map_err(|_| PyIOError::new_err("Failed to swap commitments"))?;
@@ -664,6 +748,7 @@ fn swap_proof_commitments(proof_path: PathBuf, witness_path: PathBuf) -> PyResul
    circuit_settings_path=PathBuf::from(DEFAULT_SETTINGS),
    vk_output_path=PathBuf::from(DEFAULT_VK),
))]
#[gen_stub_pyfunction]
fn gen_vk_from_pk_single(
    path_to_pk: PathBuf,
    circuit_settings_path: PathBuf,
@@ -701,6 +786,7 @@ fn gen_vk_from_pk_single(
    path_to_pk=PathBuf::from(DEFAULT_PK_AGGREGATED),
    vk_output_path=PathBuf::from(DEFAULT_VK_AGGREGATED),
))]
#[gen_stub_pyfunction]
fn gen_vk_from_pk_aggr(path_to_pk: PathBuf, vk_output_path: PathBuf) -> PyResult<bool> {
    let pk = load_pk::<KZGCommitmentScheme<Bn256>, AggregationCircuit>(path_to_pk, ())
        .map_err(|_| PyIOError::new_err("Failed to load pk"))?;
@@ -730,6 +816,7 @@ fn gen_vk_from_pk_aggr(path_to_pk: PathBuf, vk_output_path: PathBuf) -> PyResult
    model = PathBuf::from(DEFAULT_MODEL),
    py_run_args = None
))]
#[gen_stub_pyfunction]
fn table(model: PathBuf, py_run_args: Option<PyRunArgs>) -> PyResult<String> {
    let run_args: RunArgs = py_run_args.unwrap_or_else(PyRunArgs::new).into();
    let mut reader = File::open(model).map_err(|_| PyIOError::new_err("Failed to open model"))?;
@@ -755,6 +842,7 @@ fn table(model: PathBuf, py_run_args: Option<PyRunArgs>) -> PyResult<String> {
    srs_path,
    logrows,
))]
#[gen_stub_pyfunction]
fn gen_srs(srs_path: PathBuf, logrows: usize) -> PyResult<()> {
    let params = ezkl_gen_srs::<KZGCommitmentScheme<Bn256>>(logrows as u32);
    save_params::<KZGCommitmentScheme<Bn256>>(&srs_path, &params)?;
@@ -787,6 +875,7 @@ fn gen_srs(srs_path: PathBuf, logrows: usize) -> PyResult<()> {
    srs_path=None,
    commitment=None,
))]
#[gen_stub_pyfunction]
fn get_srs(
    py: Python,
    settings_path: Option<PathBuf>,
@@ -799,7 +888,7 @@ fn get_srs(
        None => None,
    };

    pyo3_asyncio::tokio::future_into_py(py, async move {
    pyo3_async_runtimes::tokio::future_into_py(py, async move {
        crate::execute::get_srs_cmd(srs_path, settings_path, logrows, commitment)
            .await
            .map_err(|e| {
@@ -833,6 +922,7 @@ fn get_srs(
    output=PathBuf::from(DEFAULT_SETTINGS),
    py_run_args = None,
))]
#[gen_stub_pyfunction]
fn gen_settings(
    model: PathBuf,
    output: PathBuf,
@@ -873,8 +963,6 @@ fn gen_settings(
/// max_logrows: int
/// Optional max logrows to use for calibration
///
/// only_range_check_rebase: bool
/// Check ranges when rebasing
///
/// Returns
/// -------
@@ -889,8 +977,8 @@ fn gen_settings(
    scales = None,
    scale_rebase_multiplier = DEFAULT_SCALE_REBASE_MULTIPLIERS.split(",").map(|x| x.parse().unwrap()).collect(),
    max_logrows = None,
    only_range_check_rebase = DEFAULT_ONLY_RANGE_CHECK_REBASE.parse().unwrap(),
))]
#[gen_stub_pyfunction]
fn calibrate_settings(
    py: Python,
    data: PathBuf,
@@ -901,9 +989,8 @@ fn calibrate_settings(
    scales: Option<Vec<crate::Scale>>,
    scale_rebase_multiplier: Vec<u32>,
    max_logrows: Option<u32>,
    only_range_check_rebase: bool,
) -> PyResult<Bound<'_, PyAny>> {
    pyo3_asyncio::tokio::future_into_py(py, async move {
    pyo3_async_runtimes::tokio::future_into_py(py, async move {
        crate::execute::calibrate(
            model,
            data,
@@ -912,7 +999,6 @@ fn calibrate_settings(
            lookup_safety_margin,
            scales,
            scale_rebase_multiplier,
            only_range_check_rebase,
            max_logrows,
        )
        .await
@@ -956,6 +1042,7 @@ fn calibrate_settings(
    vk_path=None,
    srs_path=None,
))]
#[gen_stub_pyfunction]
fn gen_witness(
    py: Python,
    data: PathBuf,
@@ -964,7 +1051,7 @@ fn gen_witness(
    vk_path: Option<PathBuf>,
    srs_path: Option<PathBuf>,
) -> PyResult<Bound<'_, PyAny>> {
    pyo3_asyncio::tokio::future_into_py(py, async move {
    pyo3_async_runtimes::tokio::future_into_py(py, async move {
        let output = crate::execute::gen_witness(model, data, output, vk_path, srs_path)
            .await
            .map_err(|e| {
@@ -993,6 +1080,7 @@ fn gen_witness(
    witness=PathBuf::from(DEFAULT_WITNESS),
    model=PathBuf::from(DEFAULT_COMPILED_CIRCUIT),
))]
#[gen_stub_pyfunction]
fn mock(witness: PathBuf, model: PathBuf) -> PyResult<bool> {
    crate::execute::mock(model, witness).map_err(|e| {
        let err_str = format!("Failed to run mock: {}", e);
@@ -1023,6 +1111,7 @@ fn mock(witness: PathBuf, model: PathBuf) -> PyResult<bool> {
    logrows=DEFAULT_AGGREGATED_LOGROWS.parse().unwrap(),
    split_proofs = false,
))]
#[gen_stub_pyfunction]
fn mock_aggregate(
    aggregation_snarks: Vec<PathBuf>,
    logrows: u32,
@@ -1070,6 +1159,7 @@ fn mock_aggregate(
    witness_path = None,
    disable_selector_compression=DEFAULT_DISABLE_SELECTOR_COMPRESSION.parse().unwrap(),
))]
#[gen_stub_pyfunction]
fn setup(
    model: PathBuf,
    vk_path: PathBuf,
@@ -1128,6 +1218,7 @@ fn setup(
    proof_type=ProofType::default(),
    srs_path=None,
))]
#[gen_stub_pyfunction]
fn prove(
    witness: PathBuf,
    model: PathBuf,
@@ -1183,6 +1274,7 @@ fn prove(
    srs_path=None,
    reduced_srs=DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION.parse::<bool>().unwrap(),
))]
#[gen_stub_pyfunction]
fn verify(
    proof_path: PathBuf,
    settings_path: PathBuf,
@@ -1242,6 +1334,7 @@ fn verify(
    disable_selector_compression=DEFAULT_DISABLE_SELECTOR_COMPRESSION.parse().unwrap(),
    commitment=DEFAULT_COMMITMENT.parse().unwrap(),
))]
#[gen_stub_pyfunction]
fn setup_aggregate(
    sample_snarks: Vec<PathBuf>,
    vk_path: PathBuf,
@@ -1292,6 +1385,7 @@ fn setup_aggregate(
    compiled_circuit=PathBuf::from(DEFAULT_COMPILED_CIRCUIT),
    settings_path=PathBuf::from(DEFAULT_SETTINGS),
))]
#[gen_stub_pyfunction]
fn compile_circuit(
    model: PathBuf,
    compiled_circuit: PathBuf,
@@ -1351,6 +1445,7 @@ fn compile_circuit(
    srs_path=None,
    commitment=DEFAULT_COMMITMENT.parse().unwrap(),
))]
#[gen_stub_pyfunction]
fn aggregate(
    aggregation_snarks: Vec<PathBuf>,
    proof_path: PathBuf,
@@ -1416,6 +1511,7 @@ fn aggregate(
    reduced_srs=DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION.parse().unwrap(),
    srs_path=None,
))]
#[gen_stub_pyfunction]
fn verify_aggr(
    proof_path: PathBuf,
    vk_path: PathBuf,
@@ -1463,6 +1559,7 @@ fn verify_aggr(
    calldata=PathBuf::from(DEFAULT_CALLDATA),
    addr_vk=None,
))]
#[gen_stub_pyfunction]
fn encode_evm_calldata<'a>(
    proof: PathBuf,
    calldata: PathBuf,
@@ -1515,6 +1612,7 @@ fn encode_evm_calldata<'a>(
    srs_path=None,
    reusable = DEFAULT_RENDER_REUSABLE.parse().unwrap(),
))]
#[gen_stub_pyfunction]
fn create_evm_verifier(
    py: Python,
    vk_path: PathBuf,
@@ -1524,7 +1622,7 @@ fn create_evm_verifier(
    srs_path: Option<PathBuf>,
    reusable: bool,
) -> PyResult<Bound<'_, PyAny>> {
    pyo3_asyncio::tokio::future_into_py(py, async move {
    pyo3_async_runtimes::tokio::future_into_py(py, async move {
        crate::execute::create_evm_verifier(
            vk_path,
            srs_path,
@@ -1574,6 +1672,7 @@ fn create_evm_verifier(
    abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
    srs_path=None
))]
#[gen_stub_pyfunction]
fn create_evm_vka(
    py: Python,
    vk_path: PathBuf,
@@ -1582,7 +1681,7 @@ fn create_evm_vka(
    abi_path: PathBuf,
    srs_path: Option<PathBuf>,
) -> PyResult<Bound<'_, PyAny>> {
    pyo3_asyncio::tokio::future_into_py(py, async move {
    pyo3_async_runtimes::tokio::future_into_py(py, async move {
        crate::execute::create_evm_vka(vk_path, srs_path, settings_path, sol_code_path, abi_path)
            .await
            .map_err(|e| {
@@ -1621,6 +1720,7 @@ fn create_evm_vka(
    abi_path=PathBuf::from(DEFAULT_VERIFIER_DA_ABI),
    witness_path=None,
))]
#[gen_stub_pyfunction]
fn create_evm_data_attestation(
    py: Python,
    input_data: PathBuf,
@@ -1629,7 +1729,7 @@ fn create_evm_data_attestation(
    abi_path: PathBuf,
    witness_path: Option<PathBuf>,
) -> PyResult<Bound<'_, PyAny>> {
    pyo3_asyncio::tokio::future_into_py(py, async move {
    pyo3_async_runtimes::tokio::future_into_py(py, async move {
        crate::execute::create_evm_data_attestation(
            settings_path,
            sol_code_path,
@@ -1681,6 +1781,7 @@ fn create_evm_data_attestation(
    output_source,
    rpc_url=None,
))]
#[gen_stub_pyfunction]
fn setup_test_evm_witness(
    py: Python,
    data_path: PathBuf,
@@ -1690,7 +1791,7 @@ fn setup_test_evm_witness(
    output_source: PyTestDataSource,
    rpc_url: Option<String>,
) -> PyResult<Bound<'_, PyAny>> {
    pyo3_asyncio::tokio::future_into_py(py, async move {
    pyo3_async_runtimes::tokio::future_into_py(py, async move {
        crate::execute::setup_test_evm_witness(
            data_path,
            compiled_circuit_path,
@@ -1718,6 +1819,7 @@ fn setup_test_evm_witness(
    optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
    private_key=None,
))]
#[gen_stub_pyfunction]
fn deploy_evm(
    py: Python,
    addr_path: PathBuf,
@@ -1727,7 +1829,7 @@ fn deploy_evm(
    optimizer_runs: usize,
    private_key: Option<String>,
) -> PyResult<Bound<'_, PyAny>> {
    pyo3_asyncio::tokio::future_into_py(py, async move {
    pyo3_async_runtimes::tokio::future_into_py(py, async move {
        crate::execute::deploy_evm(
            sol_code_path,
            rpc_url,
@@ -1756,6 +1858,7 @@ fn deploy_evm(
    optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
    private_key=None
))]
#[gen_stub_pyfunction]
fn deploy_da_evm(
    py: Python,
    addr_path: PathBuf,
@@ -1766,7 +1869,7 @@ fn deploy_da_evm(
    optimizer_runs: usize,
    private_key: Option<String>,
) -> PyResult<Bound<'_, PyAny>> {
    pyo3_asyncio::tokio::future_into_py(py, async move {
    pyo3_async_runtimes::tokio::future_into_py(py, async move {
        crate::execute::deploy_da_evm(
            input_data,
            settings_path,
@@ -1814,6 +1917,7 @@ fn deploy_da_evm(
    addr_da = None,
    addr_vk = None,
))]
#[gen_stub_pyfunction]
fn verify_evm<'a>(
    py: Python<'a>,
    addr_verifier: &'a str,
@@ -1836,7 +1940,7 @@ fn verify_evm<'a>(
        None
    };

    pyo3_asyncio::tokio::future_into_py(py, async move {
    pyo3_async_runtimes::tokio::future_into_py(py, async move {
        crate::execute::verify_evm(proof_path, addr_verifier, rpc_url, addr_da, addr_vk)
            .await
            .map_err(|e| {
@@ -1886,6 +1990,7 @@ fn verify_evm<'a>(
    srs_path=None,
    reusable = DEFAULT_RENDER_REUSABLE.parse().unwrap(),
))]
#[gen_stub_pyfunction]
fn create_evm_verifier_aggr(
    py: Python,
    aggregation_settings: Vec<PathBuf>,
@@ -1896,7 +2001,7 @@ fn create_evm_verifier_aggr(
    srs_path: Option<PathBuf>,
    reusable: bool,
) -> PyResult<Bound<'_, PyAny>> {
    pyo3_asyncio::tokio::future_into_py(py, async move {
    pyo3_async_runtimes::tokio::future_into_py(py, async move {
        crate::execute::create_evm_aggregate_verifier(
            vk_path,
            srs_path,
@@ -1916,15 +2021,19 @@ fn create_evm_verifier_aggr(
    })
}

// Define a function to gather stub information.
define_stub_info_gatherer!(stub_info);

// Python Module
#[pymodule]
fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
fn ezkl(m: &Bound<'_, PyModule>) -> PyResult<()> {
    pyo3_log::init();
    m.add_class::<PyRunArgs>()?;
    m.add_class::<PyG1Affine>()?;
    m.add_class::<PyG1>()?;
    m.add_class::<PyTestDataSource>()?;
    m.add_class::<PyCommitments>()?;
    m.add_class::<PyInputType>()?;
    m.add("__version__", env!("CARGO_PKG_VERSION"))?;
    m.add_function(wrap_pyfunction!(felt_to_big_endian, m)?)?;
    m.add_function(wrap_pyfunction!(felt_to_int, m)?)?;
@@ -1963,3 +2072,48 @@ fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
    m.add_function(wrap_pyfunction!(encode_evm_calldata, m)?)?;
    Ok(())
}

impl pyo3_stub_gen::PyStubType for CalibrationTarget {
    fn type_output() -> TypeInfo {
        TypeInfo {
            name: "str".to_string(),
            import: HashSet::new(),
        }
    }
}

impl pyo3_stub_gen::PyStubType for ProofType {
    fn type_output() -> TypeInfo {
        TypeInfo {
            name: "str".to_string(),
            import: HashSet::new(),
        }
    }
}

impl pyo3_stub_gen::PyStubType for TranscriptType {
    fn type_output() -> TypeInfo {
        TypeInfo {
            name: "str".to_string(),
            import: HashSet::new(),
        }
    }
}

impl pyo3_stub_gen::PyStubType for CheckMode {
    fn type_output() -> TypeInfo {
        TypeInfo {
            name: "str".to_string(),
            import: HashSet::new(),
        }
    }
}

impl pyo3_stub_gen::PyStubType for ContractType {
    fn type_output() -> TypeInfo {
        TypeInfo {
            name: "str".to_string(),
            import: HashSet::new(),
        }
    }
}

@@ -141,10 +141,11 @@ pub(crate) fn gen_vk(
        .map_err(|e| EZKLError::InternalError(format!("Failed to create verifying key: {}", e)))?;

    let mut serialized_vk = Vec::new();
    vk.write(&mut serialized_vk, halo2_proofs::SerdeFormat::RawBytes)
        .map_err(|e| {
            EZKLError::InternalError(format!("Failed to serialize verifying key: {}", e))
        })?;
    vk.write(
        &mut serialized_vk,
        halo2_proofs::SerdeFormat::RawBytesUnchecked,
    )
    .map_err(|e| EZKLError::InternalError(format!("Failed to serialize verifying key: {}", e)))?;

    Ok(serialized_vk)
}
@@ -165,7 +166,7 @@ pub(crate) fn gen_pk(
    let mut reader = BufReader::new(&vk[..]);
    let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
        &mut reader,
        halo2_proofs::SerdeFormat::RawBytes,
        halo2_proofs::SerdeFormat::RawBytesUnchecked,
        circuit.settings().clone(),
    )
    .map_err(|e| EZKLError::InternalError(format!("Failed to deserialize verifying key: {}", e)))?;
@@ -197,7 +198,7 @@ pub(crate) fn verify(
    let mut reader = BufReader::new(&vk[..]);
    let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
        &mut reader,
        halo2_proofs::SerdeFormat::RawBytes,
        halo2_proofs::SerdeFormat::RawBytesUnchecked,
        circuit_settings.clone(),
    )
    .map_err(|e| EZKLError::InternalError(format!("Failed to deserialize vk: {}", e)))?;
@@ -277,7 +278,7 @@ pub(crate) fn verify_aggr(
    let mut reader = BufReader::new(&vk[..]);
    let vk = VerifyingKey::<G1Affine>::read::<_, AggregationCircuit>(
        &mut reader,
        halo2_proofs::SerdeFormat::RawBytes,
        halo2_proofs::SerdeFormat::RawBytesUnchecked,
        (),
    )
    .map_err(|e| EZKLError::InternalError(format!("Failed to deserialize vk: {}", e)))?;
@@ -365,7 +366,7 @@ pub(crate) fn prove(
    let mut reader = BufReader::new(&pk[..]);
    let pk = ProvingKey::<G1Affine>::read::<_, GraphCircuit>(
        &mut reader,
        halo2_proofs::SerdeFormat::RawBytes,
        halo2_proofs::SerdeFormat::RawBytesUnchecked,
        circuit.settings().clone(),
    )
    .map_err(|e| EZKLError::InternalError(format!("Failed to deserialize proving key: {}", e)))?;
@@ -487,7 +488,7 @@ pub(crate) fn vk_validation(vk: Vec<u8>, settings: Vec<u8>) -> Result<bool, EZKL
    let mut reader = BufReader::new(&vk[..]);
    let _ = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
        &mut reader,
        halo2_proofs::SerdeFormat::RawBytes,
        halo2_proofs::SerdeFormat::RawBytesUnchecked,
        circuit_settings,
    )
    .map_err(|e| EZKLError::InternalError(format!("Failed to deserialize verifying key: {}", e)))?;
@@ -504,7 +505,7 @@ pub(crate) fn pk_validation(pk: Vec<u8>, settings: Vec<u8>) -> Result<bool, EZKL
    let mut reader = BufReader::new(&pk[..]);
    let _ = ProvingKey::<G1Affine>::read::<_, GraphCircuit>(
        &mut reader,
        halo2_proofs::SerdeFormat::RawBytes,
        halo2_proofs::SerdeFormat::RawBytesUnchecked,
        circuit_settings,
    )
    .map_err(|e| EZKLError::InternalError(format!("Failed to deserialize proving key: {}", e)))?;

@@ -22,6 +22,7 @@ use halo2curves::{
    bn256::{Bn256, Fr, G1Affine},
    ff::PrimeField,
};
use std::str::FromStr;
use wasm_bindgen::prelude::*;
use wasm_bindgen_console_logger::DEFAULT_LOGGER;

@@ -113,9 +114,15 @@ pub fn feltToFloat(
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn floatToFelt(
    input: f64,
    mut input: f64,
    scale: crate::Scale,
    input_type: &str,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
    crate::circuit::InputType::roundtrip(
        &crate::circuit::InputType::from_str(input_type)
            .map_err(|e| JsError::new(&format!("{}", e)))?,
        &mut input,
    );
    let int_rep =
        quantize_float(&input, 0.0, scale).map_err(|e| JsError::new(&format!("{}", e)))?;
    let felt = integer_rep_to_felt(int_rep);

@@ -8,10 +8,9 @@ use halo2_proofs::{
use log::debug;
#[cfg(feature = "python-bindings")]
use pyo3::{
    conversion::{FromPyObject, PyTryFrom},
    conversion::{FromPyObject, IntoPy},
    exceptions::PyValueError,
    prelude::*,
    types::PyString,
};
use serde::{Deserialize, Serialize};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
@@ -139,10 +138,9 @@ impl IntoPy<PyObject> for CheckMode {
#[cfg(feature = "python-bindings")]
/// Obtains CheckMode from PyObject (Required for CheckMode to be compatible with Python)
impl<'source> FromPyObject<'source> for CheckMode {
    fn extract(ob: &'source PyAny) -> PyResult<Self> {
        let trystr = <PyString as PyTryFrom>::try_from(ob)?;
        let strval = trystr.to_string();
        match strval.to_lowercase().as_str() {
    fn extract_bound(ob: &pyo3::Bound<'source, pyo3::PyAny>) -> PyResult<Self> {
        let trystr = String::extract_bound(ob)?;
        match trystr.to_lowercase().as_str() {
            "safe" => Ok(CheckMode::SAFE),
            "unsafe" => Ok(CheckMode::UNSAFE),
            _ => Err(PyValueError::new_err("Invalid value for CheckMode")),
@@ -161,8 +159,8 @@ impl IntoPy<PyObject> for Tolerance {
#[cfg(feature = "python-bindings")]
/// Obtains Tolerance from PyObject (Required for Tolerance to be compatible with Python)
impl<'source> FromPyObject<'source> for Tolerance {
    fn extract(ob: &'source PyAny) -> PyResult<Self> {
        if let Ok((val, scale)) = ob.extract::<(f32, f32)>() {
    fn extract_bound(ob: &pyo3::Bound<'source, pyo3::PyAny>) -> PyResult<Self> {
        if let Ok((val, scale)) = <(f32, f32)>::extract_bound(ob) {
            Ok(Tolerance {
                val,
                scale: utils::F32(scale),

@@ -97,4 +97,7 @@ pub enum CircuitError {
    /// Invalid scale
    #[error("negative scale for an op that requires positive inputs {0}")]
    NegativeScale(String),
    #[error("invalid input type {0}")]
    /// Invalid input type
    InvalidInputType(String),
}

@@ -13,6 +13,16 @@ use serde::{Deserialize, Serialize};
/// An enum representing the operations that consist of both lookups and arithmetic operations.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum HybridOp {
    Ln {
        scale: utils::F32,
    },
    Rsqrt {
        input_scale: utils::F32,
        output_scale: utils::F32,
    },
    Sqrt {
        scale: utils::F32,
    },
    RoundHalfToEven {
        scale: utils::F32,
        legs: usize,
@@ -35,7 +45,6 @@ pub enum HybridOp {
    },
    Div {
        denom: utils::F32,
        use_range_check_for_int: bool,
    },
    ReduceMax {
        axes: Vec<usize>,
@@ -112,6 +121,15 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid

    fn as_string(&self) -> String {
        match self {
            HybridOp::Rsqrt {
                input_scale,
                output_scale,
            } => format!(
                "RSQRT (input_scale={}, output_scale={})",
                input_scale, output_scale
            ),
            HybridOp::Sqrt { scale } => format!("SQRT(scale={})", scale),
            HybridOp::Ln { scale } => format!("LN(scale={})", scale),
            HybridOp::RoundHalfToEven { scale, legs } => {
                format!("ROUND_HALF_TO_EVEN(scale={}, legs={})", scale, legs)
            }
@@ -119,8 +137,8 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
            HybridOp::Floor { scale, legs } => format!("FLOOR(scale={}, legs={})", scale, legs),
            HybridOp::Round { scale, legs } => format!("ROUND(scale={}, legs={})", scale, legs),

            HybridOp::Max => format!("MAX"),
            HybridOp::Min => format!("MIN"),
            HybridOp::Max => "MAX".to_string(),
            HybridOp::Min => "MIN".to_string(),
            HybridOp::Recip {
                input_scale,
                output_scale,
@@ -128,13 +146,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
                "RECIP (input_scale={}, output_scale={})",
                input_scale, output_scale
            ),
            HybridOp::Div {
                denom,
                use_range_check_for_int,
            } => format!(
                "DIV (denom={}, use_range_check_for_int={})",
                denom, use_range_check_for_int
            ),
            HybridOp::Div { denom } => format!("DIV (denom={})", denom),
            HybridOp::SumPool {
                padding,
                stride,
@@ -189,6 +201,20 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
        values: &[ValTensor<F>],
    ) -> Result<Option<ValTensor<F>>, CircuitError> {
        Ok(Some(match self {
            HybridOp::Rsqrt {
                input_scale,
                output_scale,
            } => layouts::rsqrt(
                config,
                region,
                values[..].try_into()?,
                *input_scale,
                *output_scale,
            )?,
            HybridOp::Sqrt { scale } => {
                layouts::sqrt(config, region, values[..].try_into()?, *scale)?
            }
            HybridOp::Ln { scale } => layouts::ln(config, region, values[..].try_into()?, *scale)?,
            HybridOp::RoundHalfToEven { scale, legs } => {
                layouts::round_half_to_even(config, region, values[..].try_into()?, *scale, *legs)?
            }
@@ -227,13 +253,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
                integer_rep_to_felt(input_scale.0 as i128),
                integer_rep_to_felt(output_scale.0 as i128),
            )?,
            HybridOp::Div {
                denom,
                use_range_check_for_int,
                ..
            } => {
                if denom.0.fract() == 0.0 && *use_range_check_for_int {
                    layouts::loop_div(
            HybridOp::Div { denom, .. } => {
                if denom.0.fract() == 0.0 {
                    layouts::div(
                        config,
                        region,
                        values[..].try_into()?,
@@ -324,9 +346,18 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
            | HybridOp::ReduceArgMax { .. }
            | HybridOp::OneHot { .. }
            | HybridOp::ReduceArgMin { .. } => 0,
            HybridOp::Softmax { output_scale, .. } | HybridOp::Recip { output_scale, .. } => {

            HybridOp::Recip { output_scale, .. } | HybridOp::Rsqrt { output_scale, .. } => {
                multiplier_to_scale(output_scale.0 as f64)
            }
            HybridOp::Softmax {
                output_scale,
                input_scale,
                ..
            } => multiplier_to_scale((output_scale.0 * input_scale.0) as f64),
            HybridOp::Ln {
                scale: output_scale,
            } => 4 * multiplier_to_scale(output_scale.0 as f64),
            _ => in_scales[0],
        };
        Ok(scale)

@@ -1,5 +1,6 @@
use std::{
    collections::{HashMap, HashSet},
    f64::consts::E,
    ops::Range,
};

@@ -29,41 +30,96 @@ use crate::{
use super::*;
use crate::circuit::ops::lookup::LookupOp;

/// Same as div but splits the division into N parts
pub(crate) fn loop_div<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
/// Calculate the L1 distance between two tensors.
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::circuit::ops::layouts::l1_distance;
/// use halo2curves::bn256::Fr as Fp;
/// use ezkl::circuit::region::RegionCtx;
/// use ezkl::circuit::region::RegionSettings;
/// use ezkl::circuit::BaseConfig;
/// use ezkl::tensor::ValTensor;
/// let dummy_config = BaseConfig::dummy(12, 2);
/// let mut dummy_region = RegionCtx::new_dummy(0,2,RegionSettings::all_true(128,2));
/// let x = ValTensor::from_integer_rep_tensor(Tensor::<IntegerRep>::new(
///     Some(&[1, 2, 3, 2, 3, 4, 3, 4, 5]),
///     &[3, 3],
/// ).unwrap());
/// let k = ValTensor::from_integer_rep_tensor(Tensor::<IntegerRep>::new(
///     Some(&[1, 2, 3, 1, 2, 3, 1, 2, 3]),
///     &[3, 3],
/// ).unwrap());
/// let result = l1_distance::<Fp>(&dummy_config, &mut dummy_region, &[x, k]).unwrap();
/// let expected = Tensor::<IntegerRep>::new(Some(&[0, 0, 0, 1, 1, 1, 2, 2, 2]), &[3, 3]).unwrap();
/// assert_eq!(result.int_evals().unwrap(), expected);
/// ```
pub fn l1_distance<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    config: &BaseConfig<F>,
    region: &mut RegionCtx<F>,
    value: &[ValTensor<F>; 1],
    divisor: F,
    values: &[ValTensor<F>; 2],
) -> Result<ValTensor<F>, CircuitError> {
    if divisor == F::ONE {
        return Ok(value[0].clone());
    }
    let diff = pairwise(config, region, values, BaseOp::Sub)?;
    let abs_diff = abs(config, region, &[diff])?;

    // if integer val is divisible by 2, we can use a faster method and div > F::S
    let mut divisor = divisor;
    let mut num_parts = 1;
    Ok(abs_diff)
}

    while felt_to_integer_rep(divisor) % 2 == 0
        && felt_to_integer_rep(divisor) > (2_i128.pow(F::S - 4))
    {
        divisor = integer_rep_to_felt(felt_to_integer_rep(divisor) / 2);
        num_parts += 1;
    }
/// Determines whether, from a set of 3 tensors, the 1st is closest to a reference tensor.
/// Should only be used in the context of a monotonic function, like the product used in the division, recip, and sqrt arguments,
/// or the increasing powers of 2 in the ln argument, which is used to construct a convex error function.
fn optimum_convex_function<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    config: &BaseConfig<F>,
    region: &mut RegionCtx<F>,
    x: &ValTensor<F>,
    f: impl Fn(&BaseConfig<F>, &mut RegionCtx<F>, &ValTensor<F>) -> Result<ValTensor<F>, CircuitError>,
) -> Result<(), CircuitError> {
    let one = create_constant_tensor(F::from(1), 1);

    let output = div(config, region, value, divisor)?;
    if num_parts == 1 {
        return Ok(output);
    }
    let f_x = f(config, region, x)?;

    let divisor_int = 2_i128.pow(num_parts - 1);
    let divisor_felt = integer_rep_to_felt(divisor_int);
    if divisor_int <= 2_i128.pow(F::S - 3) {
        div(config, region, &[output], divisor_felt)
    } else {
        // keep splitting the divisor until it satisfies the condition
        loop_div(config, region, &[output], divisor_felt)
    }
    let x_plus_1 = pairwise(config, region, &[x.clone(), one.clone()], BaseOp::Add)?;
    let f_x_plus_1 = f(config, region, &x_plus_1)?;

    let x_minus_1 = pairwise(config, region, &[x.clone(), one.clone()], BaseOp::Sub)?;
    let f_x_minus_1 = f(config, region, &x_minus_1)?;

    // because the function is convex, the result should be the minimum of the three
    // note that we offset the x by 1 to get the next value
    // f(x) <= f(x+1) and f(x) <= f(x-1)
    // the result is 1 if the function is optimal solely because of the convexity of the function
    // the distances can be equal but this is only possible if f(x) and f(x+1) are both optimal (or f(x) and f(x-1)).
    let f_x_is_opt_rhs = less_equal(config, region, &[f_x.clone(), f_x_plus_1.clone()])?;
    let f_x_is_opt_lhs = less_equal(config, region, &[f_x.clone(), f_x_minus_1.clone()])?;

    let is_opt = and(config, region, &[f_x_is_opt_lhs, f_x_is_opt_rhs])?;

    let mut comparison_unit = create_constant_tensor(F::ONE, is_opt.len());
    comparison_unit.reshape(is_opt.dims())?;

    // assert that the result is 1
    enforce_equality(config, region, &[is_opt, comparison_unit])?;

    Ok(())
}

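// Editorial worked example (hedged, not part of the diff): for a convex error such as
// e(q) = |c - q * d|, take c = 10 and d = 3. Then e(3) = 1, e(4) = 2 and e(2) = 4, so
// the claimed point q = 3 satisfies e(q) <= e(q + 1) and e(q) <= e(q - 1), while any
// other q violates one of the two inequalities; this is exactly the pair of
// less_equal checks that optimum_convex_function enforces.
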
/// Err is less than some constant
|
||||
pub fn diff_less_than<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
|
||||
config: &BaseConfig<F>,
|
||||
region: &mut RegionCtx<F>,
|
||||
values: &[ValTensor<F>; 2],
|
||||
constant: F,
|
||||
) -> Result<(), CircuitError> {
|
||||
let distance = l1_distance(config, region, values)?;
|
||||
|
||||
let constant = create_constant_tensor(constant, 1);
|
||||
let is_less = less(config, region, &[distance.clone(), constant.clone()])?;
|
||||
|
||||
// assert the result is 1
|
||||
let comparison_unit = create_constant_tensor(F::ONE, is_less.len());
|
||||
enforce_equality(config, region, &[is_less, comparison_unit])?;
|
||||
|
||||
Ok(())
|
||||
}
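
To see what optimum_convex_function enforces, here is a minimal off-circuit sketch in plain Rust over integers; the names and the example error function are illustrative, not part of the ezkl API. For a convex f, a point x is a global minimiser exactly when f(x) <= f(x+1) and f(x) <= f(x-1), which is the pair of less_equal constraints asserted above.

    /// Off-circuit sketch: optimality test for a convex integer function.
    fn is_optimum_of_convex(f: impl Fn(i128) -> i128, x: i128) -> bool {
        f(x) <= f(x + 1) && f(x) <= f(x - 1)
    }

    fn main() {
        // error of a claimed square root y for the input 10: |y*y - 10|
        let err = |y: i128| (y * y - 10).abs();
        assert!(is_optimum_of_convex(&err, 3)); // 3 = round(sqrt(10))
        assert!(!is_optimum_of_convex(&err, 4));
    }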

/// Div accumulated layout
@@ -80,13 +136,8 @@ pub(crate) fn div<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    let input = value[0].clone();
    let input_dims = input.dims();

    let range_check_bracket = felt_to_integer_rep(div) / 2;

    let divisor = create_constant_tensor(div, 1);

    let divisor = region.assign(&config.custom_gates.inputs[1], &divisor)?;
    region.increment(divisor.len());

    let is_assigned = !input.any_unknowns()? && !divisor.any_unknowns()?;

    let mut claimed_output: ValTensor<F> = if is_assigned {
@@ -117,19 +168,7 @@ pub(crate) fn div<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
        BaseOp::Mult,
    )?;

    let diff_with_input = pairwise(
        config,
        region,
        &[product.clone(), input.clone()],
        BaseOp::Sub,
    )?;

    range_check(
        config,
        region,
        &[diff_with_input],
        &(-range_check_bracket, range_check_bracket),
    )?;
    diff_less_than(config, region, &[input.clone(), product.clone()], div)?;

    Ok(claimed_output)
}
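
Replacing the explicit range_check with diff_less_than keeps the same soundness argument: the prover supplies claimed_output off-circuit, and the circuit only checks that claimed_output * divisor stays within one divisor of the input. A plain-integer sketch of that acceptance test (hypothetical helper, not the circuit code):

    /// The prover claims q ~= x / d; the constraint accepts iff |x - q*d| < d.
    fn div_claim_is_valid(x: i128, d: i128, q: i128) -> bool {
        (x - q * d).abs() < d
    }

    fn main() {
        assert!(div_claim_is_valid(7, 2, 4)); // |7 - 8| = 1 < 2
        assert!(div_claim_is_valid(7, 2, 3)); // |7 - 6| = 1 < 2: both roundings pass
        assert!(!div_claim_is_valid(7, 2, 5)); // |7 - 10| = 3 >= 2
    }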

@@ -145,19 +184,7 @@ pub(crate) fn recip<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    let input = value[0].clone();
    let input_dims = input.dims();

    let integer_input_scale = felt_to_integer_rep(input_scale);
    let integer_output_scale = felt_to_integer_rep(output_scale);

    // range_check_bracket is min of input_scale * output_scale and 2^F::S - 3
    let range_check_len = std::cmp::min(integer_output_scale, 2_i128.pow(F::S - 4));

    let input_scale_ratio = if range_check_len > 0 {
        integer_rep_to_felt(integer_input_scale * integer_output_scale / range_check_len)
    } else {
        F::ONE
    };

    let range_check_bracket = range_check_len / 2;
    let unit_scale = create_constant_tensor(output_scale * input_scale, 1);

    let is_assigned = !input.any_unknowns()?;

@@ -183,25 +210,22 @@ pub(crate) fn recip<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    let claimed_output = region.assign(&config.custom_gates.output, &claimed_output)?;
    region.increment(claimed_output.len());

    // this is now of scale 2 * scale
    let product = pairwise(
        config,
        region,
        &[claimed_output.clone(), input.clone()],
        BaseOp::Mult,
    )?;

    // divide by input_scale
    let rebased_div = loop_div(config, region, &[product], input_scale_ratio)?;

    let zero_inverse_val =
        tensor::ops::nonlinearities::zero_recip(felt_to_integer_rep(output_scale) as f64)[0];
    let zero_inverse = create_constant_tensor(integer_rep_to_felt(zero_inverse_val), 1);

    let equal_zero_mask = equals_zero(config, region, &[input.clone()])?;

    let not_equal_zero_mask = not(config, region, &[equal_zero_mask.clone()])?;
    let equal_inverse_mask = equals(config, region, &[claimed_output.clone(), zero_inverse])?;

    let masked_unit_scale = pairwise(
        config,
        region,
        &[unit_scale.clone(), not_equal_zero_mask.clone()],
        BaseOp::Mult,
    )?;

    // assert the two masks are equal
    enforce_equality(
        config,
@@ -209,24 +233,135 @@ pub(crate) fn recip<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
        &[equal_zero_mask.clone(), equal_inverse_mask],
    )?;

    let unit_scale = create_constant_tensor(integer_rep_to_felt(range_check_len), 1);
    let err_func = |config: &BaseConfig<F>,
                    region: &mut RegionCtx<F>,
                    x: &ValTensor<F>|
     -> Result<ValTensor<F>, CircuitError> {
        let product = pairwise(config, region, &[x.clone(), input.clone()], BaseOp::Mult)?;

        let unit_mask = pairwise(config, region, &[equal_zero_mask, unit_scale], BaseOp::Mult)?;
        let distance = l1_distance(
            config,
            region,
            &[product.clone(), masked_unit_scale.clone()],
        )?;
        Ok(distance)
    };

    // now add the unit mask to the rebased_div
    let rebased_offset_div = pairwise(config, region, &[rebased_div, unit_mask], BaseOp::Add)?;

    // at most the error should be in the original unit scale's range
    range_check(
        config,
        region,
        &[rebased_offset_div],
        &(range_check_bracket, 3 * range_check_bracket),
    )?;
    optimum_convex_function(config, region, &claimed_output, err_func)?;

    Ok(claimed_output)
}
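
The zero-input masking above mirrors the witness generation: where the input is zero, the claimed output must equal the designated zero-inverse value, and the product-error check is gated off by the mask. A hedged sketch of the witness-side arithmetic (helper name and the positive-input assumption are illustrative; the real zero value comes from tensor::ops::nonlinearities::zero_recip):

    /// Witness-side sketch of recip at fixed-point scales; assumes x >= 0.
    fn recip_witness(x: i128, input_scale: i128, output_scale: i128, zero_inverse: i128) -> i128 {
        if x == 0 {
            return zero_inverse; // claimed output must equal the designated zero inverse
        }
        let num = input_scale * output_scale;
        (num + x / 2) / x // rounded division
    }

    fn main() {
        // at input and output scale 4, the input 2 encodes 0.5, and 1/0.5 = 2.0 encodes to 8
        assert_eq!(recip_witness(2, 4, 4, 9999), 8);
    }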

/// Square root accumulated layout
/// # Example
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::circuit::ops::layouts::sqrt;
/// use halo2curves::bn256::Fr as Fp;
/// use ezkl::circuit::region::RegionCtx;
/// use ezkl::circuit::region::RegionSettings;
/// use ezkl::circuit::BaseConfig;
/// use ezkl::tensor::ValTensor;
/// let dummy_config = BaseConfig::dummy(12, 2);
/// let mut dummy_region = RegionCtx::new_dummy(0,2,RegionSettings::all_true(128,2));
/// let x = ValTensor::from_integer_rep_tensor(Tensor::<IntegerRep>::new(
///     Some(&[1, 2, 3, 2, 3, 4, 3, 4, 9]),
///     &[3, 3],
/// ).unwrap());
/// let result = sqrt::<Fp>(&dummy_config, &mut dummy_region, &[x], 1.0.into()).unwrap();
/// let expected = Tensor::<IntegerRep>::new(Some(&[1, 1, 2, 1, 2, 2, 2, 2, 3]), &[3, 3]).unwrap();
/// assert_eq!(result.int_evals().unwrap(), expected);
/// ```
pub fn sqrt<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    config: &BaseConfig<F>,
    region: &mut RegionCtx<F>,
    value: &[ValTensor<F>; 1],
    input_scale: utils::F32,
) -> Result<ValTensor<F>, CircuitError> {
    let input = value[0].clone();
    let input_dims = input.dims();

    let unit_scale = create_constant_tensor(integer_rep_to_felt(input_scale.0 as IntegerRep), 1);

    let is_assigned = !input.any_unknowns()?;

    let mut claimed_output: ValTensor<F> = if is_assigned {
        let input_evals = input.int_evals()?;
        tensor::ops::nonlinearities::sqrt(&input_evals, input_scale.0 as f64)
            .par_iter()
            .map(|x| Value::known(integer_rep_to_felt(*x)))
            .collect::<Tensor<Value<F>>>()
            .into()
    } else {
        Tensor::new(
            Some(&vec![Value::<F>::unknown(); input.len()]),
            &[input.len()],
        )?
        .into()
    };
    claimed_output.reshape(input_dims)?;
    let claimed_output = region.assign(&config.custom_gates.output, &claimed_output)?;
    region.increment(claimed_output.len());

    // force the output to be positive or zero
    let claimed_output = abs(config, region, &[claimed_output.clone()])?;

    // rescaled input
    let rescaled_input = pairwise(config, region, &[input.clone(), unit_scale], BaseOp::Mult)?;

    let err_func = |config: &BaseConfig<F>,
                    region: &mut RegionCtx<F>,
                    x: &ValTensor<F>|
     -> Result<ValTensor<F>, CircuitError> {
        let product = pairwise(config, region, &[x.clone(), x.clone()], BaseOp::Mult)?;
        let distance = l1_distance(config, region, &[product.clone(), rescaled_input.clone()])?;
        Ok(distance)
    };

    optimum_convex_function(config, region, &claimed_output, err_func)?;

    Ok(claimed_output)
}
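
sqrt follows the same claim-then-verify pattern: the witness is computed numerically, and the circuit only enforces that the convex error |y*y - scale*x| is minimal at the claimed y. An off-circuit sketch of the witness (illustrative helper, non-negative inputs assumed):

    fn sqrt_witness(x: i128, scale: i128) -> i128 {
        // rescale so that y*y and scale*x live at the same fixed-point scale
        ((x as f64 * scale as f64).sqrt()).round() as i128
    }

    fn main() {
        // matches the doc-test above: at scale 1, sqrt(9) = 3 and sqrt(3) rounds to 2
        assert_eq!(sqrt_witness(9, 1), 3);
        assert_eq!(sqrt_witness(3, 1), 2);
    }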

/// Reciprocal square root accumulated layout
/// # Example
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::circuit::ops::layouts::rsqrt;
/// use halo2curves::bn256::Fr as Fp;
/// use ezkl::circuit::region::RegionCtx;
/// use ezkl::circuit::region::RegionSettings;
/// use ezkl::circuit::BaseConfig;
/// use ezkl::tensor::ValTensor;
/// let dummy_config = BaseConfig::dummy(12, 2);
/// let mut dummy_region = RegionCtx::new_dummy(0,2,RegionSettings::all_true(128,2));
/// let x = ValTensor::from_integer_rep_tensor(Tensor::<IntegerRep>::new(
///     Some(&[1, 2, 3, 2, 3, 4, 3, 4, 5]),
///     &[3, 3],
/// ).unwrap());
/// let result = rsqrt::<Fp>(&dummy_config, &mut dummy_region, &[x], 1.0.into(), 1.0.into()).unwrap();
/// let expected = Tensor::<IntegerRep>::new(Some(&[1, 1, 1, 1, 1, 1, 1, 1, 1]), &[3, 3]).unwrap();
/// assert_eq!(result.int_evals().unwrap(), expected);
/// ```
pub fn rsqrt<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    config: &BaseConfig<F>,
    region: &mut RegionCtx<F>,
    value: &[ValTensor<F>; 1],
    input_scale: utils::F32,
    output_scale: utils::F32,
) -> Result<ValTensor<F>, CircuitError> {
    let sqrt = sqrt(config, region, value, input_scale)?;

    let felt_output_scale = integer_rep_to_felt(output_scale.0 as IntegerRep);
    let felt_input_scale = integer_rep_to_felt(input_scale.0 as IntegerRep);

    let recip = recip(config, region, &[sqrt], felt_input_scale, felt_output_scale)?;

    Ok(recip)
}
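
rsqrt is pure composition at the layout level, so its correctness reduces to the sqrt and recip checks above. Schematically, over witness values (hypothetical helper; the real rounding is whatever sqrt and recip each apply):

    fn rsqrt_witness(x: i128, input_scale: i128, output_scale: i128) -> i128 {
        let s = (x as f64 / input_scale as f64).sqrt();
        (output_scale as f64 / s).round() as i128
    }

    fn main() {
        // at unit scales, 1/sqrt(4) = 0.5, which rounds to 1 — matching the
        // all-ones expected tensor in the doc-test above
        assert_eq!(rsqrt_witness(4, 1, 1), 1);
    }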

/// Dot product of two tensors.
/// ```
/// use ezkl::tensor::Tensor;
@@ -1552,6 +1687,7 @@ pub(crate) fn linearize_nd_index<F: PrimeField + TensorType + PartialOrd + std::
    Ok(output.into())
}

// assumes unique values in fullset
pub(crate) fn get_missing_set_elements<
    F: PrimeField + TensorType + PartialOrd + std::hash::Hash,
>(
@@ -1564,6 +1700,8 @@ pub(crate) fn get_missing_set_elements<
    let set_len = fullset.len();
    input.flatten();

    // while fullset is less than len of input concat

    let is_assigned = !input.any_unknowns()? && !fullset.any_unknowns()?;

    let mut claimed_output: ValTensor<F> = if is_assigned {
@@ -1805,6 +1943,10 @@ pub fn sum<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    region: &mut RegionCtx<F>,
    values: &[ValTensor<F>; 1],
) -> Result<ValTensor<F>, CircuitError> {
    if values[0].len() == 1 {
        return Ok(values[0].clone());
    }

    region.flush()?;
    // time this entire function run
    let global_start = instant::Instant::now();
@@ -3102,7 +3244,7 @@ pub fn sumpool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    last_elem.reshape(&[&[batch_size, image_channels], shape].concat())?;

    if normalized {
        last_elem = loop_div(config, region, &[last_elem], F::from(kernel_len as u64))?;
        last_elem = div(config, region, &[last_elem], F::from(kernel_len as u64))?;
    }
    Ok(last_elem)
}
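
The sumpool change swaps loop_div for a single div when normalizing by the kernel size; arithmetically this is a plain mean over each pooling window. For reference, an illustrative plain-Rust sketch:

    /// Mean of one pooling window at integer scale: sum, then rounded division
    /// by the kernel length, as the div layout tolerates.
    fn normalized_window(window: &[i128]) -> i128 {
        let sum: i128 = window.iter().sum();
        let k = window.len() as i128;
        (sum + k / 2) / k
    }

    fn main() {
        assert_eq!(normalized_window(&[1, 2, 3, 4]), 3); // (10 + 2) / 4 = 3
    }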

@@ -4507,6 +4649,293 @@ pub fn ceil<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    )
}

/// integer ln layout
/// # Arguments
/// * `config` - BaseConfig
/// * `region` - RegionCtx
/// * `values` - &[ValTensor<F>; 1]
/// * `scale` - utils::F32
/// # Returns
/// * ValTensor<F>
/// # Example
///
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::circuit::ops::layouts::ln;
/// use ezkl::tensor::val::ValTensor;
/// use halo2curves::bn256::Fr as Fp;
/// use ezkl::circuit::region::RegionCtx;
/// use ezkl::circuit::region::RegionSettings;
/// use ezkl::circuit::BaseConfig;
/// let dummy_config = BaseConfig::dummy(12, 2);
/// let mut dummy_region = RegionCtx::new_dummy(0,2,RegionSettings::all_true(128,2));
/// let x = ValTensor::from_integer_rep_tensor(Tensor::<IntegerRep>::new(
///     Some(&[3, 2, 3, 1]),
///     &[1, 1, 2, 2],
/// ).unwrap());
///
/// let result = ln::<Fp>(&dummy_config, &mut dummy_region, &[x], 2.0.into()).unwrap();
/// let expected = Tensor::<IntegerRep>::new(Some(&[4, 0, 4, -8]), &[1, 1, 2, 2]).unwrap();
/// assert_eq!(result.int_evals().unwrap(), expected);
///
/// ```
pub fn ln<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    config: &BaseConfig<F>,
    region: &mut RegionCtx<F>,
    values: &[ValTensor<F>; 1],
    scale: utils::F32,
) -> Result<ValTensor<F>, CircuitError> {
    // first generate the claimed val

    let mut input = values[0].clone();
    let scale_as_felt = integer_rep_to_felt(scale.0.round() as IntegerRep);

    let triple_scaled_as_felt_tensor =
        create_constant_tensor(scale_as_felt * scale_as_felt * scale_as_felt, 1);

    // natural ln is log2(x) * ln(2)
    let ln2 = utils::F32::from(2.0_f32.ln());
    // now create a constant tensor for ln2 with scale
    let ln2_tensor: ValTensor<F> = create_constant_tensor(
        integer_rep_to_felt((ln2.0 * scale.0).round() as IntegerRep),
        1,
    );
    let unit = create_constant_tensor(integer_rep_to_felt(1), 1);
    let negative_one = create_constant_tensor(integer_rep_to_felt(-1), 1);

    // 2. assign the image
    if !input.all_prev_assigned() {
        input = region.assign(&config.custom_gates.inputs[0], &input)?;
        // don't need to increment because the claimed output is assigned to output and incremented accordingly
    }

    let is_assigned = !input.any_unknowns()?;

    let mut claimed_output: ValTensor<F> = if is_assigned {
        let input_evals = input.int_evals()?;
        // returns an integer with the base 2 logarithm
        tensor::ops::nonlinearities::ilog2(&input_evals.clone(), scale.0 as f64)
            .par_iter()
            .map(|x| Value::known(integer_rep_to_felt(*x)))
            .collect::<Tensor<Value<F>>>()
            .into()
    } else {
        Tensor::new(
            Some(&vec![Value::<F>::unknown(); input.len()]),
            &[input.len()],
        )?
        .into()
    };
    claimed_output.reshape(input.dims())?;
    region.assign(&config.custom_gates.output, &claimed_output)?;
    region.increment(claimed_output.len());

    let pow2_of_claimed_output = nonlinearity(
        config,
        region,
        &[claimed_output.clone()],
        &LookupOp::PowersOfTwo { scale },
    )?;

    let num_bits = (std::mem::size_of::<IntegerRep>() * 8) as IntegerRep;

    region.update_max_min_lookup_inputs_force(-num_bits, num_bits)?;

    // now subtract 1 from the claimed output
    let claimed_output_minus_one = pairwise(
        config,
        region,
        &[claimed_output.clone(), unit.clone()],
        BaseOp::Sub,
    )?;

    // now add 1 to the claimed output
    let claimed_output_plus_one = pairwise(
        config,
        region,
        &[claimed_output.clone(), unit.clone()],
        BaseOp::Add,
    )?;

    // prior power of 2 is less than claimed output
    let prior_pow2 = nonlinearity(
        config,
        region,
        &[claimed_output_minus_one],
        &LookupOp::PowersOfTwo { scale },
    )?;

    // next power of 2 is greater than claimed output
    let next_pow2 = nonlinearity(
        config,
        region,
        &[claimed_output_plus_one],
        &LookupOp::PowersOfTwo { scale },
    )?;

    let distance_to_claimed = pairwise(
        config,
        region,
        &[input.clone(), pow2_of_claimed_output.clone()],
        BaseOp::Sub,
    )?;

    let abs_distance_to_claimed = abs(config, region, &[distance_to_claimed.clone()])?;

    let abs_distance_to_next_pow2 =
        l1_distance(config, region, &[input.clone(), next_pow2.clone()])?;

    let abs_distance_to_prior_pow2 =
        l1_distance(config, region, &[input.clone(), prior_pow2.clone()])?;

    // because we round up this can be equal
    let is_closest_to_0: ValTensor<F> = less(
        config,
        region,
        &[
            abs_distance_to_claimed.clone(),
            abs_distance_to_next_pow2.clone(),
        ],
    )?;

    let is_closest_to_1 = less(
        config,
        region,
        &[
            abs_distance_to_claimed.clone(),
            abs_distance_to_prior_pow2.clone(),
        ],
    )?;

    let is_closest = and(
        config,
        region,
        &[is_closest_to_0.clone(), is_closest_to_1.clone()],
    )?;

    let mut comparison_unit = create_constant_tensor(integer_rep_to_felt(1), is_closest.len());
    comparison_unit.reshape(is_closest.dims())?;
    let assigned_unit = region.assign(&config.custom_gates.inputs[1], &comparison_unit)?;

    enforce_equality(config, region, &[is_closest, assigned_unit])?;

    // get a linear interpolation now

    let sign_of_distance_to_claimed = sign(config, region, &[distance_to_claimed.clone()])?;
    let sign_of_distance_to_claimed_is_negative = equals(
        config,
        region,
        &[sign_of_distance_to_claimed.clone(), negative_one.clone()],
    )?;

    let sign_of_distance_to_claimed_is_positive = not(
        config,
        region,
        &[sign_of_distance_to_claimed_is_negative.clone()],
    )?;

    let pow2_prior_to_claimed_distance = pairwise(
        config,
        region,
        &[pow2_of_claimed_output.clone(), prior_pow2.clone()],
        BaseOp::Sub,
    )?;

    let pow2_next_to_claimed_distance = pairwise(
        config,
        region,
        &[next_pow2.clone(), pow2_of_claimed_output.clone()],
        BaseOp::Sub,
    )?;

    let recip_pow2_prior_to_claimed_distance = recip(
        config,
        region,
        &[pow2_prior_to_claimed_distance],
        scale_as_felt,
        scale_as_felt * scale_as_felt,
    )?;

    let interpolated_distance = pairwise(
        config,
        region,
        &[
            recip_pow2_prior_to_claimed_distance.clone(),
            distance_to_claimed.clone(),
        ],
        BaseOp::Mult,
    )?;

    let gated_prior_interpolated_distance = pairwise(
        config,
        region,
        &[
            interpolated_distance.clone(),
            sign_of_distance_to_claimed_is_negative.clone(),
        ],
        BaseOp::Mult,
    )?;

    let recip_next_to_claimed_distance = recip(
        config,
        region,
        &[pow2_next_to_claimed_distance],
        scale_as_felt,
        scale_as_felt * scale_as_felt,
    )?;

    let interpolated_distance_next = pairwise(
        config,
        region,
        &[
            recip_next_to_claimed_distance.clone(),
            distance_to_claimed.clone(),
        ],
        BaseOp::Mult,
    )?;

    let gated_next_interpolated_distance = pairwise(
        config,
        region,
        &[
            interpolated_distance_next.clone(),
            sign_of_distance_to_claimed_is_positive.clone(),
        ],
        BaseOp::Mult,
    )?;

    let scaled_claimed_output = pairwise(
        config,
        region,
        &[claimed_output.clone(), triple_scaled_as_felt_tensor],
        BaseOp::Mult,
    )?;

    let claimed_output = pairwise(
        config,
        region,
        &[
            scaled_claimed_output.clone(),
            gated_prior_interpolated_distance.clone(),
        ],
        BaseOp::Add,
    )?;

    let claimed_output = pairwise(
        config,
        region,
        &[
            claimed_output.clone(),
            gated_next_interpolated_distance.clone(),
        ],
        BaseOp::Add,
    )?;

    // now multiply the claimed output by ln2
    pairwise(config, region, &[claimed_output, ln2_tensor], BaseOp::Mult)
}
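
Numerically, the ln layout rests on the identity ln(x) = log2(x) * ln(2), with log2 computed as a rounded integer log and then corrected by linear interpolation between the neighbouring powers of two. A hedged off-circuit sketch of the leading term (names illustrative; the circuit's interpolation correction is omitted here):

    fn ln_witness(x: f64, scale: f64) -> f64 {
        // rounded base-2 exponent, as ilog2 produces
        let k = (x / scale).log2().round();
        // the circuit then interpolates between 2^(k-1), 2^k and 2^(k+1),
        // and multiplies by ln(2) at the end
        k * 2.0_f64.ln()
    }

    fn main() {
        // at scale 2, an input of 1 represents 0.5, and ln(0.5) ~= -0.693
        assert!((ln_witness(1.0, 2.0) - (-0.693)).abs() < 0.01);
    }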

/// round layout
/// # Arguments
/// * `config` - BaseConfig
@@ -4551,11 +4980,7 @@ pub fn round<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    let zero = ValType::Constant(F::ZERO);

    let one = create_constant_tensor(integer_rep_to_felt(1), 1);
    let assigned_one = region.assign(&config.custom_gates.inputs[1], &one)?;
    let negative_one = create_constant_tensor(integer_rep_to_felt(-1), 1);
    let assigned_negative_one = region.assign(&config.custom_gates.output, &negative_one)?;

    region.increment(1);

    // if scale is not exactly divisible by 2 we warn
    if scale.0 % 2.0 != 0.0 {
@@ -4588,8 +5013,8 @@ pub fn round<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    let last_elem = sliced_input.last()?;

    let sign = sliced_input.first()?;
    let is_positive = equals(config, region, &[sign.clone(), assigned_one.clone()])?;
    let is_negative = equals(config, region, &[sign, assigned_negative_one.clone()])?;
    let is_positive = equals(config, region, &[sign.clone(), one.clone()])?;
    let is_negative = equals(config, region, &[sign, negative_one.clone()])?;

    let is_greater_than_midway = greater_equal(
        config,
@@ -4706,9 +5131,6 @@ pub fn round_half_to_even<F: PrimeField + TensorType + PartialOrd + std::hash::H
        integer_rep_to_felt((scale.0 / 2.0).round() as IntegerRep),
        1,
    );
    let assigned_midway_point = region.assign(&config.custom_gates.inputs[1], &midway_point)?;

    region.increment(1);

    let dims = decomposition.dims().to_vec();
    let first_dims = decomposition.dims().to_vec()[..decomposition.dims().len() - 1].to_vec();
@@ -4732,11 +5154,8 @@ pub fn round_half_to_even<F: PrimeField + TensorType + PartialOrd + std::hash::H
    let penultimate_elem =
        sliced_input.get_slice(&[sliced_input.len() - 2..sliced_input.len() - 1])?;

    let is_equal_to_midway = equals(
        config,
        region,
        &[last_elem.clone(), assigned_midway_point.clone()],
    )?;
    let is_equal_to_midway =
        equals(config, region, &[last_elem.clone(), midway_point.clone()])?;
    // penultimate_elem is equal to midway point and even, do nothing
    let is_odd = nonlinearity(
        config,
@@ -4751,11 +5170,8 @@ pub fn round_half_to_even<F: PrimeField + TensorType + PartialOrd + std::hash::H
        &[is_odd.clone(), is_equal_to_midway.clone()],
    )?;

    let is_greater_than_midway = greater(
        config,
        region,
        &[last_elem.clone(), assigned_midway_point.clone()],
    )?;
    let is_greater_than_midway =
        greater(config, region, &[last_elem.clone(), midway_point.clone()])?;

    // if the number is equal to the midway point and odd, increment; likewise if it is greater than the midway point
    let is_odd_and_equal_to_midway_or_greater_than_midway = or(
@@ -5168,11 +5584,8 @@ pub(crate) fn percent<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>
        input_felt_scale,
        output_felt_scale,
    )?;
    // product of num * (1 / denom) = 2*output_scale
    let percent = pairwise(config, region, &[input, inv_denom], BaseOp::Mult)?;

    // rebase the percent to 2x the scale
    loop_div(config, region, &[percent], input_felt_scale)
    // product of num * (1 / denom) = input_scale * output_scale
    pairwise(config, region, &[input, inv_denom], BaseOp::Mult)
}
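
round_half_to_even implements banker's rounding on the decomposed remainder: exactly-midway values move to the nearest even multiple, everything else rounds normally. A plain-integer sketch of the decision logic (hypothetical helper; scale assumed even, as the layout expects):

    /// Round x to the nearest multiple of scale, ties to even.
    fn round_half_to_even(x: i128, scale: i128) -> i128 {
        let (q, r) = (x.div_euclid(scale), x.rem_euclid(scale));
        let up = r > scale / 2 || (r == scale / 2 && q % 2 != 0);
        (q + if up { 1 } else { 0 }) * scale
    }

    fn main() {
        assert_eq!(round_half_to_even(6, 4), 8);   // 1.5 -> 2 (even)
        assert_eq!(round_half_to_even(10, 4), 8);  // 2.5 -> 2 (even)
        assert_eq!(round_half_to_even(11, 4), 12); // 2.75 -> 3
    }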

/// Applies softmax
@@ -5196,7 +5609,7 @@ pub(crate) fn percent<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>
/// ).unwrap());
/// let result = softmax::<Fp>(&dummy_config, &mut dummy_region, &[x], 128.0.into(), (128.0 * 128.0).into()).unwrap();
/// // doubles the scale of the input
/// let expected = Tensor::<IntegerRep>::new(Some(&[2734, 2734, 2756, 2734, 2734, 2691]), &[2, 3]).unwrap();
/// let expected = Tensor::<IntegerRep>::new(Some(&[350012, 350012, 352768, 350012, 350012, 344500]), &[2, 3]).unwrap();
/// assert_eq!(result.int_evals().unwrap(), expected);
/// ```
pub fn softmax<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
@@ -5215,7 +5628,10 @@ pub fn softmax<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
        config,
        region,
        &[sub],
        &LookupOp::Exp { scale: input_scale },
        &LookupOp::Exp {
            scale: input_scale,
            base: E.into(),
        },
    )?;

    percent(config, region, &[ex.clone()], input_scale, output_scale)
@@ -5276,17 +5692,8 @@ pub fn range_check_percent<F: PrimeField + TensorType + PartialOrd + std::hash::
    let int_scale = scale.0 as IntegerRep;
    // felt scale
    let felt_scale = integer_rep_to_felt(int_scale);
    // range check len capped at 2^(S-3) and made divisible by 2
    let range_check_bracket = std::cmp::min(
        utils::F32(scale.0),
        utils::F32(2_f32.powf((F::S - 5) as f32)),
    )
    .0;

    let range_check_bracket_int = range_check_bracket as IntegerRep;

    // input scale ratio we multiply by tol such that in the new scale range_check_len represents tol percent
    let input_scale_ratio = ((scale.0.powf(2.0) / range_check_bracket) * tol) as IntegerRep / 2 * 2;
    let input_scale_ratio = (scale.0 * tol) as IntegerRep / 2 * 2;

    let recip = recip(
        config,
@@ -5302,7 +5709,7 @@ pub fn range_check_percent<F: PrimeField + TensorType + PartialOrd + std::hash::
    let product = pairwise(config, region, &[diff, recip], BaseOp::Mult)?;

    log::debug!("product: {}", product.show());
    let rebased_product = loop_div(
    let rebased_product = div(
        config,
        region,
        &[product],
@@ -5311,10 +5718,5 @@ pub fn range_check_percent<F: PrimeField + TensorType + PartialOrd + std::hash::
    log::debug!("rebased_product: {}", rebased_product.show());

    // check that it is within the tolerance range
    range_check(
        config,
        region,
        &[rebased_product],
        &(-range_check_bracket_int, range_check_bracket_int),
    )
    range_check(config, region, &[rebased_product], &(-int_scale, int_scale))
}
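
With the base-e Exp lookup, softmax stays entirely in fixed point: subtract the row max, exponentiate at input_scale, then normalize with percent at output_scale. An off-circuit sketch of the same arithmetic (illustrative helper; f64 stands in for the lookup-table values):

    fn softmax_witness(xs: &[i128], input_scale: f64, output_scale: f64) -> Vec<i128> {
        let max = *xs.iter().max().unwrap();
        let exps: Vec<f64> = xs
            .iter()
            .map(|x| ((x - max) as f64 / input_scale).exp())
            .collect();
        let sum: f64 = exps.iter().sum();
        exps.iter()
            .map(|e| (e / sum * output_scale).round() as i128)
            .collect()
    }

    fn main() {
        let out = softmax_witness(&[2, 2, 3], 128.0, 128.0 * 128.0);
        let total: i128 = out.iter().sum();
        // the outputs sum to ~1.0 at the output scale, up to rounding
        assert!((total - 16384).abs() <= 2);
    }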

@@ -16,11 +16,10 @@ use halo2curves::ff::PrimeField;
pub enum LookupOp {
    Div { denom: utils::F32 },
    IsOdd,
    Sqrt { scale: utils::F32 },
    Rsqrt { scale: utils::F32 },
    Sigmoid { scale: utils::F32 },
    PowersOfTwo { scale: utils::F32 },
    Ln { scale: utils::F32 },
    Exp { scale: utils::F32 },
    Sigmoid { scale: utils::F32 },
    Exp { scale: utils::F32, base: utils::F32 },
    Cos { scale: utils::F32 },
    ACos { scale: utils::F32 },
    Cosh { scale: utils::F32 },
@@ -50,14 +49,13 @@ impl LookupOp {
    pub fn as_path(&self) -> String {
        match self {
            LookupOp::Pow { scale, a } => format!("pow_{}_{}", scale, a),
            LookupOp::Ln { scale } => format!("ln_{}", scale),
            LookupOp::PowersOfTwo { scale } => format!("pow2_{}", scale),
            LookupOp::IsOdd => "is_odd".to_string(),
            LookupOp::Div { denom } => format!("div_{}", denom),
            LookupOp::Sigmoid { scale } => format!("sigmoid_{}", scale),
            LookupOp::Sqrt { scale } => format!("sqrt_{}", scale),
            LookupOp::Rsqrt { scale } => format!("rsqrt_{}", scale),
            LookupOp::Erf { scale } => format!("erf_{}", scale),
            LookupOp::Exp { scale } => format!("exp_{}", scale),
            LookupOp::Ln { scale } => format!("ln_{}", scale),
            LookupOp::Exp { scale, base } => format!("exp_{}_{}", scale, base),
            LookupOp::Cos { scale } => format!("cos_{}", scale),
            LookupOp::ACos { scale } => format!("acos_{}", scale),
            LookupOp::Cosh { scale } => format!("cosh_{}", scale),
@@ -82,6 +80,12 @@ impl LookupOp {
        let x = x[0].clone().map(|x| felt_to_integer_rep(x));
        let res =
            match &self {
                LookupOp::Ln { scale } => {
                    Ok::<_, TensorError>(tensor::ops::nonlinearities::ln(&x, scale.into()))
                }
                LookupOp::PowersOfTwo { scale } => {
                    Ok::<_, TensorError>(tensor::ops::nonlinearities::ipow2(&x, scale.0.into()))
                }
                LookupOp::IsOdd => Ok::<_, TensorError>(tensor::ops::nonlinearities::is_odd(&x)),
                LookupOp::Pow { scale, a } => Ok::<_, TensorError>(
                    tensor::ops::nonlinearities::pow(&x, scale.0.into(), a.0.into()),
@@ -92,21 +96,12 @@ impl LookupOp {
                LookupOp::Sigmoid { scale } => {
                    Ok::<_, TensorError>(tensor::ops::nonlinearities::sigmoid(&x, scale.into()))
                }
                LookupOp::Sqrt { scale } => {
                    Ok::<_, TensorError>(tensor::ops::nonlinearities::sqrt(&x, scale.into()))
                }
                LookupOp::Rsqrt { scale } => {
                    Ok::<_, TensorError>(tensor::ops::nonlinearities::rsqrt(&x, scale.into()))
                }
                LookupOp::Erf { scale } => {
                    Ok::<_, TensorError>(tensor::ops::nonlinearities::erffunc(&x, scale.into()))
                }
                LookupOp::Exp { scale } => {
                    Ok::<_, TensorError>(tensor::ops::nonlinearities::exp(&x, scale.into()))
                }
                LookupOp::Ln { scale } => {
                    Ok::<_, TensorError>(tensor::ops::nonlinearities::ln(&x, scale.into()))
                }
                LookupOp::Exp { scale, base } => Ok::<_, TensorError>(
                    tensor::ops::nonlinearities::exp(&x, scale.into(), base.into()),
                ),
                LookupOp::Cos { scale } => {
                    Ok::<_, TensorError>(tensor::ops::nonlinearities::cos(&x, scale.into()))
                }
@@ -163,15 +158,14 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Lookup
    /// Returns the name of the operation
    fn as_string(&self) -> String {
        match self {
            LookupOp::Ln { scale } => format!("LN(scale={})", scale),
            LookupOp::PowersOfTwo { scale } => format!("POWERS_OF_TWO(scale={})", scale),
            LookupOp::IsOdd => "IS_ODD".to_string(),
            LookupOp::Pow { a, scale } => format!("POW(scale={}, exponent={})", scale, a),
            LookupOp::Div { denom, .. } => format!("DIV(denom={})", denom),
            LookupOp::Ln { scale } => format!("LN(scale={})", scale),
            LookupOp::Sigmoid { scale } => format!("SIGMOID(scale={})", scale),
            LookupOp::Sqrt { scale } => format!("SQRT(scale={})", scale),
            LookupOp::Erf { scale } => format!("ERF(scale={})", scale),
            LookupOp::Rsqrt { scale } => format!("RSQRT(scale={})", scale),
            LookupOp::Exp { scale } => format!("EXP(scale={})", scale),
            LookupOp::Exp { scale, base } => format!("EXP(scale={}, base={})", scale, base),
            LookupOp::Tan { scale } => format!("TAN(scale={})", scale),
            LookupOp::ATan { scale } => format!("ATAN(scale={})", scale),
            LookupOp::Tanh { scale } => format!("TANH(scale={})", scale),
@@ -204,9 +198,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Lookup

    /// Returns the scale of the output of the operation.
    fn out_scale(&self, inputs_scale: Vec<crate::Scale>) -> Result<crate::Scale, CircuitError> {
        let scale = match self {
            _ => inputs_scale[0],
        };
        let scale = inputs_scale[0];
        Ok(scale)
    }
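
The net effect of the enum changes: the Sqrt and Rsqrt lookups are gone (those ops are now accumulated layouts, as above), Ln gains a PowersOfTwo companion table, and Exp is parameterised by its base. A small usage sketch; the import path is assumed, and the constructor values are illustrative:

    // import path assumed: LookupOp lives under circuit::ops::lookup
    use ezkl::circuit::ops::lookup::LookupOp;

    fn main() {
        let op = LookupOp::Exp {
            scale: 128.0.into(),
            base: std::f32::consts::E.into(),
        };
        // the cache path now encodes both parameters
        assert!(op.as_path().starts_with("exp_"));
    }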

@@ -105,7 +105,10 @@ impl InputType {
    }

    ///
    pub fn roundtrip<T: num::ToPrimitive + num::FromPrimitive + Clone>(&self, input: &mut T) {
    pub fn roundtrip<T: num::ToPrimitive + num::FromPrimitive + Clone + std::fmt::Debug>(
        &self,
        input: &mut T,
    ) {
        match self {
            InputType::Bool => {
                let boolean_input = input.clone().to_i64().unwrap();
@@ -118,7 +121,7 @@ impl InputType {
                *input = T::from_f32(f32_input).unwrap();
            }
            InputType::F32 => {
                let f32_input = input.clone().to_f32().unwrap();
                let f32_input: f32 = input.clone().to_f32().unwrap();
                *input = T::from_f32(f32_input).unwrap();
            }
            InputType::F64 => {
@@ -133,6 +136,22 @@ impl InputType {
    }
}

impl std::str::FromStr for InputType {
    type Err = CircuitError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "bool" => Ok(InputType::Bool),
            "f16" => Ok(InputType::F16),
            "f32" => Ok(InputType::F32),
            "f64" => Ok(InputType::F64),
            "int" => Ok(InputType::Int),
            "tdim" => Ok(InputType::TDim),
            e => Err(CircuitError::InvalidInputType(e.to_string())),
        }
    }
}
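
The new FromStr impl lets input types come straight from CLI flags or config strings. A usage sketch; the import path is an assumption (InputType is defined alongside the Input op):

    // import path assumed
    use ezkl::circuit::ops::InputType;
    use std::str::FromStr;

    fn main() {
        assert!(matches!(InputType::from_str("f32"), Ok(InputType::F32)));
        assert!(InputType::from_str("f128").is_err()); // unknown names map to InvalidInputType
    }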

///
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
pub struct Input {

@@ -211,7 +211,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
            self.min_lookup_inputs().to_string().green(),
            self.max_range_size().to_string().green(),
            self.dynamic_lookup_col_coord().to_string().green(),
            self.shuffle_col_coord().to_string().green(),
            self.max_dynamic_input_len().to_string().green()
        );
    }
@@ -474,7 +474,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
        Ok(())
    }

    /// Update the max and min forcefully
    pub fn update_max_min_lookup_inputs_force(
        &mut self,
        min: IntegerRep,
@@ -611,7 +611,6 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
        var: &VarTensor,
        values: &ValTensor<F>,
    ) -> Result<(ValTensor<F>, usize), CircuitError> {
        self.update_max_dynamic_input_len(values.len());

        if let Some(region) = &self.region {

139
src/commands.rs
@@ -2,12 +2,7 @@ use alloy::primitives::Address as H160;
use clap::{Command, Parser, Subcommand};
use clap_complete::{generate, Generator, Shell};
#[cfg(feature = "python-bindings")]
use pyo3::{
    conversion::{FromPyObject, PyTryFrom},
    exceptions::PyValueError,
    prelude::*,
    types::PyString,
};
use pyo3::{conversion::FromPyObject, exceptions::PyValueError, prelude::*};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::str::FromStr;
@@ -109,8 +104,8 @@ impl IntoPy<PyObject> for TranscriptType {
#[cfg(feature = "python-bindings")]
/// Obtains TranscriptType from PyObject (Required for TranscriptType to be compatible with Python)
impl<'source> FromPyObject<'source> for TranscriptType {
    fn extract(ob: &'source PyAny) -> PyResult<Self> {
        let trystr = <PyString as PyTryFrom>::try_from(ob)?;
    fn extract_bound(ob: &pyo3::Bound<'source, pyo3::PyAny>) -> PyResult<Self> {
        let trystr = String::extract_bound(ob)?;
        let strval = trystr.to_string();
        match strval.to_lowercase().as_str() {
            "poseidon" => Ok(TranscriptType::Poseidon),
@@ -196,9 +191,7 @@ pub enum ContractType {

impl Default for ContractType {
    fn default() -> Self {
        ContractType::Verifier {
            reusable: false,
        }
        ContractType::Verifier { reusable: false }
    }
}

@@ -210,10 +203,8 @@ impl std::fmt::Display for ContractType {
        match self {
            ContractType::Verifier { reusable: true } => {
                "verifier/reusable".to_string()
            },
            ContractType::Verifier {
                reusable: false,
            } => "verifier".to_string(),
            }
            ContractType::Verifier { reusable: false } => "verifier".to_string(),
            ContractType::VerifyingKeyArtifact => "vka".to_string(),
        }
    )
@@ -241,7 +232,6 @@ impl From<&str> for ContractType {
    }
}

#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, PartialOrd)]
/// wrapper for H160 to make it easy to parse into flag vals
pub struct H160Flag {
@@ -287,9 +277,8 @@ impl IntoPy<PyObject> for CalibrationTarget {
#[cfg(feature = "python-bindings")]
/// Obtains CalibrationTarget from PyObject (Required for CalibrationTarget to be compatible with Python)
impl<'source> FromPyObject<'source> for CalibrationTarget {
    fn extract(ob: &'source PyAny) -> PyResult<Self> {
        let trystr = <PyString as PyTryFrom>::try_from(ob)?;
        let strval = trystr.to_string();
    fn extract_bound(ob: &pyo3::Bound<'source, pyo3::PyAny>) -> PyResult<Self> {
        let strval = String::extract_bound(ob)?;
        match strval.to_lowercase().as_str() {
            "resources" => Ok(CalibrationTarget::Resources {
                col_overflow: false,
@@ -306,12 +295,8 @@ impl<'source> FromPyObject<'source> for CalibrationTarget {
impl IntoPy<PyObject> for ContractType {
    fn into_py(self, py: Python) -> PyObject {
        match self {
            ContractType::Verifier { reusable: true } => {
                "verifier/reusable".to_object(py)
            }
            ContractType::Verifier {
                reusable: false,
            } => "verifier".to_object(py),
            ContractType::Verifier { reusable: true } => "verifier/reusable".to_object(py),
            ContractType::Verifier { reusable: false } => "verifier".to_object(py),
            ContractType::VerifyingKeyArtifact => "vka".to_object(py),
        }
    }
@@ -320,31 +305,16 @@ impl IntoPy<PyObject> for ContractType {
#[cfg(feature = "python-bindings")]
/// Obtains ContractType from PyObject (Required for ContractType to be compatible with Python)
impl<'source> FromPyObject<'source> for ContractType {
    fn extract(ob: &'source PyAny) -> PyResult<Self> {
        let trystr = <PyString as PyTryFrom>::try_from(ob)?;
        let strval = trystr.to_string();
    fn extract_bound(ob: &pyo3::Bound<'source, pyo3::PyAny>) -> PyResult<Self> {
        let strval = String::extract_bound(ob)?;
        match strval.to_lowercase().as_str() {
            "verifier" => Ok(ContractType::Verifier {
                reusable: false,
            }),
            "verifier" => Ok(ContractType::Verifier { reusable: false }),
            "verifier/reusable" => Ok(ContractType::Verifier { reusable: true }),
            "vka" => Ok(ContractType::VerifyingKeyArtifact),
            _ => Err(PyValueError::new_err("Invalid value for ContractType")),
        }
    }
}
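
These extract -> extract_bound rewrites track pyo3's Bound API (0.21+), where extraction goes through Bound<'py, PyAny> and String::extract_bound replaces the PyString downcast. A minimal sketch of the same pattern for a made-up enum (illustrative, not from this file):

    use pyo3::prelude::*;

    enum Mode {
        Fast,
        Safe,
    }

    impl<'source> FromPyObject<'source> for Mode {
        fn extract_bound(ob: &pyo3::Bound<'source, pyo3::PyAny>) -> PyResult<Self> {
            // extract the Python string by value; no PyString downcast needed
            let s = String::extract_bound(ob)?;
            match s.to_lowercase().as_str() {
                "fast" => Ok(Mode::Fast),
                "safe" => Ok(Mode::Safe),
                _ => Err(pyo3::exceptions::PyValueError::new_err("invalid Mode")),
            }
        }
    }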

// not wasm
use lazy_static::lazy_static;

// if CARGO VERSION is 0.0.0 replace with "source - no compatibility guaranteed"
lazy_static! {
    /// The version of the ezkl library
    pub static ref VERSION: &'static str = if env!("CARGO_PKG_VERSION") == "0.0.0" {
        "source - no compatibility guaranteed"
    } else {
        env!("CARGO_PKG_VERSION")
    };
}

/// Get the styles for the CLI
pub fn get_styles() -> clap::builder::Styles {
@@ -353,49 +323,49 @@ pub fn get_styles() -> clap::builder::Styles {
        clap::builder::styling::Style::new()
            .bold()
            .underline()
            .fg_color(Some(clap::builder::styling::Color::Ansi(clap::builder::styling::AnsiColor::Cyan))),
            .fg_color(Some(clap::builder::styling::Color::Ansi(
                clap::builder::styling::AnsiColor::Cyan,
            ))),
    )
    .header(
        clap::builder::styling::Style::new()
            .bold()
            .underline()
            .fg_color(Some(clap::builder::styling::Color::Ansi(clap::builder::styling::AnsiColor::Cyan))),
    )
    .literal(
        clap::builder::styling::Style::new().fg_color(Some(clap::builder::styling::Color::Ansi(clap::builder::styling::AnsiColor::Magenta))),
    )
    .invalid(
        clap::builder::styling::Style::new()
            .bold()
            .fg_color(Some(clap::builder::styling::Color::Ansi(clap::builder::styling::AnsiColor::Red))),
    )
    .error(
        clap::builder::styling::Style::new()
            .bold()
            .fg_color(Some(clap::builder::styling::Color::Ansi(clap::builder::styling::AnsiColor::Red))),
            .fg_color(Some(clap::builder::styling::Color::Ansi(
                clap::builder::styling::AnsiColor::Cyan,
            ))),
    )
    .literal(clap::builder::styling::Style::new().fg_color(Some(
        clap::builder::styling::Color::Ansi(clap::builder::styling::AnsiColor::Magenta),
    )))
    .invalid(clap::builder::styling::Style::new().bold().fg_color(Some(
        clap::builder::styling::Color::Ansi(clap::builder::styling::AnsiColor::Red),
    )))
    .error(clap::builder::styling::Style::new().bold().fg_color(Some(
        clap::builder::styling::Color::Ansi(clap::builder::styling::AnsiColor::Red),
    )))
    .valid(
        clap::builder::styling::Style::new()
            .bold()
            .underline()
            .fg_color(Some(clap::builder::styling::Color::Ansi(clap::builder::styling::AnsiColor::Green))),
    )
    .placeholder(
        clap::builder::styling::Style::new().fg_color(Some(clap::builder::styling::Color::Ansi(clap::builder::styling::AnsiColor::White))),
            .fg_color(Some(clap::builder::styling::Color::Ansi(
                clap::builder::styling::AnsiColor::Green,
            ))),
    )
    .placeholder(clap::builder::styling::Style::new().fg_color(Some(
        clap::builder::styling::Color::Ansi(clap::builder::styling::AnsiColor::White),
    )))
}

/// Print completions for the given generator
pub fn print_completions<G: Generator>(gen: G, cmd: &mut Command) {
    generate(gen, cmd, cmd.get_name().to_string(), &mut std::io::stdout());
}

#[allow(missing_docs)]
#[derive(Parser, Debug, Clone)]
#[command(author, about, long_about = None)]
#[clap(version = *VERSION, styles = get_styles(), trailing_var_arg = true)]
#[clap(version = crate::version(), styles = get_styles(), trailing_var_arg = true)]
pub struct Cli {
    /// If provided, outputs the completion file for given shell
    #[clap(long = "generate", value_parser)]
@@ -405,7 +375,6 @@ pub struct Cli {
    pub command: Option<Commands>,
}

#[allow(missing_docs)]
#[derive(Debug, Subcommand, Clone, Deserialize, Serialize, PartialEq, PartialOrd, ToSubcommand)]
pub enum Commands {
@@ -455,7 +424,7 @@ pub enum Commands {
    },

    /// Calibrates the proving scale, lookup bits and logrows from a circuit settings file.
    CalibrateSettings {
        /// The path to the .json calibration data file.
        #[arg(short = 'D', long, default_value = DEFAULT_CALIBRATION_FILE, value_hint = clap::ValueHint::FilePath)]
        data: Option<PathBuf>,
@@ -486,9 +455,6 @@ pub enum Commands {
        /// max logrows to use for calibration, 26 is the max public SRS size
        #[arg(long, value_hint = clap::ValueHint::Other)]
        max_logrows: Option<u32>,
        // whether to only range check rebases (instead of trying both range check and lookup)
        #[arg(long, default_value = DEFAULT_ONLY_RANGE_CHECK_REBASE, action = clap::ArgAction::SetTrue)]
        only_range_check_rebase: Option<bool>,
    },

    /// Generates a dummy SRS
@@ -505,7 +471,7 @@ pub enum Commands {
        commitment: Option<Commitments>,
    },

    /// Gets an SRS from a circuit settings file.
    #[command(name = "get-srs")]
    GetSrs {
        /// The path to output the desired srs file, if set to None will save to ~/.ezkl/srs
@@ -590,7 +556,7 @@ pub enum Commands {
        require_equals = true,
        num_args = 0..=1,
        default_value_t = TranscriptType::default(),
        value_enum,
        value_hint = clap::ValueHint::Other
    )]
    transcript: TranscriptType,
@@ -640,7 +606,7 @@ pub enum Commands {
        #[arg(long, default_value = DEFAULT_DISABLE_SELECTOR_COMPRESSION, action = clap::ArgAction::SetTrue)]
        disable_selector_compression: Option<bool>,
    },
    /// Deploys a test contract that the data attester reads from and creates a data attestation formatted input.json file that contains call data information
    #[command(arg_required_else_help = true)]
    SetupTestEvmData {
        /// The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
@@ -664,7 +630,7 @@ pub enum Commands {
        #[arg(long, default_value = "on-chain", value_hint = clap::ValueHint::Other)]
        output_source: TestDataSource,
    },
    /// The Data Attestation Verifier contract stores the account calls to fetch data to feed into ezkl. This call data can be updated by an admin account. This tests that the admin account is able to update this call data.
    #[command(arg_required_else_help = true)]
    TestUpdateAccountCalls {
        /// The path to the verifier contract's address
@@ -677,7 +643,7 @@ pub enum Commands {
        #[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
        rpc_url: Option<String>,
    },
    /// Swaps the positions in the transcript that correspond to commitments
    SwapProofCommitments {
        /// The path to the proof file
        #[arg(short = 'P', long, default_value = DEFAULT_PROOF, value_hint = clap::ValueHint::FilePath)]
@@ -687,7 +653,7 @@ pub enum Commands {
        witness_path: Option<PathBuf>,
    },

    /// Loads model, data, and creates proof
    Prove {
        /// The path to the .json witness file (generated using the gen-witness command)
        #[arg(short = 'W', long, default_value = DEFAULT_WITNESS, value_hint = clap::ValueHint::FilePath)]
@@ -709,7 +675,7 @@ pub enum Commands {
        require_equals = true,
        num_args = 0..=1,
        default_value_t = ProofType::Single,
        value_enum,
        value_hint = clap::ValueHint::Other
    )]
    proof_type: ProofType,
@@ -717,7 +683,7 @@ pub enum Commands {
        #[arg(long, default_value = DEFAULT_CHECKMODE, value_hint = clap::ValueHint::Other)]
        check_mode: Option<CheckMode>,
    },
    /// Encodes a proof into evm calldata
    #[command(name = "encode-evm-calldata")]
    EncodeEvmCalldata {
        /// The path to the proof file (generated using the prove command)
@@ -730,7 +696,7 @@ pub enum Commands {
        #[arg(long, value_hint = clap::ValueHint::Other)]
        addr_vk: Option<H160Flag>,
    },
    /// Creates an Evm verifier for a single proof
    #[command(name = "create-evm-verifier")]
    CreateEvmVerifier {
        /// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
@@ -752,7 +718,7 @@ pub enum Commands {
        #[arg(long, default_value = DEFAULT_RENDER_REUSABLE, action = clap::ArgAction::SetTrue)]
        reusable: Option<bool>,
    },
    /// Creates an Evm verifier artifact for a single proof to be used by the reusable verifier
    #[command(name = "create-evm-vka")]
    CreateEvmVKArtifact {
        /// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
@@ -771,7 +737,7 @@ pub enum Commands {
        #[arg(long, default_value = DEFAULT_VK_ABI, value_hint = clap::ValueHint::FilePath)]
        abi_path: Option<PathBuf>,
    },
    /// Creates an Evm verifier that attests to on-chain inputs for a single proof
    #[command(name = "create-evm-da")]
    CreateEvmDataAttestation {
        /// The path to load circuit settings .json file from (generated using the gen-settings command)
@@ -795,7 +761,7 @@ pub enum Commands {
        witness: Option<PathBuf>,
    },

    /// Creates an Evm verifier for an aggregate proof
    #[command(name = "create-evm-verifier-aggr")]
    CreateEvmVerifierAggr {
        /// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
@@ -859,7 +825,7 @@ pub enum Commands {
        #[arg(long, default_value = DEFAULT_COMMITMENT, value_hint = clap::ValueHint::Other)]
        commitment: Option<Commitments>,
    },
    /// Deploys an evm contract (verifier, reusable verifier, or vk artifact) that is generated by ezkl
    DeployEvm {
        /// The path to the Solidity code (generated using the create-evm-verifier command)
        #[arg(long, default_value = DEFAULT_SOL_CODE, value_hint = clap::ValueHint::FilePath)]
@@ -880,7 +846,7 @@ pub enum Commands {
        #[arg(long = "contract-type", short = 'C', default_value = DEFAULT_CONTRACT_DEPLOYMENT_TYPE, value_hint = clap::ValueHint::Other)]
        contract: ContractType,
    },
    /// Deploys an evm verifier that allows for data attestation
    #[command(name = "deploy-evm-da")]
    DeployEvmDataAttestation {
        /// The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
@@ -905,7 +871,7 @@ pub enum Commands {
        #[arg(short = 'P', long, value_hint = clap::ValueHint::Other)]
        private_key: Option<String>,
    },
    /// Verifies a proof using a local Evm executor, returning accept or reject
    #[command(name = "verify-evm")]
    VerifyEvm {
        /// The path to the proof file (generated using the prove command)
@@ -933,7 +899,6 @@ pub enum Commands {
    },
}

impl Commands {
    /// Converts the commands to a json string
    pub fn as_json(&self) -> String {
@@ -944,4 +909,4 @@ impl Commands {
    pub fn from_json(json: &str) -> Self {
        serde_json::from_str(json).unwrap()
    }
}
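
as_json and from_json give Commands a JSON round trip, which the language bindings lean on. A small sketch (module path assumed to be public as ezkl::commands); note that from_json unwraps internally, so it panics on malformed input:

    use ezkl::commands::Commands;

    /// Round-trip a command through its JSON encoding.
    fn roundtrip(cmd: &Commands) -> Commands {
        Commands::from_json(&cmd.as_json())
    }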

390
src/eth.rs
File diff suppressed because one or more lines are too long
113
src/execute.rs
@@ -1,10 +1,13 @@
use crate::circuit::region::RegionSettings;
use crate::circuit::CheckMode;
use crate::commands::CalibrationTarget;
use crate::eth::{deploy_contract_via_solidity, deploy_da_verifier_via_solidity};
use crate::eth::{
    deploy_contract_via_solidity, deploy_da_verifier_via_solidity, fix_da_multi_sol,
    fix_da_single_sol,
};
#[allow(unused_imports)]
use crate::eth::{fix_da_sol, get_contract_artifacts, verify_proof_via_solidity};
use crate::graph::input::GraphData;
use crate::eth::{get_contract_artifacts, verify_proof_via_solidity};
use crate::graph::input::{Calls, GraphData};
use crate::graph::{GraphCircuit, GraphSettings, GraphWitness, Model};
use crate::graph::{TestDataSource, TestSources};
use crate::pfsys::evm::aggregation_kzg::{AggregationCircuit, PoseidonTranscript};
@@ -140,7 +143,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
            scales,
            scale_rebase_multiplier,
            max_logrows,
            only_range_check_rebase,
        } => calibrate(
            model.unwrap_or(DEFAULT_MODEL.into()),
            data.unwrap_or(DEFAULT_DATA.into()),
@@ -149,7 +151,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
            lookup_safety_margin,
            scales,
            scale_rebase_multiplier,
            only_range_check_rebase.unwrap_or(DEFAULT_ONLY_RANGE_CHECK_REBASE.parse().unwrap()),
            max_logrows,
        )
        .await
@@ -671,10 +672,10 @@ pub(crate) async fn get_srs_cmd(
        let srs_uri = format!("{}{}", PUBLIC_SRS_URL, k);
        let mut reader = Cursor::new(fetch_srs(&srs_uri).await?);
        // check the SRS
        let pb = init_spinner();
        pb.set_message("Validating SRS (this may take a while) ...");
        let params = ParamsKZG::<Bn256>::read(&mut reader)?;
        pb.finish_with_message("SRS validated.");

        info!("Saving SRS to disk...");
        let computed_srs_path = get_srs_path(k, srs_path.clone(), commitment);
@@ -682,7 +683,10 @@ pub(crate) async fn get_srs_cmd(
        let mut buffer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, &mut file);
        params.write(&mut buffer)?;

        info!("Saved SRS to {}.", computed_srs_path.as_os_str().to_str().unwrap_or("disk"));
        info!(
            "Saved SRS to {}.",
            computed_srs_path.as_os_str().to_str().unwrap_or("disk")
        );

        info!("SRS downloaded");
    } else {
@@ -728,7 +732,7 @@ pub(crate) async fn gen_witness(
        None
    };

    let mut input = circuit.load_graph_input(&data).await?;
    #[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
    let mut input = circuit.load_graph_input(&data)?;

@@ -968,7 +972,6 @@ pub(crate) async fn calibrate(
    lookup_safety_margin: f64,
    scales: Option<Vec<crate::Scale>>,
    scale_rebase_multiplier: Vec<u32>,
    only_range_check_rebase: bool,
    max_logrows: Option<u32>,
) -> Result<GraphSettings, EZKLError> {
    use log::error;
@@ -1004,12 +1007,6 @@ pub(crate) async fn calibrate(
        (11..14).collect::<Vec<crate::Scale>>()
    };

    let div_rebasing = if only_range_check_rebase {
        vec![false]
    } else {
        vec![true, false]
    };

    let mut found_params: Vec<GraphSettings> = vec![];

    // 2 x 2 grid
@@ -1047,12 +1044,6 @@ pub(crate) async fn calibrate(
        .map(|(a, b)| (*a, *b))
        .collect::<Vec<((crate::Scale, crate::Scale), u32)>>();

    let range_grid = range_grid
        .iter()
        .cartesian_product(div_rebasing.iter())
        .map(|(a, b)| (*a, *b))
        .collect::<Vec<(((crate::Scale, crate::Scale), u32), bool)>>();

    let mut forward_pass_res = HashMap::new();

    let pb = init_bar(range_grid.len() as u64);
@@ -1061,30 +1052,23 @@ pub(crate) async fn calibrate(
    let mut num_failed = 0;
    let mut num_passed = 0;

    for (((input_scale, param_scale), scale_rebase_multiplier), div_rebasing) in range_grid {
    for ((input_scale, param_scale), scale_rebase_multiplier) in range_grid {
        pb.set_message(format!(
            "i-scale: {}, p-scale: {}, rebase-(x): {}, div-rebase: {}, fail: {}, pass: {}",
            "i-scale: {}, p-scale: {}, rebase-(x): {}, fail: {}, pass: {}",
            input_scale.to_string().blue(),
            param_scale.to_string().blue(),
            scale_rebase_multiplier.to_string().blue(),
            div_rebasing.to_string().yellow(),
            scale_rebase_multiplier.to_string().yellow(),
            num_failed.to_string().red(),
            num_passed.to_string().green()
        ));

        let key = (
            input_scale,
            param_scale,
            scale_rebase_multiplier,
            div_rebasing,
        );
        let key = (input_scale, param_scale, scale_rebase_multiplier);
        forward_pass_res.insert(key, vec![]);

        let local_run_args = RunArgs {
            input_scale,
            param_scale,
            scale_rebase_multiplier,
            div_rebasing,
            lookup_range: (IntegerRep::MIN, IntegerRep::MAX),
            ..settings.run_args.clone()
        };
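
Dropping div_rebasing shrinks the calibration search space from a 3-axis to a 2-axis grid: input scale x param scale x rebase multiplier. The grid construction is an ordinary iterator product; a reduced sketch (itertools assumed, as in this file, and the concrete values are illustrative):

    use itertools::Itertools;

    fn main() {
        let scales = [11, 12, 13];
        let rebase = [1u32, 2, 10];
        let grid: Vec<((i32, i32), u32)> = scales
            .iter()
            .cartesian_product(scales.iter())
            .cartesian_product(rebase.iter())
            .map(|((a, b), c)| ((*a, *b), *c))
            .collect();
        assert_eq!(grid.len(), 3 * 3 * 3); // one calibration run per grid point
    }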
|
||||
@@ -1188,7 +1172,6 @@ pub(crate) async fn calibrate(
|
||||
let found_run_args = RunArgs {
|
||||
input_scale: new_settings.run_args.input_scale,
|
||||
param_scale: new_settings.run_args.param_scale,
|
||||
div_rebasing: new_settings.run_args.div_rebasing,
|
||||
lookup_range: new_settings.run_args.lookup_range,
|
||||
logrows: new_settings.run_args.logrows,
|
||||
scale_rebase_multiplier: new_settings.run_args.scale_rebase_multiplier,
|
||||
@@ -1296,7 +1279,6 @@ pub(crate) async fn calibrate(
|
||||
best_params.run_args.input_scale,
|
||||
best_params.run_args.param_scale,
|
||||
best_params.run_args.scale_rebase_multiplier,
|
||||
best_params.run_args.div_rebasing,
|
||||
))
|
||||
.ok_or("no params found")?
|
||||
.iter()
|
||||
@@ -1477,13 +1459,24 @@ pub(crate) async fn create_evm_data_attestation(
|
||||
// if input is not provided, we just instantiate dummy input data
|
||||
let data = GraphData::from_path(input).unwrap_or(GraphData::new(DataSource::File(vec![])));
|
||||
|
||||
// The number of input and output instances we attest to for the single call data attestation
|
||||
let mut input_len = None;
|
||||
let mut output_len = None;
|
||||
|
||||
let output_data = if let Some(DataSource::OnChain(source)) = data.output_data {
|
||||
if visibility.output.is_private() {
|
||||
return Err("private output data on chain is not supported on chain".into());
|
||||
}
let mut on_chain_output_data = vec![];
for call in source.calls {
on_chain_output_data.push(call);
match source.calls {
Calls::Multiple(calls) => {
for call in calls {
on_chain_output_data.push(call);
}
}
Calls::Single(call) => {
output_len = Some(call.len);
}
}
Some(on_chain_output_data)
} else {
@@ -1495,8 +1488,15 @@ pub(crate) async fn create_evm_data_attestation(
return Err("private input data on chain is not supported on chain".into());
|
||||
}
let mut on_chain_input_data = vec![];
for call in source.calls {
on_chain_input_data.push(call);
match source.calls {
Calls::Multiple(calls) => {
for call in calls {
on_chain_input_data.push(call);
}
}
Calls::Single(call) => {
input_len = Some(call.len);
}
}
Some(on_chain_input_data)
} else {
@@ -1523,13 +1523,24 @@ pub(crate) async fn create_evm_data_attestation(
None
};

let output = fix_da_sol(input_data, output_data, commitment_bytes)?;
let mut f = File::create(sol_code_path.clone())?;
let _ = f.write(output.as_bytes());
// fetch abi of the contract
let (abi, _, _) = get_contract_artifacts(sol_code_path, "DataAttestation", 0).await?;
// save abi to file
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
// if either input_len or output_len is Some then we are in the single call data attestation mode
if input_len.is_some() || output_len.is_some() {
let output = fix_da_single_sol(input_len, output_len)?;
let mut f = File::create(sol_code_path.clone())?;
let _ = f.write(output.as_bytes());
// fetch abi of the contract
let (abi, _, _) = get_contract_artifacts(sol_code_path, "DataAttestationSingle", 0).await?;
// save abi to file
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
} else {
let output = fix_da_multi_sol(input_data, output_data, commitment_bytes)?;
let mut f = File::create(sol_code_path.clone())?;
let _ = f.write(output.as_bytes());
// fetch abi of the contract
let (abi, _, _) = get_contract_artifacts(sol_code_path, "DataAttestationMulti", 0).await?;
// save abi to file
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
}

Ok(String::new())
}
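The branch structure above reduces to a single dispatch rule: if either side of the data used a single-call account read (so input_len or output_len was populated), the single-call attestation contract and ABI are generated, otherwise the multi-call variant. A hedged distillation of that rule (the helper itself is illustrative; only the contract names come from the calls above):

// Illustrative only: mirrors the if/else above that selects which
// attestation contract (and matching ABI) is written to disk.
fn attestation_contract(input_len: Option<usize>, output_len: Option<usize>) -> &'static str {
    if input_len.is_some() || output_len.is_some() {
        "DataAttestationSingle"
    } else {
        "DataAttestationMulti"
    }
}

fn main() {
    assert_eq!(attestation_contract(Some(4), None), "DataAttestationSingle");
    assert_eq!(attestation_contract(None, None), "DataAttestationMulti");
}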
@@ -2022,7 +2033,7 @@ pub(crate) fn mock_aggregate(
}
}
// proof aggregation
let pb = {
let pb = init_spinner();
pb.set_message("Aggregating (may take a while)...");
pb
@@ -2033,7 +2044,7 @@ pub(crate) fn mock_aggregate(
let prover = halo2_proofs::dev::MockProver::run(logrows, &circuit, vec![circuit.instances()])
.map_err(|e| ExecutionError::MockProverError(e.to_string()))?;
prover.verify().map_err(ExecutionError::VerifyError)?;
pb.finish_with_message("Done.");
Ok(String::new())
}

@@ -2127,7 +2138,7 @@ pub(crate) fn aggregate(
}

// proof aggregation
let pb = {
let pb = init_spinner();
pb.set_message("Aggregating (may take a while)...");
pb
@@ -2276,7 +2287,7 @@ pub(crate) fn aggregate(
);
snark.save(&proof_path)?;

pb.finish_with_message("Done.");

Ok(snark)
}

@@ -128,7 +128,9 @@ impl FileSourceInner {
/// Convert to a field element
pub fn to_field(&self, scale: crate::Scale) -> Fp {
match self {
FileSourceInner::Float(f) => integer_rep_to_felt(quantize_float(f, 0.0, scale).unwrap()),
FileSourceInner::Float(f) => {
integer_rep_to_felt(quantize_float(f, 0.0, scale).unwrap())
}
FileSourceInner::Bool(f) => {
if *f {
Fp::one()
@@ -155,19 +157,80 @@ impl FileSourceInner {
}
}

/// Call type for attested inputs on-chain
#[derive(Clone, Debug, PartialOrd, PartialEq)]
pub enum Calls {
/// Vector of calls to accounts, each returning an attested data point
Multiple(Vec<CallsToAccount>),
/// Single call to account, returning an array of attested data points
Single(CallToAccount),
}

impl Default for Calls {
fn default() -> Self {
Calls::Multiple(Vec::new())
}
}
/// Inner elements of inputs/outputs coming from on-chain
#[derive(Clone, Debug, Deserialize, Serialize, Default, PartialOrd, PartialEq)]
pub struct OnChainSource {
/// Vector of calls to accounts
pub calls: Vec<CallsToAccount>,
/// Calls to accounts
pub calls: Calls,
/// RPC url
pub rpc: RPCUrl,
}

impl OnChainSource {
/// Create a new OnChainSource
pub fn new(calls: Vec<CallsToAccount>, rpc: RPCUrl) -> Self {
OnChainSource { calls, rpc }
/// Create a new OnChainSource with multiple calls
pub fn new_multiple(calls: Vec<CallsToAccount>, rpc: RPCUrl) -> Self {
OnChainSource {
calls: Calls::Multiple(calls),
rpc,
}
}

/// Create a new OnChainSource with a single call
pub fn new_single(call: CallToAccount, rpc: RPCUrl) -> Self {
OnChainSource {
calls: Calls::Single(call),
rpc,
}
}
}

impl Serialize for Calls {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self {
Calls::Single(data) => data.serialize(serializer),
Calls::Multiple(data) => data.serialize(serializer),
}
}
}

// !!! ALWAYS USE JSON SERIALIZATION FOR GRAPH INPUT
// UNTAGGED ENUMS WONT WORK :( as highlighted here:
impl<'de> Deserialize<'de> for Calls {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let this_json: Box<serde_json::value::RawValue> = Deserialize::deserialize(deserializer)?;

let multiple_try: Result<Vec<CallsToAccount>, _> = serde_json::from_str(this_json.get());
if let Ok(t) = multiple_try {
return Ok(Calls::Multiple(t));
}
let single_try: Result<CallToAccount, _> = serde_json::from_str(this_json.get());
if let Ok(t) = single_try {
return Ok(Calls::Single(t));
}

Err(serde::de::Error::custom(
"failed to deserialize Calls",
))
}
}
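Because Calls serializes transparently (each variant writes its payload directly, with no enum tag), the deserializer cannot dispatch on a tag and instead re-parses the raw JSON twice, as above. A self-contained mirror of that fallback, using stand-in structs rather than ezkl's real CallsToAccount/CallToAccount types:

use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct MultiCall {
    address: String,
}

#[derive(Deserialize, Debug)]
struct SingleCall {
    address: String,
    len: usize,
}

#[derive(Debug)]
enum Calls {
    Multiple(Vec<MultiCall>),
    Single(SingleCall),
}

fn parse_calls(json: &str) -> Result<Calls, serde_json::Error> {
    // Try the array-of-calls shape first, then the single-call shape,
    // exactly like the RawValue-based impl above.
    if let Ok(m) = serde_json::from_str::<Vec<MultiCall>>(json) {
        return Ok(Calls::Multiple(m));
    }
    serde_json::from_str::<SingleCall>(json).map(Calls::Single)
}

fn main() {
    let multi = r#"[{"address": "0xabc"}]"#;
    let single = r#"{"address": "0xabc", "len": 3}"#;
    assert!(matches!(parse_calls(multi), Ok(Calls::Multiple(_))));
    assert!(matches!(parse_calls(single), Ok(Calls::Single(_))));
}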

@@ -277,7 +340,8 @@ impl OnChainSource {
rpc: Option<&str>,
) -> Result<(Vec<Tensor<Fp>>, Self), GraphError> {
use crate::eth::{
evm_quantize, read_on_chain_inputs, test_on_chain_data, DEFAULT_ANVIL_ENDPOINT,
evm_quantize_multi, read_on_chain_inputs_multi, test_on_chain_data,
DEFAULT_ANVIL_ENDPOINT,
};
use log::debug;

@@ -296,7 +360,7 @@ impl OnChainSource {
let calls_to_accounts = test_on_chain_data(client.clone(), data).await?;
debug!("Calls to accounts: {:?}", calls_to_accounts);
let inputs =
read_on_chain_inputs(client.clone(), client_address, &calls_to_accounts).await?;
read_on_chain_inputs_multi(client.clone(), client_address, &calls_to_accounts).await?;
debug!("Inputs: {:?}", inputs);

let mut quantized_evm_inputs = vec![];
@@ -304,7 +368,7 @@ impl OnChainSource {
let mut prev = 0;
for (idx, i) in data.iter().enumerate() {
quantized_evm_inputs.extend(
evm_quantize(
evm_quantize_multi(
client.clone(),
vec![scales[idx]; i.len()],
&(
@@ -330,7 +394,7 @@ impl OnChainSource {
// Fill the input_data field of the GraphData struct
Ok((
inputs,
OnChainSource::new(calls_to_accounts.clone(), used_rpc),
OnChainSource::new_multiple(calls_to_accounts.clone(), used_rpc),
))
}
}
@@ -350,6 +414,24 @@ pub struct CallsToAccount {
/// Address of the contract to read the data from.
pub address: String,
}

/// Defines a view only call to accounts to fetch the on-chain input data.
/// This data will be included as part of the first elements in the publicInputs
/// for the sol evm verifier and will be verified by verifyWithDataAttestation.sol
#[derive(Clone, Debug, Deserialize, Serialize, Default, PartialOrd, PartialEq)]
pub struct CallToAccount {
/// The call_data is a byte string representing the ABI encoded function call to
/// read the data from the address. This call must return a single array of integers
/// that can be safely cast to the int128 type in Solidity.
pub call_data: Call,
/// The number of decimals for f32 conversion of all of the elements returned from the
/// call.
pub decimals: Decimals,
/// Address of the contract to read the data from.
pub address: String,
/// The number of elements returned from the call.
pub len: usize,
}
/// Enum that defines source of the inputs/outputs to the EZKL model
#[derive(Clone, Debug, Serialize, PartialOrd, PartialEq)]
#[serde(untagged)]
@@ -600,6 +682,28 @@ impl ToPyObject for CallsToAccount {
}
}

#[cfg(feature = "python-bindings")]
impl ToPyObject for CallToAccount {
fn to_object(&self, py: Python) -> PyObject {
let dict = PyDict::new(py);
dict.set_item("account", &self.address).unwrap();
dict.set_item("call_data", &self.call_data).unwrap();
dict.set_item("decimals", &self.decimals).unwrap();
dict.set_item("len", &self.len).unwrap();
dict.to_object(py)
}
}

#[cfg(feature = "python-bindings")]
impl ToPyObject for Calls {
fn to_object(&self, py: Python) -> PyObject {
match self {
Calls::Multiple(calls) => calls.to_object(py),
Calls::Single(call) => call.to_object(py),
}
}
}

#[cfg(feature = "python-bindings")]
impl ToPyObject for DataSource {
fn to_object(&self, py: Python) -> PyObject {
@@ -608,7 +712,8 @@ impl ToPyObject for DataSource {
DataSource::OnChain(source) => {
let dict = PyDict::new(py);
dict.set_item("rpc_url", &source.rpc).unwrap();
dict.set_item("calls_to_accounts", &source.calls).unwrap();
dict.set_item("calls_to_accounts", &source.calls.to_object(py))
.unwrap();
dict.to_object(py)
}
DataSource::DB(source) => {

@@ -60,7 +60,10 @@ use pyo3::prelude::*;
#[cfg(feature = "python-bindings")]
use pyo3::types::PyDict;
#[cfg(feature = "python-bindings")]
use pyo3::types::PyDictMethods;
#[cfg(feature = "python-bindings")]
use pyo3::ToPyObject;

use serde::{Deserialize, Serialize};
use std::ops::Deref;
pub use utilities::*;
@@ -133,6 +136,8 @@ pub struct GraphWitness {
pub min_lookup_inputs: IntegerRep,
/// max range check size
pub max_range_size: IntegerRep,
/// (optional) version of ezkl used
pub version: Option<String>,
}
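One nicety of adding version: Option<String> here: serde treats a missing field of type Option<T> as None by default, so witness files written by older releases still deserialize without a migration. A small self-contained check of that behaviour (stand-in struct, not the real GraphWitness):

use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct Witness {
    max_range_size: i64,
    // Absent in old files; serde fills Option fields with None by default.
    version: Option<String>,
}

fn main() {
    let old_file = r#"{"max_range_size": 127}"#;
    let w: Witness = serde_json::from_str(old_file).unwrap();
    assert_eq!(w.version, None);
}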

impl GraphWitness {
@@ -161,6 +166,7 @@ impl GraphWitness {
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
version: None,
}
}

@@ -340,10 +346,10 @@ impl ToPyObject for GraphWitness {
if let Some(processed_inputs) = &self.processed_inputs {
//poseidon_hash
if let Some(processed_inputs_poseidon_hash) = &processed_inputs.poseidon_hash {
insert_poseidon_hash_pydict(dict_inputs, processed_inputs_poseidon_hash).unwrap();
insert_poseidon_hash_pydict(&dict_inputs, processed_inputs_poseidon_hash).unwrap();
}
if let Some(processed_inputs_polycommit) = &processed_inputs.polycommit {
insert_polycommit_pydict(dict_inputs, processed_inputs_polycommit).unwrap();
insert_polycommit_pydict(&dict_inputs, processed_inputs_polycommit).unwrap();
}

dict.set_item("processed_inputs", dict_inputs).unwrap();
@@ -351,10 +357,10 @@ impl ToPyObject for GraphWitness {

if let Some(processed_params) = &self.processed_params {
if let Some(processed_params_poseidon_hash) = &processed_params.poseidon_hash {
insert_poseidon_hash_pydict(dict_params, processed_params_poseidon_hash).unwrap();
insert_poseidon_hash_pydict(&dict_params, processed_params_poseidon_hash).unwrap();
}
if let Some(processed_params_polycommit) = &processed_params.polycommit {
insert_polycommit_pydict(dict_inputs, processed_params_polycommit).unwrap();
insert_polycommit_pydict(&dict_params, processed_params_polycommit).unwrap();
}

dict.set_item("processed_params", dict_params).unwrap();
@@ -362,10 +368,11 @@ impl ToPyObject for GraphWitness {

if let Some(processed_outputs) = &self.processed_outputs {
if let Some(processed_outputs_poseidon_hash) = &processed_outputs.poseidon_hash {
insert_poseidon_hash_pydict(dict_outputs, processed_outputs_poseidon_hash).unwrap();
insert_poseidon_hash_pydict(&dict_outputs, processed_outputs_poseidon_hash)
.unwrap();
}
if let Some(processed_outputs_polycommit) = &processed_outputs.polycommit {
insert_polycommit_pydict(dict_inputs, processed_outputs_polycommit).unwrap();
insert_polycommit_pydict(&dict_outputs, processed_outputs_polycommit).unwrap();
}

dict.set_item("processed_outputs", dict_outputs).unwrap();
@@ -376,7 +383,10 @@ impl ToPyObject for GraphWitness {
}

#[cfg(feature = "python-bindings")]
fn insert_poseidon_hash_pydict(pydict: &PyDict, poseidon_hash: &Vec<Fp>) -> Result<(), PyErr> {
fn insert_poseidon_hash_pydict(
pydict: &Bound<'_, PyDict>,
poseidon_hash: &Vec<Fp>,
) -> Result<(), PyErr> {
let poseidon_hash: Vec<String> = poseidon_hash.iter().map(field_to_string).collect();
pydict.set_item("poseidon_hash", poseidon_hash)?;

@@ -384,7 +394,10 @@ fn insert_poseidon_hash_pydict(pydict: &PyDict, poseidon_hash: &Vec<Fp>) -> Resu
}

#[cfg(feature = "python-bindings")]
fn insert_polycommit_pydict(pydict: &PyDict, commits: &Vec<Vec<G1Affine>>) -> Result<(), PyErr> {
fn insert_polycommit_pydict(
pydict: &Bound<'_, PyDict>,
commits: &Vec<Vec<G1Affine>>,
) -> Result<(), PyErr> {
use crate::bindings::python::PyG1Affine;
let poseidon_hash: Vec<Vec<PyG1Affine>> = commits
.iter()
@@ -1001,11 +1014,24 @@ impl GraphCircuit {
shapes: &Vec<Vec<usize>>,
scales: Vec<crate::Scale>,
) -> Result<Vec<Tensor<Fp>>, GraphError> {
use crate::eth::{evm_quantize, read_on_chain_inputs, setup_eth_backend};
use crate::eth::{
evm_quantize_multi, evm_quantize_single, read_on_chain_inputs_multi,
read_on_chain_inputs_single, setup_eth_backend,
};
let (client, client_address) = setup_eth_backend(Some(&source.rpc), None).await?;
let inputs = read_on_chain_inputs(client.clone(), client_address, &source.calls).await?;
// quantize the supplied data using the provided scale + QuantizeData.sol
let quantized_evm_inputs = evm_quantize(client, scales, &inputs).await?;
let quantized_evm_inputs = match source.calls {
input::Calls::Single(call) => {
let (inputs, decimals) =
read_on_chain_inputs_single(client.clone(), client_address, call).await?;

evm_quantize_single(client, scales, &inputs, decimals).await?
}
input::Calls::Multiple(calls) => {
let inputs =
read_on_chain_inputs_multi(client.clone(), client_address, &calls).await?;
evm_quantize_multi(client, scales, &inputs).await?
}
};
// on-chain data has already been quantized at this point. Just need to reshape it and push into tensor vector
let mut inputs: Vec<Tensor<Fp>> = vec![];
for (input, shape) in [quantized_evm_inputs].iter().zip(shapes) {
@@ -1350,6 +1376,7 @@ impl GraphCircuit {
max_lookup_inputs: model_results.max_lookup_inputs,
min_lookup_inputs: model_results.min_lookup_inputs,
max_range_size: model_results.max_range_size,
version: Some(crate::version().to_string()),
};

witness.generate_rescaled_elements(

@@ -656,7 +656,7 @@ impl Model {

let mut symbol_values = SymbolValues::default();
for (symbol, value) in run_args.variables.iter() {
let symbol = model.symbol_table.sym(symbol);
let symbol = model.symbols.sym(symbol);
symbol_values = symbol_values.with(&symbol, *value as i64);
debug!("set {} to {}", symbol, value);
}
@@ -915,20 +915,9 @@ impl Model {
if scales.contains_key(&i) {
let scale_diff = n.out_scale - scales[&i];
n.opkind = if scale_diff > 0 {
RebaseScale::rebase(
n.opkind,
scales[&i],
n.out_scale,
1,
run_args.div_rebasing,
)
RebaseScale::rebase(n.opkind, scales[&i], n.out_scale, 1)
} else {
RebaseScale::rebase_up(
n.opkind,
scales[&i],
n.out_scale,
run_args.div_rebasing,
)
RebaseScale::rebase_up(n.opkind, scales[&i], n.out_scale)
};
n.out_scale = scales[&i];
}
@@ -1210,9 +1199,9 @@ impl Model {
// The number of columns in the circuits
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
region.debug_report();
debug!("input indices: {:?}", node.inputs());
debug!("output scales: {:?}", node.out_scales());
debug!(
trace!("input indices: {:?}", node.inputs());
trace!("output scales: {:?}", node.out_scales());
trace!(
"input scales: {:?}",
node.inputs()
.iter()
@@ -1231,8 +1220,8 @@ impl Model {
// we re-assign inputs, always from the 0 outlet
vec![results.get(idx).ok_or(GraphError::MissingResults)?[0].clone()]
};
debug!("output dims: {:?}", node.out_dims());
debug!(
trace!("output dims: {:?}", node.out_dims());
trace!(
"input dims {:?}",
values.iter().map(|v| v.dims()).collect_vec()
);

@@ -120,7 +120,6 @@ impl RebaseScale {
global_scale: crate::Scale,
op_out_scale: crate::Scale,
scale_rebase_multiplier: u32,
div_rebasing: bool,
) -> SupportedOp {
if (op_out_scale > (global_scale * scale_rebase_multiplier as i32))
&& !inner.is_constant()
@@ -137,7 +136,6 @@ impl RebaseScale {
multiplier,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32((multiplier) as f32),
use_range_check_for_int: !div_rebasing,
},
original_scale: op.original_scale,
})
@@ -148,7 +146,6 @@ impl RebaseScale {
multiplier,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32(multiplier as f32),
use_range_check_for_int: !div_rebasing,
},
original_scale: op_out_scale,
})
@@ -163,7 +160,6 @@ impl RebaseScale {
inner: SupportedOp,
target_scale: crate::Scale,
op_out_scale: crate::Scale,
div_rebasing: bool,
) -> SupportedOp {
if (op_out_scale < (target_scale)) && !inner.is_constant() && !inner.is_input() {
let multiplier = scale_to_multiplier(op_out_scale - target_scale);
@@ -176,7 +172,6 @@ impl RebaseScale {
original_scale: op.original_scale,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32((multiplier) as f32),
use_range_check_for_int: !div_rebasing,
},
})
} else {
@@ -187,7 +182,6 @@ impl RebaseScale {
original_scale: op_out_scale,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32(multiplier as f32),
use_range_check_for_int: !div_rebasing,
},
})
}
@@ -595,13 +589,7 @@ impl Node {
let mut out_scale = opkind.out_scale(in_scales.clone())?;
// rescale the inputs if necessary to get consistent fixed points, we select the largest scale (highest precision)
let global_scale = scales.get_max();
opkind = RebaseScale::rebase(
opkind,
global_scale,
out_scale,
scales.rebase_multiplier,
run_args.div_rebasing,
);
opkind = RebaseScale::rebase(opkind, global_scale, out_scale, scales.rebase_multiplier);

out_scale = opkind.out_scale(in_scales)?;

@@ -279,6 +279,8 @@ pub fn new_op_from_onnx(
symbol_values: &SymbolValues,
run_args: &crate::RunArgs,
) -> Result<(SupportedOp, Vec<usize>), GraphError> {
use std::f64::consts::E;

use tract_onnx::tract_core::ops::array::Trilu;

use crate::circuit::InputType;
@@ -764,7 +766,7 @@ pub fn new_op_from_onnx(
.collect::<Vec<_>>();

if inputs.len() == 2 {
if const_inputs.len() > 0 {
if !const_inputs.is_empty() {
let const_idx = const_inputs[0];
let boxed_op = inputs[const_idx].opkind();
let unit = if let Some(c) = extract_const_raw_values(boxed_op) {
@@ -842,18 +844,33 @@ pub fn new_op_from_onnx(
"Sigmoid" => SupportedOp::Nonlinear(LookupOp::Sigmoid {
scale: scale_to_multiplier(input_scales[0]).into(),
}),
"Sqrt" => SupportedOp::Nonlinear(LookupOp::Sqrt {
scale: scale_to_multiplier(input_scales[0]).into(),
}),
"Rsqrt" => SupportedOp::Nonlinear(LookupOp::Rsqrt {
"Sqrt" => SupportedOp::Hybrid(HybridOp::Sqrt {
scale: scale_to_multiplier(input_scales[0]).into(),
}),
"Rsqrt" => {
let in_scale = input_scales[0];
let max_scale = std::cmp::max(scales.get_max(), in_scale);
SupportedOp::Hybrid(HybridOp::Rsqrt {
input_scale: (scale_to_multiplier(in_scale) as f32).into(),
output_scale: (scale_to_multiplier(max_scale) as f32).into(),
})
}
"Exp" => SupportedOp::Nonlinear(LookupOp::Exp {
scale: scale_to_multiplier(input_scales[0]).into(),
base: E.into(),
}),
"Ln" => SupportedOp::Nonlinear(LookupOp::Ln {
scale: scale_to_multiplier(input_scales[0]).into(),
}),
"Ln" => {
if run_args.bounded_log_lookup {
SupportedOp::Hybrid(HybridOp::Ln {
scale: scale_to_multiplier(input_scales[0]).into(),
})
} else {
SupportedOp::Nonlinear(LookupOp::Ln {
scale: scale_to_multiplier(input_scales[0]).into(),
})
}
}

"Sin" => SupportedOp::Nonlinear(LookupOp::Sin {
scale: scale_to_multiplier(input_scales[0]).into(),
}),
@@ -990,21 +1007,21 @@ pub fn new_op_from_onnx(
op
}
"Iff" => SupportedOp::Linear(PolyOp::Iff),
"Less" => {
"<" => {
if inputs.len() == 2 {
SupportedOp::Hybrid(HybridOp::Less)
} else {
return Err(GraphError::InvalidDims(idx, "less".to_string()));
}
}
"LessEqual" => {
"<=" => {
if inputs.len() == 2 {
SupportedOp::Hybrid(HybridOp::LessEqual)
} else {
return Err(GraphError::InvalidDims(idx, "less equal".to_string()));
}
}
"Greater" => {
">" => {
// Extract the slope layer hyperparams
if inputs.len() == 2 {
SupportedOp::Hybrid(HybridOp::Greater)
@@ -1012,7 +1029,7 @@ pub fn new_op_from_onnx(
return Err(GraphError::InvalidDims(idx, "greater".to_string()));
}
}
"GreaterEqual" => {
">=" => {
// Extract the slope layer hyperparams
if inputs.len() == 2 {
SupportedOp::Hybrid(HybridOp::GreaterEqual)
@@ -1120,7 +1137,57 @@ pub fn new_op_from_onnx(
})
}
} else {
unimplemented!("only support constant pow for now")
if let Some(c) = inputs[0].opkind().get_mutable_constant() {
inputs[0].decrement_use();
deleted_indices.push(0);
if c.raw_values.len() > 1 {
unimplemented!("only support scalar base")
}

let base = c.raw_values[0];

SupportedOp::Nonlinear(LookupOp::Exp {
scale: scale_to_multiplier(input_scales[1]).into(),
base: base.into(),
})
} else {
unimplemented!("only support constant base or pow for now")
}
}
}
"Div" => {
let const_idx = inputs
.iter()
.enumerate()
.filter(|(_, n)| n.is_constant())
.map(|(i, _)| i)
.collect::<Vec<_>>();

if const_idx.len() > 1 {
return Err(GraphError::InvalidDims(idx, "div".to_string()));
}

let const_idx = const_idx[0];

if const_idx != 1 {
unimplemented!("only support div with constant as second input")
}

if let Some(c) = inputs[const_idx].opkind().get_mutable_constant() {
if c.raw_values.len() == 1 && c.raw_values[0] != 0. {
inputs[const_idx].decrement_use();
deleted_indices.push(const_idx);
// get the non constant index
let denom = c.raw_values[0];

SupportedOp::Hybrid(HybridOp::Div {
denom: denom.into(),
})
} else {
unimplemented!("only support non zero divisors of size 1")
}
} else {
unimplemented!("only support div with constant as second input")
}
}
"Cube" => SupportedOp::Linear(PolyOp::Pow(3)),
@@ -1183,7 +1250,7 @@ pub fn new_op_from_onnx(
"And" => SupportedOp::Linear(PolyOp::And),
"Or" => SupportedOp::Linear(PolyOp::Or),
"Xor" => SupportedOp::Linear(PolyOp::Xor),
"Equals" => SupportedOp::Hybrid(HybridOp::Equals),
"==" => SupportedOp::Hybrid(HybridOp::Equals),
"Deconv" => {
let deconv_node: &Deconv = match node.op().downcast_ref::<Deconv>() {
Some(b) => b,

@@ -9,8 +9,7 @@ use itertools::Itertools;
use log::debug;
#[cfg(feature = "python-bindings")]
use pyo3::{
exceptions::PyValueError, types::PyString, FromPyObject, IntoPy, PyAny, PyObject, PyResult,
PyTryFrom, Python, ToPyObject,
exceptions::PyValueError, FromPyObject, IntoPy, PyObject, PyResult, Python, ToPyObject,
};

use serde::{Deserialize, Serialize};
@@ -137,10 +136,8 @@ impl IntoPy<PyObject> for Visibility {
#[cfg(feature = "python-bindings")]
/// Obtains Visibility from PyObject (Required for Visibility to be compatible with Python)
impl<'source> FromPyObject<'source> for Visibility {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
let trystr = <PyString as PyTryFrom>::try_from(ob)?;
let strval = trystr.to_string();

fn extract_bound(ob: &pyo3::Bound<'source, pyo3::PyAny>) -> PyResult<Self> {
let strval = String::extract_bound(ob)?;
let strval = strval.as_str();

if strval.contains("hashed/private") {

24
src/lib.rs
@@ -108,7 +108,19 @@ use serde::{Deserialize, Serialize};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tosubcommand::ToFlags;

/// Bindings managment
// if CARGO VERSION is 0.0.0 replace with "source - no compatibility guaranteed"
/// The version of the ezkl library
const VERSION: &str = env!("CARGO_PKG_VERSION");

/// Get the version of the library
pub fn version() -> &'static str {
match VERSION {
"0.0.0" => "source - no compatibility guaranteed",
_ => VERSION,
}
}

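The fallback string only appears when cargo reports version 0.0.0, i.e. builds straight from source; released binaries report their crate version. A usage sketch:

// Prints the crate version, or the source-build fallback when
// CARGO_PKG_VERSION is "0.0.0".
fn main() {
    println!("ezkl {}", ezkl::version());
}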
/// Bindings management
#[cfg(any(
feature = "ios-bindings",
all(target_arch = "wasm32", target_os = "unknown"),
@@ -297,8 +309,6 @@ pub struct RunArgs {
all(feature = "ezkl", not(target_arch = "wasm32")),
arg(long, default_value = "false")
)]
/// Rebase the scale using lookup table for division instead of using a range check
pub div_rebasing: bool,
/// Should constants with 0.0 fraction be rebased to scale 0
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
@@ -317,11 +327,18 @@ pub struct RunArgs {
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "2", value_hint = clap::ValueHint::Other))]
/// the number of legs used for decompositions
pub decomp_legs: usize,
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
arg(long, default_value = "false")
)]
/// use bounded lookup for the log
pub bounded_log_lookup: bool,
}
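Since the field also gets a Default entry below, callers can opt into the bounded log behaviour with struct-update syntax; with the clap attribute shown it should additionally surface as a --bounded-log-lookup CLI flag (flag name inferred from clap's usual kebab-casing, not verified here). A minimal sketch:

use ezkl::RunArgs;

fn main() {
    // Flip only the new knob; every other run argument keeps its default.
    let args = RunArgs {
        bounded_log_lookup: true,
        ..Default::default()
    };
    assert!(args.bounded_log_lookup);
}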

impl Default for RunArgs {
fn default() -> Self {
Self {
bounded_log_lookup: false,
tolerance: Tolerance::default(),
input_scale: 7,
param_scale: 7,
@@ -333,7 +350,6 @@ impl Default for RunArgs {
input_visibility: Visibility::Private,
output_visibility: Visibility::Public,
param_visibility: Visibility::Private,
div_rebasing: false,
rebase_frac_zero_constants: false,
check_mode: CheckMode::UNSAFE,
commitment: None,

@@ -46,6 +46,9 @@ use thiserror::Error as thisError;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tosubcommand::ToFlags;

#[cfg(feature = "python-bindings")]
use pyo3::types::PyDictMethods;

use halo2curves::bn256::{Bn256, Fr, G1Affine};

fn serde_format_from_str(s: &str) -> halo2_proofs::SerdeFormat {
@@ -59,10 +62,7 @@ fn serde_format_from_str(s: &str) -> halo2_proofs::SerdeFormat {

#[allow(missing_docs)]
#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Deserialize, Serialize, PartialOrd)]
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
derive(ValueEnum)
)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), derive(ValueEnum))]
pub enum ProofType {
#[default]
Single,
@@ -119,9 +119,8 @@ impl ToPyObject for ProofType {
#[cfg(feature = "python-bindings")]
/// Obtains ProofType from PyObject (Required for ProofType to be compatible with Python)
impl<'source> pyo3::FromPyObject<'source> for ProofType {
fn extract(ob: &'source pyo3::PyAny) -> pyo3::PyResult<Self> {
let trystr = <pyo3::types::PyString as pyo3::PyTryFrom>::try_from(ob)?;
let strval = trystr.to_string();
fn extract_bound(ob: &pyo3::Bound<'source, pyo3::PyAny>) -> pyo3::PyResult<Self> {
let strval = String::extract_bound(ob)?;
match strval.to_lowercase().as_str() {
"single" => Ok(ProofType::Single),
"for-aggr" => Ok(ProofType::ForAggr),
@@ -134,10 +133,7 @@ impl<'source> pyo3::FromPyObject<'source> for ProofType {

#[allow(missing_docs)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
derive(ValueEnum)
)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), derive(ValueEnum))]
pub enum StrategyType {
Single,
Accum,
@@ -180,9 +176,8 @@ impl pyo3::IntoPy<PyObject> for StrategyType {
#[cfg(feature = "python-bindings")]
/// Obtains StrategyType from PyObject (Required for StrategyType to be compatible with Python)
impl<'source> pyo3::FromPyObject<'source> for StrategyType {
fn extract(ob: &'source pyo3::PyAny) -> pyo3::PyResult<Self> {
let trystr = <pyo3::types::PyString as pyo3::PyTryFrom>::try_from(ob)?;
let strval = trystr.to_string();
fn extract_bound(ob: &pyo3::Bound<'source, pyo3::PyAny>) -> pyo3::PyResult<Self> {
let strval = String::extract_bound(ob)?;
match strval.to_lowercase().as_str() {
"single" => Ok(StrategyType::Single),
"accum" => Ok(StrategyType::Accum),
@@ -203,10 +198,7 @@ pub enum PfSysError {

#[allow(missing_docs)]
#[derive(Default, Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize, PartialOrd)]
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
derive(ValueEnum)
)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), derive(ValueEnum))]
pub enum TranscriptType {
Poseidon,
#[default]
@@ -244,7 +236,7 @@ impl ToPyObject for TranscriptType {

#[cfg(feature = "python-bindings")]
/// Converts a G1Affine point into a Python dict
pub fn g1affine_to_pydict(g1affine_dict: &PyDict, g1affine: &G1Affine) {
pub fn g1affine_to_pydict(g1affine_dict: &pyo3::Bound<'_, PyDict>, g1affine: &G1Affine) {
let g1affine_x = field_to_string(&g1affine.x);
let g1affine_y = field_to_string(&g1affine.y);
g1affine_dict.set_item("x", g1affine_x).unwrap();
@@ -255,7 +247,7 @@ pub fn g1affine_to_pydict(g1affine_dict: &PyDict, g1affine: &G1Affine) {
use halo2curves::bn256::G1;
#[cfg(feature = "python-bindings")]
/// Converts a G1 point into a Python dict
pub fn g1_to_pydict(g1_dict: &PyDict, g1: &G1) {
pub fn g1_to_pydict(g1_dict: &pyo3::Bound<'_, PyDict>, g1: &G1) {
let g1_x = field_to_string(&g1.x);
let g1_y = field_to_string(&g1.y);
let g1_z = field_to_string(&g1.z);
@@ -324,6 +316,8 @@ where
pub timestamp: Option<u128>,
/// commitment
pub commitment: Option<Commitments>,
/// (optional) version of ezkl used to generate the proof
version: Option<String>,
}

#[cfg(feature = "python-bindings")]
@@ -344,7 +338,7 @@ where
dict.set_item("instances", field_elems).unwrap();
let hex_proof = hex::encode(&self.proof);
dict.set_item("proof", format!("0x{}", hex_proof)).unwrap();
dict.set_item("transcript_type", self.transcript_type)
dict.set_item("transcript_type", self.transcript_type.to_object(py))
.unwrap();
dict.to_object(py)
}
@@ -385,6 +379,7 @@ where
.as_millis(),
),
commitment,
version: Some(crate::version().to_string()),
}
}

@@ -920,6 +915,7 @@ mod tests {
pretty_public_inputs: None,
timestamp: None,
commitment: None,
version: None,
};

snark

@@ -1109,6 +1109,13 @@ impl<T: Clone + TensorType> Tensor<T> {
///
/// ```
pub fn expand(&self, shape: &[usize]) -> Result<Self, TensorError> {
// if both have length 1 then we can just return the tensor
if self.dims().iter().product::<usize>() == 1 && shape.iter().product::<usize>() == 1 {
let mut output = self.clone();
output.reshape(shape)?;
return Ok(output);
}

if self.dims().len() > shape.len() {
return Err(TensorError::DimError(format!(
"Cannot expand {:?} to the smaller shape {:?}",

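The new early return makes singleton-to-singleton expands a pure reshape, so a scalar-shaped tensor can now be expanded to any all-ones shape without touching the broadcast path. A small sketch of the intended behaviour (assuming dims() returns a slice of usize, as the surrounding code suggests):

use ezkl::fieldutils::IntegerRep;
use ezkl::tensor::Tensor;

fn main() {
    // Both [1] and [1, 1, 1] contain a single element, so expand()
    // takes the reshape shortcut instead of broadcasting.
    let x = Tensor::<IntegerRep>::new(Some(&[5]), &[1]).unwrap();
    let y = x.expand(&[1, 1, 1]).unwrap();
    assert_eq!(y.dims(), &[1, 1, 1]);
}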
@@ -1050,6 +1050,7 @@ pub fn scatter_nd<T: TensorType + Send + Sync>(
let slice = coord.iter().map(|x| *x..*x + 1).collect::<Vec<_>>();
let index_val = index.get_slice(&slice)?;
let index_slice = index_val.iter().map(|x| *x..*x + 1).collect::<Vec<_>>();

let src_val = src.get_slice(&slice)?;
output.set_slice(&index_slice, &src_val)?;
Ok::<_, TensorError>(())
@@ -1500,6 +1501,67 @@ pub mod nonlinearities {
.unwrap()
}

/// Powers of 2
/// # Arguments
/// * `a` - Tensor
/// * `scale` - Single value
/// # Examples
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::tensor::ops::nonlinearities::ipow2;
/// let x = Tensor::<IntegerRep>::new(
/// Some(&[2, 15, 2, 1, 1, 0]),
/// &[2, 3],
/// ).unwrap();
/// let result = ipow2(&x, 1.0);
/// let expected = Tensor::<IntegerRep>::new(Some(&[4, 32768, 4, 2, 2, 1]), &[2, 3]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn ipow2(a: &Tensor<IntegerRep>, scale_output: f64) -> Tensor<IntegerRep> {
a.par_enum_map(|_, a_i| {
let kix = a_i as f64;
let kix = scale_output * (2.0_f64).powf(kix);
let rounded = kix.round();
Ok::<_, TensorError>(rounded as IntegerRep)
})
.unwrap()
}

/// Elementwise applies log base 2 to a tensor of integers.
/// # Arguments
/// * `a` - Tensor
/// * `scale_input` - Single value
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::tensor::ops::nonlinearities::ilog2;
/// let x = Tensor::<IntegerRep>::new(
/// Some(&[2, 15, 2, 1, 1, 2]),
/// &[2, 3],
/// ).unwrap();
/// let result = ilog2(&x, 1.0);
/// let expected = Tensor::<IntegerRep>::new(Some(&[1, 4, 1, 0, 0, 1]), &[2, 3]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn ilog2(a: &Tensor<IntegerRep>, scale_input: f64) -> Tensor<IntegerRep> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let log = (kix).log2();
let floor = log.floor();
let ceil = log.ceil();
let floor_dist = ((2.0_f64).powf(floor) - kix).abs();
let ceil_dist = (kix - (2.0_f64).powf(ceil)).abs();

if floor_dist < ceil_dist {
Ok::<_, TensorError>(floor as IntegerRep)
} else {
Ok::<_, TensorError>(ceil as IntegerRep)
}
})
.unwrap()
}
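Note the rounding rule: ilog2 does not truncate, it snaps to whichever neighbouring power of two is closer in value space, and ties go to the ceiling. An input of 3 at unit scale sits exactly between 2 and 4, so it rounds up:

use ezkl::fieldutils::IntegerRep;
use ezkl::tensor::Tensor;
use ezkl::tensor::ops::nonlinearities::ilog2;

fn main() {
    // |2^1 - 3| == |2^2 - 3| == 1, and ties prefer the ceiling,
    // so ilog2 maps 3 to 2 rather than 1.
    let x = Tensor::<IntegerRep>::new(Some(&[3]), &[1]).unwrap();
    let expected = Tensor::<IntegerRep>::new(Some(&[2]), &[1]).unwrap();
    assert_eq!(ilog2(&x, 1.0), expected);
}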

/// Elementwise applies sigmoid to a tensor of integers.
/// # Arguments
///
@@ -1603,7 +1665,7 @@ pub mod nonlinearities {
/// Some(&[2, 15, 2, 1, 1, 0]),
/// &[2, 3],
/// ).unwrap();
/// let result = exp(&x, 1.0);
/// let result = exp(&x, 1.0, std::f64::consts::E);
/// let expected = Tensor::<IntegerRep>::new(Some(&[7, 3269017, 7, 3, 3, 1]), &[2, 3]).unwrap();
/// assert_eq!(result, expected);
///
@@ -1612,28 +1674,27 @@ pub mod nonlinearities {
/// Some(&[37, 12, 41]),
/// &[3],
/// ).unwrap();
/// let result = exp(&x, 512.0);
/// let result = exp(&x, 512.0, std::f64::consts::E);
///
/// let expected = Tensor::<IntegerRep>::new(Some(&[550, 524, 555]), &[3]).unwrap();
///
/// assert_eq!(result, expected);
/// ```
pub fn exp(a: &Tensor<IntegerRep>, scale_input: f64) -> Tensor<IntegerRep> {
pub fn exp(a: &Tensor<IntegerRep>, scale_input: f64, base: f64) -> Tensor<IntegerRep> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.exp();
let fout = scale_input * base.powf(kix);
let rounded = fout.round();
Ok::<_, TensorError>(rounded as IntegerRep)
})
.unwrap()
}
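With the new base parameter, the same helper covers both e^x and the constant-base rewrite of Pow seen earlier; at unit scale and base 2 it reproduces ipow2 exactly:

use ezkl::fieldutils::IntegerRep;
use ezkl::tensor::Tensor;
use ezkl::tensor::ops::nonlinearities::{exp, ipow2};

fn main() {
    let x = Tensor::<IntegerRep>::new(Some(&[0, 1, 2, 3]), &[4]).unwrap();
    // scale_input == 1.0, so exp computes round(1.0 * 2^x), same as ipow2.
    assert_eq!(exp(&x, 1.0, 2.0), ipow2(&x, 1.0));
}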

/// Elementwise applies exponential to a tensor of integers.
/// Elementwise applies ln to a tensor of integers.
/// # Arguments
///
/// * `a` - Tensor
/// * `scale_input` - Single value
/// * `scale_output` - Single value
/// # Examples
/// ```
/// use ezkl::tensor::Tensor;

Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -27,7 +27,8 @@
"check_mode": "UNSAFE",
"commitment": "KZG",
"decomp_base": 128,
"decomp_legs": 2
"decomp_legs": 2,
"bounded_log_lookup": false
},
"num_rows": 46,
"total_assignments": 92,

@@ -1 +1 @@
{"inputs":[["0200000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000"]],"pretty_elements":{"rescaled_inputs":[["2","1","1"]],"inputs":[["0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000001"]],"processed_inputs":[],"processed_params":[],"processed_outputs":[],"rescaled_outputs":[["0","0","0","0"]],"outputs":[["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"]]},"outputs":[["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000"]],"processed_inputs":null,"processed_params":null,"processed_outputs":null,"max_lookup_inputs":0,"min_lookup_inputs":0,"max_range_size":127}
{"inputs":[["0200000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000"]],"pretty_elements":{"rescaled_inputs":[["2","1","1"]],"inputs":[["0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000001"]],"processed_inputs":[],"processed_params":[],"processed_outputs":[],"rescaled_outputs":[["0","0","0","0"]],"outputs":[["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"]]},"outputs":[["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000"]],"processed_inputs":null,"processed_params":null,"processed_outputs":null,"max_lookup_inputs":0,"min_lookup_inputs":0,"max_range_size":127,"version":"source - no compatibility guaranteed"}
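The only change to the regenerated witness fixture is the trailing version key. A quick way to confirm that from Rust, treating the file as untyped JSON:

use serde_json::Value;

fn main() {
    let witness = r#"{"max_range_size": 127, "version": "source - no compatibility guaranteed"}"#;
    let v: Value = serde_json::from_str(witness).unwrap();
    // Older fixtures simply lack the key, so this would be None for them.
    assert_eq!(
        v.get("version").and_then(Value::as_str),
        Some("source - no compatibility guaranteed")
    );
}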
@@ -205,7 +205,7 @@ mod native_tests {
|
||||
"1l_tiny_div",
|
||||
];
|
||||
|
||||
const TESTS: [&str; 95] = [
|
||||
const TESTS: [&str; 98] = [
|
||||
"1l_mlp", //0
|
||||
"1l_slice",
|
||||
"1l_concat",
|
||||
@@ -305,6 +305,9 @@ mod native_tests {
|
||||
"lstm_medium", // 92
|
||||
"lenet_5", // 93
|
||||
"rsqrt", // 94
|
||||
"log", // 95
|
||||
"exp", // 96
|
||||
"general_exp", // 97
|
||||
];
|
||||
|
||||
const WASM_TESTS: [&str; 46] = [
|
||||
@@ -489,7 +492,7 @@ mod native_tests {
|
||||
#[cfg(feature="icicle")]
|
||||
seq!(N in 0..=2 {
|
||||
#(#[test_case(TESTS_AGGR[N])])*
|
||||
fn aggr_prove_and_verify_(test: &str) {
|
||||
fn kzg_aggr_prove_and_verify_(test: &str) {
|
||||
crate::native_tests::init_binary();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(test_dir.path().to_str().unwrap(), test);
|
||||
@@ -538,12 +541,12 @@ mod native_tests {
|
||||
crate::native_tests::init_binary();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
mock(path, test.to_string(), "public", "fixed", "public", 1, "accuracy", None, 0.0);
|
||||
mock(path, test.to_string(), "public", "fixed", "public", 1, "accuracy", None, 0.0, false);
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
});
|
||||
|
||||
seq!(N in 0..=94 {
|
||||
seq!(N in 0..=97 {
|
||||
|
||||
#(#[test_case(TESTS[N])])*
|
||||
#[ignore]
|
||||
@@ -555,15 +558,7 @@ mod native_tests {
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
|
||||
#(#[test_case(TESTS[N])])*
|
||||
fn accuracy_measurement_div_rebase_(test: &str) {
|
||||
crate::native_tests::init_binary();
|
||||
crate::native_tests::setup_py_env();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 2.6, true);
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
|
||||
|
||||
#(#[test_case(TESTS[N])])*
|
||||
fn accuracy_measurement_public_outputs_(test: &str) {
|
||||
@@ -571,7 +566,7 @@ mod native_tests {
|
||||
crate::native_tests::setup_py_env();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 2.6, false);
|
||||
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 2.6);
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
|
||||
@@ -581,7 +576,7 @@ mod native_tests {
|
||||
crate::native_tests::setup_py_env();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
accuracy_measurement(path, test.to_string(), "private", "fixed", "private", 1, "accuracy", 2.6 , false);
|
||||
accuracy_measurement(path, test.to_string(), "private", "fixed", "private", 1, "accuracy", 2.6 );
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
|
||||
@@ -591,7 +586,7 @@ mod native_tests {
|
||||
crate::native_tests::setup_py_env();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
accuracy_measurement(path, test.to_string(), "public", "private", "private", 1, "accuracy", 2.6, false);
|
||||
accuracy_measurement(path, test.to_string(), "public", "private", "private", 1, "accuracy", 2.6);
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
|
||||
@@ -602,7 +597,7 @@ mod native_tests {
|
||||
crate::native_tests::setup_py_env();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "resources", 3.1, false);
|
||||
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "resources", 3.1);
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
|
||||
@@ -611,7 +606,17 @@ mod native_tests {
|
||||
crate::native_tests::init_binary();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, 0.0);
|
||||
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, 0.0, false);
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
|
||||
|
||||
#(#[test_case(TESTS[N])])*
|
||||
fn mock_bounded_lookup_log(test: &str) {
|
||||
crate::native_tests::init_binary();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, 0.0, true);
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
|
||||
@@ -622,7 +627,7 @@ mod native_tests {
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
// gen random number between 0.0 and 1.0
|
||||
let tolerance = rand::thread_rng().gen_range(0.0..1.0) * 100.0;
|
||||
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, tolerance);
|
||||
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, tolerance, false);
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
|
||||
@@ -631,13 +636,13 @@ mod native_tests {
|
||||
#(#[test_case(TESTS[N])])*
|
||||
fn mock_large_batch_public_outputs_(test: &str) {
|
||||
// currently variable output rank is not supported in ONNX
|
||||
if test != "gather_nd" && test != "lstm_large" && test != "lstm_medium" {
|
||||
if test != "gather_nd" && test != "lstm_large" && test != "lstm_medium" && test != "scatter_nd" {
|
||||
crate::native_tests::init_binary();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
let large_batch_dir = &format!("large_batches_{}", test);
|
||||
crate::native_tests::mk_data_batches_(path, test, &large_batch_dir, 10);
|
||||
mock(path, large_batch_dir.to_string(), "private", "private", "public", 10, "resources", None, 0.0);
|
||||
mock(path, large_batch_dir.to_string(), "private", "private", "public", 10, "resources", None, 0.0, false);
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
}
|
||||
@@ -647,7 +652,7 @@ mod native_tests {
|
||||
crate::native_tests::init_binary();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
mock(path, test.to_string(), "public", "private", "private", 1, "resources", None, 0.0);
|
||||
mock(path, test.to_string(), "public", "private", "private", 1, "resources", None, 0.0, false);
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
|
||||
@@ -656,7 +661,7 @@ mod native_tests {
|
||||
crate::native_tests::init_binary();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
mock(path, test.to_string(), "public", "hashed", "private", 1, "resources", None, 0.0);
|
||||
mock(path, test.to_string(), "public", "hashed", "private", 1, "resources", None, 0.0, false);
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
|
||||
@@ -665,7 +670,7 @@ mod native_tests {
|
||||
crate::native_tests::init_binary();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
mock(path, test.to_string(), "fixed", "private", "private", 1, "resources", None, 0.0);
|
||||
mock(path, test.to_string(), "fixed", "private", "private", 1, "resources", None, 0.0, false);
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
|
||||
@@ -674,7 +679,7 @@ mod native_tests {
|
||||
crate::native_tests::init_binary();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
|
||||
mock(path, test.to_string(), "private", "private", "fixed", 1, "resources", None, 0.0);
|
||||
mock(path, test.to_string(), "private", "private", "fixed", 1, "resources", None, 0.0, false);
|
||||
test_dir.close().unwrap();
|
||||
}
|
||||
|
||||
@@ -683,7 +688,7 @@ mod native_tests {
|
||||
crate::native_tests::init_binary();
|
||||
let test_dir = TempDir::new(test).unwrap();
|
||||
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "fixed", "private", 1, "resources", None, 0.0);
mock(path, test.to_string(), "private", "fixed", "private", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}

@@ -692,7 +697,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "hashed", "private", "public", 1, "resources", None, 0.0);
mock(path, test.to_string(), "hashed", "private", "public", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}

@@ -701,7 +706,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "polycommit", "private", "public", 1, "resources", None, 0.0);
mock(path, test.to_string(), "polycommit", "private", "public", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}

@@ -711,7 +716,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "hashed", "public", 1, "resources", None, 0.0);
mock(path, test.to_string(), "private", "hashed", "public", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}

@@ -721,7 +726,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "polycommit", "public", 1, "resources", None, 0.0);
mock(path, test.to_string(), "private", "polycommit", "public", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}

@@ -730,7 +735,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "private", "hashed", 1, "resources", None, 0.0);
mock(path, test.to_string(), "public", "private", "hashed", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}

@@ -740,7 +745,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "private", "polycommit", 1, "resources", None, 0.0);
mock(path, test.to_string(), "public", "private", "polycommit", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}

@@ -749,7 +754,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "fixed", "hashed", 1, "resources", None, 0.0);
mock(path, test.to_string(), "public", "fixed", "hashed", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}

@@ -759,7 +764,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "polycommit", "hashed", 1, "resources", None, 0.0);
mock(path, test.to_string(), "public", "polycommit", "hashed", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}

@@ -769,7 +774,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "polycommit", "polycommit", "polycommit", 1, "resources", None, 0.0);
mock(path, test.to_string(), "polycommit", "polycommit", "polycommit", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}

@@ -779,7 +784,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "hashed", "private", "hashed", 1, "resources", None, 0.0);
mock(path, test.to_string(), "hashed", "private", "hashed", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}

@@ -789,7 +794,7 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
// needs an extra row for the large model
mock(path, test.to_string(),"hashed", "hashed", "public", 1, "resources", None, 0.0);
mock(path, test.to_string(),"hashed", "hashed", "public", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}

@@ -799,7 +804,7 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
// needs an extra row for the large model
mock(path, test.to_string(),"hashed", "hashed", "hashed", 1, "resources", None, 0.0);
mock(path, test.to_string(),"hashed", "hashed", "hashed", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}

@@ -976,7 +981,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", None, 0.0);
mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
});
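
Every hunk above makes the same mechanical change: `mock` now takes a trailing boolean for the new bounded log-lookup mode, and all existing call sites opt out by passing `false`. Below is a minimal sketch of the updated signature as implied by these call sites and the parameter hunks that follow; only `bounded_lookup_log` is confirmed by the diff, the earlier parameter names are assumptions.

    // Sketch only -- parameter names before the new flag are inferred from
    // the call sites (visibility strings, batch size, "resources", None, 0.0).
    #[allow(unused_variables)]
    fn mock(
        path: &str,
        example_name: String,
        input_visibility: &str,
        param_visibility: &str,
        output_visibility: &str,
        batch_size: usize,
        cal_target: &str,
        scales_to_use: Option<Vec<u32>>,
        tolerance: f32,
        bounded_lookup_log: bool, // new trailing flag; existing tests pass `false`
    ) {
        // body unchanged: generate settings and witness, then mock-prove
    }

Mirroring the generated tests, a call that opts in would simply end `..., 0.0, true)` instead of `..., 0.0, false)`.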
@@ -1453,6 +1458,7 @@ mod native_tests {
cal_target: &str,
scales_to_use: Option<Vec<u32>>,
tolerance: f32,
bounded_lookup_log: bool,
) {
let mut tolerance = tolerance;
gen_circuit_settings_and_witness(

@@ -1465,10 +1471,10 @@ mod native_tests {
cal_target,
scales_to_use,
2,
false,
&mut tolerance,
Commitments::KZG,
2,
bounded_lookup_log,
);

if tolerance > 0.0 {
@@ -1606,10 +1612,10 @@ mod native_tests {
cal_target: &str,
scales_to_use: Option<Vec<u32>>,
num_inner_columns: usize,
div_rebasing: bool,
tolerance: &mut f32,
commitment: Commitments,
lookup_safety_margin: usize,
bounded_lookup_log: bool,
) {
let mut args = vec![
"gen-settings".to_string(),

@@ -1628,9 +1634,9 @@ mod native_tests {
format!("--commitment={}", commitment),
];

if div_rebasing {
args.push("--div-rebasing".to_string());
};
if bounded_lookup_log {
args.push("--bounded-log-lookup".to_string());
}

let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(args)
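
The plumbing is a single conditional push: when `bounded_lookup_log` is set, the `gen-settings` invocation gains the extra `--bounded-log-lookup` argument. A self-contained distillation of that pattern follows; the function name is illustrative, not from the diff.

    // Illustrative only: isolates the conditional flag-push pattern used in
    // gen_circuit_settings_and_witness above.
    fn settings_args(div_rebasing: bool, bounded_lookup_log: bool) -> Vec<String> {
        let mut args = vec!["gen-settings".to_string()];
        if div_rebasing {
            args.push("--div-rebasing".to_string());
        }
        if bounded_lookup_log {
            args.push("--bounded-log-lookup".to_string());
        }
        args
    }

    fn main() {
        // With the new flag enabled, the ezkl binary sees the extra argument.
        assert_eq!(
            settings_args(false, true),
            vec!["gen-settings", "--bounded-log-lookup"]
        );
    }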
@@ -1730,7 +1736,6 @@ mod native_tests {
batch_size: usize,
cal_target: &str,
target_perc: f32,
div_rebasing: bool,
) {
gen_circuit_settings_and_witness(
test_dir,

@@ -1742,10 +1747,10 @@ mod native_tests {
cal_target,
None,
2,
div_rebasing,
&mut 0.0,
Commitments::KZG,
2,
false,
);

println!(

@@ -2026,10 +2031,10 @@ mod native_tests {
target_str,
scales_to_use,
num_inner_columns,
false,
&mut 0.0,
commitment,
lookup_safety_margin,
false,
);

let settings_path = format!("{}/{}/settings.json", test_dir, example_name);

@@ -2458,10 +2463,10 @@ mod native_tests {
// we need the accuracy
Some(vec![4]),
1,
false,
&mut 0.0,
Commitments::KZG,
2,
false,
);

let model_path = format!("{}/{}/network.compiled", test_dir, example_name);
@@ -68,6 +68,8 @@ mod py_tests {
"install",
"torch-geometric==2.5.2",
"torch==2.2.2",
"datasets==3.2.0",
"torchtext==0.17.2",
"torchvision==0.17.2",
"pandas==2.2.1",
"numpy==1.26.4",

@@ -76,8 +78,8 @@ mod py_tests {
"nbconvert==7.16.3",
"onnx==1.16.0",
"kaggle==1.6.8",
"py-solc-x==2.0.2",
"web3==6.16.0",
"py-solc-x==2.0.3",
"web3==7.5.0",
"librosa==0.10.1",
"keras==3.1.1",
"tensorflow==2.16.1",
@@ -123,7 +125,7 @@ mod py_tests {
}
}

const TESTS: [&str; 34] = [
const TESTS: [&str; 35] = [
"ezkl_demo_batch.ipynb", // 0
"proof_splitting.ipynb", // 1
"variance.ipynb", // 2

@@ -158,6 +160,7 @@ mod py_tests {
"mnist_classifier.ipynb", // 31
"world_rotation.ipynb", // 32
"logistic_regression.ipynb", // 33
"univ3-da.ipynb", // 34
];
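
Note the paired change in the array type: a Rust array's length is part of its type, so appending `univ3-da.ipynb` at index 34 forces the annotation from `[&str; 34]` to `[&str; 35]`; the count and the literal must agree or the file will not compile. A trivial standalone illustration (the `NOTEBOOKS` constant is hypothetical):

    // The annotated length must match the element count exactly.
    const NOTEBOOKS: [&str; 3] = ["a.ipynb", "b.ipynb", "c.ipynb"];

    fn main() {
        // Indices run 0..=2 for a 3-element array, matching the `// 34`
        // style index comments used in TESTS above.
        println!("last notebook: {}", NOTEBOOKS[NOTEBOOKS.len() - 1]);
    }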
macro_rules! test_func {

@@ -188,6 +191,27 @@ mod py_tests {
anvil_child.kill().unwrap();
}
});

#[test]
fn neural_bag_of_words_notebook() {
crate::py_tests::init_binary();
let test_dir: TempDir = TempDir::new("neural_bow").unwrap();
let path = test_dir.path().to_str().unwrap();
crate::py_tests::mv_test_(path, "neural_bow.ipynb");
run_notebook(path, "neural_bow.ipynb");
test_dir.close().unwrap();
}

#[test]
fn felt_conversion_test_notebook() {
crate::py_tests::init_binary();
let test_dir: TempDir = TempDir::new("felt_conversion_test").unwrap();
let path = test_dir.path().to_str().unwrap();
crate::py_tests::mv_test_(path, "felt_conversion_test.ipynb");
run_notebook(path, "felt_conversion_test.ipynb");
test_dir.close().unwrap();
}
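
The two new tests differ only in the directory and notebook names. A hypothetical helper inside the same test module could factor out the shared body; `run_notebook_test` is an illustrative name and is not part of the diff, while every call in its body appears verbatim above.

    // Hypothetical refactor: only the wrapper function itself is new.
    fn run_notebook_test(dir_name: &str, notebook: &str) {
        crate::py_tests::init_binary();
        let test_dir: TempDir = TempDir::new(dir_name).unwrap();
        let path = test_dir.path().to_str().unwrap();
        crate::py_tests::mv_test_(path, notebook);
        run_notebook(path, notebook);
        test_dir.close().unwrap();
    }

    #[test]
    fn neural_bag_of_words_notebook() {
        run_notebook_test("neural_bow", "neural_bow.ipynb");
    }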
#[test]
fn voice_notebook_() {
crate::py_tests::init_binary();