Compare commits

...

9 Commits

Author            SHA1        Message                                                      Date
dante             04d7b5feaa  chore: fold div_rebasing parameter into calibration (#699)  2024-01-31 10:03:12 +00:00
dante             45fd12a04f  refactor!: make rebasing multiplicative by default (#698)   2024-01-30 18:37:57 +00:00
                              BREAKING CHANGE: adds a `required_range_checks` field to `cs`
dante             bc7c33190f  feat: allow for separate vk render on-chain (#697)          2024-01-25 19:48:13 +00:00
dante             df72e01414  feat: make selector compression optional (#696)             2024-01-24 00:09:00 +00:00
Tobin South       172e26c00d  fix: link for CLI auto-install (#695)                       2024-01-22 13:00:27 +00:00
Jason Morton      11ac120f23  fix: large test numbering (#689)                            2024-01-21 21:01:46 +00:00
                              Co-authored-by: dante <45801863+alexander-camuto@users.noreply.github.com>
Jseam             0fdd92e9f3  fix: move install_ezkl_cli.sh into main repo (#694)         2024-01-21 20:59:39 +00:00
                              Co-authored-by: dante <45801863+alexander-camuto@users.noreply.github.com>
Alexander Camuto  31f58056a5  chore: bump pyo3                                            2024-01-21 20:58:32 +00:00
dante             ddbcc1d2d8  fix: calibration should only consider local scales (#691)  2024-01-18 23:28:49 +00:00
69 changed files with 7996 additions and 13078 deletions
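
The headline change is #698, which is breaking: operations can now declare the range checks they require, and the constraint system gains a `required_range_checks` field. The new trait hook appears in the `circuit/ops` diff near the end of this compare; a minimal sketch with the field/tensor trait bounds elided:

```rust
/// The range of a lookup or range-check table, as defined in `circuit::table`.
pub type Range = (i128, i128);

/// Sketch of the new hook on the `Op` trait; the real trait in the diff
/// below carries `PrimeField + TensorType + PartialOrd` bounds elided here.
pub trait Op {
    /// Returns the range checks required by the operation (default: none).
    fn required_range_checks(&self) -> Vec<Range> {
        vec![]
    }
}
```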

View File

@@ -6,7 +6,7 @@ on:
description: "Test scenario tags"
jobs:
large-tests:
runs-on: self-hosted
runs-on: kaiju
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
@@ -23,6 +23,6 @@ jobs:
- name: Self Attention KZG prove and verify large tests
run: cargo test --release --verbose tests::large_kzg_prove_and_verify_::large_tests_0_expects -- --include-ignored
- name: mobilenet Mock
run: cargo test --release --verbose tests::large_mock_::large_tests_2_expects -- --include-ignored
run: cargo test --release --verbose tests::large_mock_::large_tests_3_expects -- --include-ignored
- name: mobilenet KZG prove and verify large tests
run: cargo test --release --verbose tests::large_kzg_prove_and_verify_::large_tests_2_expects -- --include-ignored
run: cargo test --release --verbose tests::large_kzg_prove_and_verify_::large_tests_3_expects -- --include-ignored

View File

@@ -313,6 +313,8 @@ jobs:
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev 95a93cd397f25f3f8d49d2851eb52bc2d52dd983 --profile local --locked anvil --force
- name: KZG prove and verify tests (EVM + VK rendered seperately)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_prove_and_verify_render_seperately_ --test-threads 1
- name: KZG prove and verify tests (EVM + kzg all)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_kzg_all_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + kzg inputs)
@@ -601,6 +603,8 @@ jobs:
run: python -m venv .env; source .env/bin/activate; pip install -r requirements.txt;
- name: Build python ezkl
run: source .env/bin/activate; maturin develop --features python-bindings --release
- name: Div rebase
run: source .env/bin/activate; cargo nextest run --release --verbose tests::accuracy_measurement_div_rebase_
- name: Public inputs
run: source .env/bin/activate; cargo nextest run --release --verbose tests::accuracy_measurement_public_inputs_
- name: fixed params
@@ -643,14 +647,13 @@ jobs:
# # now dump the contents of the file into a file called kaggle.json
# echo $KAGGLE_API_KEY > /home/ubuntu/.kaggle/kaggle.json
# chmod 600 /home/ubuntu/.kaggle/kaggle.json
- name: NBEATS tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::nbeats_
- name: All notebooks
run: source .env/bin/activate; cargo nextest run py_tests::tests::run_notebook_ --no-capture
- name: Voice tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::voice_
- name: All notebooks
run: source .env/bin/activate; cargo nextest run py_tests::tests::run_notebook_ --test-threads 1
- name: NBEATS tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::nbeats_
- name: Tictactoe tutorials
run: source .env/bin/activate; cargo nextest run py_tests::tests::tictactoe_ --test-threads 1
run: source .env/bin/activate; cargo nextest run py_tests::tests::tictactoe_ --no-capture
# - name: Postgres tutorials
# run: source .env/bin/activate; cargo nextest run py_tests::tests::postgres_ --test-threads 1

Cargo.lock (generated)
View File

@@ -1058,16 +1058,6 @@ dependencies = [
"itertools 0.10.5",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200"
dependencies = [
"cfg-if",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.3"
@@ -1088,7 +1078,7 @@ dependencies = [
"autocfg",
"cfg-if",
"crossbeam-utils",
"memoffset 0.9.0",
"memoffset",
"scopeguard",
]
@@ -1378,7 +1368,7 @@ checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30"
[[package]]
name = "ecc"
version = "0.1.0"
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac/chunked-mv-lookup#c1d7551c82953829caee30fe218759b0d2657d26"
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac/chunked-mv-lookup#b43ebe30e84825d0d004fa27803d99c4187d419f"
dependencies = [
"integer",
"num-bigint",
@@ -1862,7 +1852,7 @@ dependencies = [
"halo2_gadgets",
"halo2_proofs",
"halo2_solidity_verifier",
"halo2curves 0.1.0",
"halo2curves 0.6.0",
"hex",
"indicatif",
"instant",
@@ -2253,7 +2243,7 @@ dependencies = [
[[package]]
name = "halo2_gadgets"
version = "0.2.0"
source = "git+https://github.com/zkonduit/halo2?branch=ac/lookup-modularity#57b9123835aa7d8482f4182ede3e8f4b0aea5c0a"
source = "git+https://github.com/zkonduit/halo2?branch=main#6a2b9ada9804807ddba03bbadaf6e63822cec275"
dependencies = [
"arrayvec 0.7.4",
"bitvec 1.0.1",
@@ -2269,14 +2259,14 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
source = "git+https://github.com/zkonduit/halo2?branch=ac/lookup-modularity#57b9123835aa7d8482f4182ede3e8f4b0aea5c0a"
version = "0.3.0"
source = "git+https://github.com/zkonduit/halo2?branch=main#6a2b9ada9804807ddba03bbadaf6e63822cec275"
dependencies = [
"blake2b_simd",
"env_logger",
"ff",
"group",
"halo2curves 0.1.0",
"halo2curves 0.6.0",
"icicle",
"log",
"maybe-rayon",
@@ -2292,7 +2282,7 @@ dependencies = [
[[package]]
name = "halo2_solidity_verifier"
version = "0.1.0"
source = "git+https://github.com/alexander-camuto/halo2-solidity-verifier?branch=ac/lookup-modularity#cf9a3128bb583680dd4c418defd8d37bd8e5c3f1"
source = "git+https://github.com/alexander-camuto/halo2-solidity-verifier?branch=main#eb04be1f7d005e5b9dd3ff41efa30aeb5e0c34a3"
dependencies = [
"askama",
"blake2b_simd",
@@ -2319,8 +2309,6 @@ dependencies = [
"paste",
"rand 0.8.5",
"rand_core 0.6.4",
"serde",
"serde_arrays",
"static_assertions",
"subtle",
]
@@ -2343,10 +2331,35 @@ dependencies = [
"subtle",
]
[[package]]
name = "halo2curves"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3675880dc0cc7cd468943266297198a28f88210ba60ca5e0e04d121edf86b46"
dependencies = [
"blake2b_simd",
"ff",
"group",
"hex",
"lazy_static",
"num-bigint",
"num-traits",
"pairing",
"pasta_curves",
"paste",
"rand 0.8.5",
"rand_core 0.6.4",
"rayon",
"serde",
"serde_arrays",
"static_assertions",
"subtle",
]
[[package]]
name = "halo2wrong"
version = "0.1.0"
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac/chunked-mv-lookup#c1d7551c82953829caee30fe218759b0d2657d26"
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac/chunked-mv-lookup#b43ebe30e84825d0d004fa27803d99c4187d419f"
dependencies = [
"halo2_proofs",
"num-bigint",
@@ -2423,6 +2436,9 @@ name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
dependencies = [
"serde",
]
[[package]]
name = "hex-literal"
@@ -2668,9 +2684,9 @@ dependencies = [
[[package]]
name = "indoc"
version = "1.0.9"
version = "2.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa799dd5ed20a7e349f3b4639aa80d74549c81716d9ec4f994c9b5815598306"
checksum = "1e186cfbae8084e513daff4240b4797e342f988cecda4fb6c939150f96315fd8"
[[package]]
name = "inout"
@@ -2696,7 +2712,7 @@ dependencies = [
[[package]]
name = "integer"
version = "0.1.0"
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac/chunked-mv-lookup#c1d7551c82953829caee30fe218759b0d2657d26"
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac/chunked-mv-lookup#b43ebe30e84825d0d004fa27803d99c4187d419f"
dependencies = [
"maingate",
"num-bigint",
@@ -2941,7 +2957,7 @@ checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
[[package]]
name = "maingate"
version = "0.1.0"
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac/chunked-mv-lookup#c1d7551c82953829caee30fe218759b0d2657d26"
source = "git+https://github.com/zkonduit/halo2wrong?branch=ac/chunked-mv-lookup#b43ebe30e84825d0d004fa27803d99c4187d419f"
dependencies = [
"halo2wrong",
"num-bigint",
@@ -3001,15 +3017,6 @@ dependencies = [
"libc",
]
[[package]]
name = "memoffset"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1"
dependencies = [
"autocfg",
]
[[package]]
name = "memoffset"
version = "0.9.0"
@@ -3335,6 +3342,15 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
[[package]]
name = "pairing"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f"
dependencies = [
"group",
]
[[package]]
name = "papergrid"
version = "0.9.1"
@@ -3840,14 +3856,14 @@ dependencies = [
[[package]]
name = "pyo3"
version = "0.18.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3b1ac5b3731ba34fdaa9785f8d74d17448cd18f30cf19e0c7e7b1fdb5272109"
checksum = "9a89dc7a5850d0e983be1ec2a463a171d20990487c3cfcd68b5363f1ee3d6fe0"
dependencies = [
"cfg-if",
"indoc",
"libc",
"memoffset 0.8.0",
"memoffset",
"parking_lot",
"pyo3-build-config",
"pyo3-ffi",
@@ -3857,9 +3873,9 @@ dependencies = [
[[package]]
name = "pyo3-asyncio"
version = "0.18.0"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3564762e37035cfc486228e10b0528460fa026d681b5763873c693aa0d5c260"
checksum = "6ea6b68e93db3622f3bb3bf363246cf948ed5375afe7abff98ccbdd50b184995"
dependencies = [
"futures",
"once_cell",
@@ -3871,9 +3887,9 @@ dependencies = [
[[package]]
name = "pyo3-asyncio-macros"
version = "0.18.0"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be72d4cd43a27530306bd0d20d3932182fbdd072c6b98d3638bc37efb9d559dd"
checksum = "56c467178e1da6252c95c29ecf898b133f742e9181dca5def15dc24e19d45a39"
dependencies = [
"proc-macro2",
"quote",
@@ -3882,9 +3898,9 @@ dependencies = [
[[package]]
name = "pyo3-build-config"
version = "0.18.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9cb946f5ac61bb61a5014924910d936ebd2b23b705f7a4a3c40b05c720b079a3"
checksum = "07426f0d8fe5a601f26293f300afd1a7b1ed5e78b2a705870c5f30893c5163be"
dependencies = [
"once_cell",
"target-lexicon",
@@ -3892,9 +3908,9 @@ dependencies = [
[[package]]
name = "pyo3-ffi"
version = "0.18.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd4d7c5337821916ea2a1d21d1092e8443cf34879e53a0ac653fbb98f44ff65c"
checksum = "dbb7dec17e17766b46bca4f1a4215a85006b4c2ecde122076c562dd058da6cf1"
dependencies = [
"libc",
"pyo3-build-config",
@@ -3902,9 +3918,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
version = "0.8.2"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c94ff6535a6bae58d7d0b85e60d4c53f7f84d0d0aa35d6a28c3f3e70bfe51444"
checksum = "4c10808ee7250403bedb24bc30c32493e93875fef7ba3e4292226fe924f398bd"
dependencies = [
"arc-swap",
"log",
@@ -3913,25 +3929,26 @@ dependencies = [
[[package]]
name = "pyo3-macros"
version = "0.18.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9d39c55dab3fc5a4b25bbd1ac10a2da452c4aca13bb450f22818a002e29648d"
checksum = "05f738b4e40d50b5711957f142878cfa0f28e054aa0ebdfc3fd137a843f74ed3"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
"syn 1.0.109",
"syn 2.0.22",
]
[[package]]
name = "pyo3-macros-backend"
version = "0.18.3"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97daff08a4c48320587b5224cc98d609e3c27b6d437315bd40b605c98eeb5918"
checksum = "0fc910d4851847827daf9d6cdd4a823fbdaab5b8818325c5e97a86da79e8881f"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 1.0.109",
"syn 2.0.22",
]
[[package]]
@@ -4040,9 +4057,9 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3"
[[package]]
name = "rayon"
version = "1.7.0"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b"
checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051"
dependencies = [
"either",
"rayon-core",
@@ -4050,14 +4067,12 @@ dependencies = [
[[package]]
name = "rayon-core"
version = "1.11.0"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-utils",
"num_cpus",
]
[[package]]
@@ -4745,11 +4760,11 @@ dependencies = [
[[package]]
name = "snark-verifier"
version = "0.1.1"
source = "git+https://github.com/zkonduit/snark-verifier?branch=ac/chunked-mv-lookup#22ee76bee1a24f3732e994b72b10ec09939348de"
source = "git+https://github.com/zkonduit/snark-verifier?branch=ac/chunked-mv-lookup#574b65ea6b4d43eebac5565146519a95b435815c"
dependencies = [
"ecc",
"halo2_proofs",
"halo2curves 0.1.0",
"halo2curves 0.6.0",
"hex",
"itertools 0.10.5",
"lazy_static",
@@ -5552,9 +5567,9 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
[[package]]
name = "unindent"
version = "0.1.11"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1766d682d402817b5ac4490b3c3002d91dfa0d22812f341609f97b08757359c"
checksum = "c7de7d73e1754487cb58364ee906a499937a0dfabd86bcb980fa99ec8c8fa2ce"
[[package]]
name = "unzip-n"

View File

@@ -15,9 +15,9 @@ crate-type = ["cdylib", "rlib"]
[dependencies]
halo2_gadgets = { git = "https://github.com/zkonduit/halo2", branch= "ac/lookup-modularity" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2", branch= "ac/lookup-modularity" }
halo2curves = { version = "0.1.0" }
halo2_gadgets = { git = "https://github.com/zkonduit/halo2", branch= "main" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2", branch= "main" }
halo2curves = { version = "0.6.0", features = ["derive_serde"] }
rand = { version = "0.8", default_features = false }
itertools = { version = "0.10.3", default_features = false }
clap = { version = "4.3.3", features = ["derive"]}
@@ -28,7 +28,7 @@ thiserror = { version = "1.0.38", default_features = false }
hex = { version = "0.4.3", default_features = false }
halo2_wrong_ecc = { git = "https://github.com/zkonduit/halo2wrong", branch = "ac/chunked-mv-lookup", package = "ecc" }
snark-verifier = { git = "https://github.com/zkonduit/snark-verifier", branch = "ac/chunked-mv-lookup", features=["derive_serde"]}
halo2_solidity_verifier = { git = "https://github.com/alexander-camuto/halo2-solidity-verifier", branch= "ac/lookup-modularity" }
halo2_solidity_verifier = { git = "https://github.com/alexander-camuto/halo2-solidity-verifier", branch= "main" }
maybe-rayon = { version = "0.1.1", default_features = false }
bincode = { version = "1.3.3", default_features = false }
ark-std = { version = "^0.3.0", default-features = false }
@@ -51,9 +51,9 @@ plotters = { version = "0.3.0", default_features = false, optional = true }
regex = { version = "1", default_features = false }
tokio = { version = "1.26.0", default_features = false, features = ["macros", "rt"] }
tokio-util = { version = "0.7.9", features = ["codec"] }
pyo3 = { version = "0.18.3", features = ["extension-module", "abi3-py37", "macros"], default_features = false, optional = true }
pyo3-asyncio = { version = "0.18.0", features = ["attributes", "tokio-runtime"], default_features = false, optional = true }
pyo3-log = { version = "0.8.1", default_features = false, optional = true }
pyo3 = { version = "0.20.2", features = ["extension-module", "abi3-py37", "macros"], default_features = false, optional = true }
pyo3-asyncio = { version = "0.20.0", features = ["attributes", "tokio-runtime"], default_features = false, optional = true }
pyo3-log = { version = "0.9.0", default_features = false, optional = true }
tract-onnx = { git = "https://github.com/sonos/tract/", rev= "7b1aa33b2f7d1f19b80e270c83320f0f94daff69", default_features = false, optional = true }
tabled = { version = "0.12.0", optional = true }
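
A note on the dependency migration above: `halo2curves` moves from the pinned 0.1.0 to the crates.io 0.6.0 release with the `derive_serde` feature, which is why the Cargo.lock diff shows `serde` and `serde_arrays` leaving the 0.1.0 entry and appearing under 0.6.0. A hedged sketch of what the feature enables, assuming `serde_json` and `rand` are also available:

```rust
use halo2curves::bn256::Fr;
use halo2curves::ff::Field;

fn main() {
    // With `derive_serde`, halo2curves field/curve types implement serde's
    // Serialize/Deserialize, so keys and proofs can round-trip through JSON.
    let x = Fr::random(rand::rngs::OsRng);
    let json = serde_json::to_string(&x).unwrap();
    let y: Fr = serde_json::from_str(&json).unwrap();
    assert_eq!(x, y);
}
```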

View File

@@ -64,8 +64,8 @@ More notebook tutorials can be found within `examples/notebooks`.
#### CLI
Install the CLI
```bash
curl https://hub.ezkl.xyz/install_ezkl_cli.sh | bash
``` shell
curl https://raw.githubusercontent.com/zkonduit/ezkl/main/install_ezkl_cli.sh | bash
```
https://user-images.githubusercontent.com/45801863/236771676-5bbbbfd1-ba6f-418a-902e-20738ce0e9f0.mp4

View File

@@ -121,13 +121,16 @@ fn runcnvrl(c: &mut Criterion) {
group.throughput(Throughput::Elements(*size as u64));
group.bench_with_input(BenchmarkId::new("pk", size), &size, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params)
.unwrap();
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(
&circuit, &params, true,
)
.unwrap();
});
});
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params)
.unwrap();
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
group.throughput(Throughput::Elements(*size as u64));
group.bench_with_input(BenchmarkId::new("prove", size), &size, |b, &_| {

View File

@@ -90,13 +90,13 @@ fn rundot(c: &mut Criterion) {
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params)
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
});
});
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params).unwrap();
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| {

View File

@@ -94,13 +94,13 @@ fn runmatmul(c: &mut Criterion) {
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params)
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
});
});
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params).unwrap();
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| {

View File

@@ -1,4 +1,5 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use ezkl::circuit::table::Range;
use ezkl::circuit::*;
use ezkl::circuit::lookup::LookupOp;
@@ -16,7 +17,7 @@ use halo2_proofs::{
use halo2curves::bn256::{Bn256, Fr};
use std::marker::PhantomData;
const BITS: (i128, i128) = (-32768, 32768);
const BITS: Range = (-32768, 32768);
static mut LEN: usize = 4;
const K: usize = 16;
@@ -111,13 +112,13 @@ fn runmatmul(c: &mut Criterion) {
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params)
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
});
});
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params).unwrap();
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| {

View File

@@ -3,6 +3,7 @@ use ezkl::circuit::*;
use ezkl::circuit::lookup::LookupOp;
use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::table::Range;
use ezkl::pfsys::create_proof_circuit_kzg;
use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
@@ -16,7 +17,7 @@ use halo2_proofs::{
use halo2curves::bn256::{Bn256, Fr};
use std::marker::PhantomData;
const BITS: (i128, i128) = (-8180, 8180);
const BITS: Range = (-8180, 8180);
static mut LEN: usize = 4;
static mut K: usize = 16;
@@ -114,13 +115,13 @@ fn runmatmul(c: &mut Criterion) {
group.throughput(Throughput::Elements(k as u64));
group.bench_with_input(BenchmarkId::new("pk", k), &k, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params)
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
});
});
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params).unwrap();
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
group.throughput(Throughput::Elements(k as u64));
group.bench_with_input(BenchmarkId::new("prove", k), &k, |b, &_| {

View File

@@ -86,13 +86,13 @@ fn runsum(c: &mut Criterion) {
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params)
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
});
});
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params).unwrap();
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| {

View File

@@ -101,13 +101,16 @@ fn runsumpool(c: &mut Criterion) {
group.throughput(Throughput::Elements(*size as u64));
group.bench_with_input(BenchmarkId::new("pk", size), &size, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params)
.unwrap();
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(
&circuit, &params, true,
)
.unwrap();
});
});
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params)
.unwrap();
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
group.throughput(Throughput::Elements(*size as u64));
group.bench_with_input(BenchmarkId::new("prove", size), &size, |b, &_| {

View File

@@ -84,13 +84,13 @@ fn runadd(c: &mut Criterion) {
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params)
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
});
});
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params).unwrap();
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| {

View File

@@ -83,13 +83,13 @@ fn runpow(c: &mut Criterion) {
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params)
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
});
});
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params).unwrap();
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| {

View File

@@ -76,13 +76,13 @@ fn runposeidon(c: &mut Criterion) {
group.throughput(Throughput::Elements(*size as u64));
group.bench_with_input(BenchmarkId::new("pk", size), &size, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params)
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
});
});
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params).unwrap();
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, MyCircuit>(&circuit, &params, true)
.unwrap();
group.throughput(Throughput::Elements(*size as u64));
group.bench_with_input(BenchmarkId::new("prove", size), &size, |b, &_| {

View File

@@ -1,5 +1,6 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::table::Range;
use ezkl::circuit::{ops::lookup::LookupOp, BaseConfig as Config, CheckMode};
use ezkl::pfsys::create_proof_circuit_kzg;
use ezkl::pfsys::TranscriptType;
@@ -14,7 +15,7 @@ use halo2_proofs::{
use halo2curves::bn256::{Bn256, Fr};
use rand::Rng;
const BITS: (i128, i128) = (-32768, 32768);
const BITS: Range = (-32768, 32768);
static mut LEN: usize = 4;
const K: usize = 16;
@@ -90,13 +91,13 @@ fn runrelu(c: &mut Criterion) {
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, Fr, NLCircuit>(&circuit, &params)
create_keys::<KZGCommitmentScheme<Bn256>, Fr, NLCircuit>(&circuit, &params, true)
.unwrap();
});
});
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, NLCircuit>(&circuit, &params).unwrap();
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, NLCircuit>(&circuit, &params, true)
.unwrap();
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| {

View File

@@ -309,7 +309,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(ezkl.vecu64_to_felt(res['processed_outputs']['poseidon_hash'][0]))"
"print(ezkl.string_to_felt(res['processed_outputs']['poseidon_hash'][0]))"
]
},
{
@@ -338,7 +338,7 @@
"\n",
"def test_on_chain_data(res):\n",
" # Step 0: Convert the tensor to a flat list\n",
" data = [int(ezkl.vecu64_to_felt(res['processed_outputs']['poseidon_hash'][0]), 0)]\n",
" data = [int(ezkl.string_to_felt(res['processed_outputs']['poseidon_hash'][0]), 0)]\n",
"\n",
" # Step 1: Prepare the data\n",
" # Step 2: Prepare and compile the contract.\n",

View File

@@ -441,7 +441,7 @@
"# Serialize calibration data into file:\n",
"json.dump(data, open(cal_data_path, 'w'))\n",
"\n",
"# Optimize for resources, we cap logrows at 17 to reduce setup and proving time, at the expense of accuracy\n",
"# Optimize for resources, we cap logrows at 12 to reduce setup and proving time, at the expense of accuracy\n",
"# You may want to increase the max logrows if accuracy is a concern\n",
"res = ezkl.calibrate_settings(cal_data_path, model_path, settings_path, \"resources\", max_logrows = 12, scales = [2])"
]
@@ -695,7 +695,7 @@
"formatted_output = \"[\"\n",
"for i, value in enumerate(proof[\"instances\"]):\n",
" for j, field_element in enumerate(value):\n",
" onchain_input_array.append(ezkl.vecu64_to_felt(field_element))\n",
" onchain_input_array.append(ezkl.string_to_felt(field_element))\n",
" formatted_output += str(onchain_input_array[-1])\n",
" if j != len(value) - 1:\n",
" formatted_output += \", \"\n",

View File

@@ -300,13 +300,14 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
"# iterate over each submodel gen-settings, compile circuit and setup zkSNARK\n",
"\n",
"def setup(i):\n",
" print(\"Setting up split model \"+str(i))\n",
" # file names\n",
" model_path = os.path.join('network_split_'+str(i)+'.onnx')\n",
" settings_path = os.path.join('settings_split_'+str(i)+'.json')\n",
@@ -342,12 +343,12 @@
" compiled_model_path,\n",
" vk_path,\n",
" pk_path,\n",
" compress_selectors=True,\n",
" )\n",
"\n",
" assert res == True\n",
" assert os.path.isfile(vk_path)\n",
" assert os.path.isfile(pk_path)\n",
" \n",
" res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
" run_args.input_scale = settings[\"model_output_scales\"][0]\n",
"\n",
@@ -383,7 +384,6 @@
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \n",
" \"for-aggr\",\n",
" )\n",
"\n",
@@ -413,7 +413,6 @@
" proof_path,\n",
" settings_path,\n",
" vk_path,\n",
" \n",
" )\n",
"\n",
" assert res == True\n",
@@ -442,7 +441,7 @@
" proof_path = os.path.join('proof_split_'+str(i)+'.json')\n",
" proofs.append(proof_path)\n",
"\n",
"ezkl.mock_aggregate(proofs, logrows=23, split_proofs = True)"
"ezkl.mock_aggregate(proofs, logrows=22, split_proofs = True)"
]
}
],

File diff suppressed because one or more lines are too long

View File

@@ -302,7 +302,7 @@
" assert res == True\n",
" assert os.path.isfile(vk_path)\n",
" assert os.path.isfile(pk_path)\n",
" \n",
"\n",
" res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
" run_args.input_scale = settings[\"model_output_scales\"][0]\n",
"\n",
@@ -330,14 +330,14 @@
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \n",
" \"for-aggr\",\n",
" )\n",
"\n",
" print(res)\n",
" res_1_proof = res[\"proof\"]\n",
" assert os.path.isfile(proof_path)\n",
"\n",
" # Verify the proof\n",
" # # Verify the proof\n",
" if i > 0:\n",
" print(\"swapping commitments\")\n",
" # swap the proof commitments if we are not the first model\n",
@@ -356,12 +356,19 @@
"\n",
" res = ezkl.swap_proof_commitments(proof_path, witness_path)\n",
" print(res)\n",
" \n",
" # load proof and then print \n",
" proof = json.load(open(proof_path, 'r'))\n",
" res_2_proof = proof[\"hex_proof\"]\n",
" # show diff in hex strings\n",
" print(res_1_proof)\n",
" print(res_2_proof)\n",
" assert res_1_proof == res_2_proof\n",
"\n",
" res = ezkl.verify(\n",
" proof_path,\n",
" settings_path,\n",
" vk_path,\n",
" \n",
" )\n",
"\n",
" assert res == True\n",
@@ -439,7 +446,7 @@
" proof_path = os.path.join('proof_split_'+str(i)+'.json')\n",
" proofs.append(proof_path)\n",
"\n",
"ezkl.mock_aggregate(proofs, logrows=23, split_proofs = True)"
"ezkl.mock_aggregate(proofs, logrows=22, split_proofs = True)"
]
}
],

View File

@@ -78,7 +78,7 @@
"pk_path = os.path.join('test.pk')\n",
"vk_path = os.path.join('test.vk')\n",
"settings_path = os.path.join('settings.json')\n",
"",
"\n",
"witness_path = os.path.join('witness.json')\n",
"data_path = os.path.join('input.json')"
]
@@ -122,8 +122,8 @@
"# Loop through each element in the y tensor\n",
"for e in y_input:\n",
" # Apply the custom function and append the result to the list\n",
" print(ezkl.float_to_vecu64(e,7))\n",
" result.append(ezkl.poseidon_hash([ezkl.float_to_vecu64(e, 7)])[0])\n",
" print(ezkl.float_to_string(e,7))\n",
" result.append(ezkl.poseidon_hash([ezkl.float_to_string(e, 7)])[0])\n",
"\n",
"y = y.unsqueeze(0)\n",
"y = y.reshape(1, 9)\n",
@@ -343,7 +343,7 @@
"# we force the output to be 0 this corresponds to the set membership test being true -- and we set this to a fixed vis output\n",
"# this means that the output is fixed and the verifier can see it but that if the input is not in the set the output will not be 0 and the verifier will reject\n",
"witness = json.load(open(witness_path, \"r\"))\n",
"witness[\"outputs\"][0] = [[0, 0, 0, 0]]\n",
"witness[\"outputs\"][0] = [\"0000000000000000000000000000000000000000000000000000000000000000\"]\n",
"json.dump(witness, open(witness_path, \"w\"))\n",
"\n",
"witness = json.load(open(witness_path, \"r\"))\n",
@@ -353,7 +353,6 @@
" compiled_model_path,\n",
" vk_path,\n",
" pk_path,\n",
" \n",
" witness_path = witness_path,\n",
" )\n",
"\n",
@@ -520,4 +519,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -210,7 +210,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"id": "b1c561a8",
"metadata": {},
"outputs": [],

View File

@@ -126,7 +126,7 @@
"# Loop through each element in the y tensor\n",
"for e in user_preimages:\n",
" # Apply the custom function and append the result to the list\n",
" users.append(ezkl.poseidon_hash([ezkl.float_to_vecu64(e, 0)])[0])\n",
" users.append(ezkl.poseidon_hash([ezkl.float_to_string(e, 0)])[0])\n",
"\n",
"users_t = torch.tensor(user_preimages)\n",
"users_t = users_t.reshape(1, 6)\n",
@@ -303,7 +303,7 @@
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
"# this means that the output is fixed and the verifier can see it but that if the input is not in the set the output will not be 0 and the verifier will reject\n",
"witness = json.load(open(witness_path, \"r\"))\n",
"witness[\"outputs\"][0] = [ezkl.float_to_vecu64(1.0, 0)]\n",
"witness[\"outputs\"][0] = [ezkl.float_to_string(1.0, 0)]\n",
"json.dump(witness, open(witness_path, \"w\"))"
]
},
@@ -417,7 +417,7 @@
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
"# this means that the output is fixed and the verifier can see it but that if the input is not in the set the output will not be 0 and the verifier will reject\n",
"witness = json.load(open(witness_path, \"r\"))\n",
"witness[\"outputs\"][0] = [ezkl.float_to_vecu64(1.0, 0)]\n",
"witness[\"outputs\"][0] = [ezkl.float_to_string(1.0, 0)]\n",
"json.dump(witness, open(witness_path, \"w\"))\n"
]
},

View File

@@ -633,7 +633,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [4])"
]
},
{

View File

@@ -520,7 +520,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [4])"
]
},
{
@@ -636,7 +636,8 @@
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3"
"pygments_lexer": "ipython3",
"version": "3.9.15"
}
},
"nbformat": 4,

View File

@@ -25,17 +25,9 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"voice_data_dir: .\n"
]
}
],
"outputs": [],
"source": [
"\n",
"import os\n",
@@ -43,7 +35,7 @@
"\n",
"voice_data_dir = os.environ.get('VOICE_DATA_DIR')\n",
"\n",
"# if is none set to \"\" \n",
"# if is none set to \"\"\n",
"if voice_data_dir is None:\n",
" voice_data_dir = \"\"\n",
"\n",
@@ -637,7 +629,7 @@
"source": [
"\n",
"\n",
"res = ezkl.calibrate_settings(val_data, model_path, settings_path, \"resources\")\n",
"res = ezkl.calibrate_settings(val_data, model_path, settings_path, \"resources\", scales = [4])\n",
"assert res == True\n",
"print(\"verified\")\n"
]
@@ -908,7 +900,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
"version": "3.9.15"
}
},
"nbformat": 4,

View File

@@ -503,11 +503,11 @@
"pyplot.arrow(0, 0, 1, 0, width=0.02, alpha=0.5)\n",
"pyplot.arrow(0, 0, 0, 1, width=0.02, alpha=0.5)\n",
"\n",
"arrow_x = ezkl.vecu64_to_float(witness['outputs'][0][0], out_scale)\n",
"arrow_y = ezkl.vecu64_to_float(witness['outputs'][0][1], out_scale)\n",
"arrow_x = ezkl.string_to_float(witness['outputs'][0][0], out_scale)\n",
"arrow_y = ezkl.string_to_float(witness['outputs'][0][1], out_scale)\n",
"pyplot.arrow(0, 0, arrow_x, arrow_y, width=0.02)\n",
"arrow_x = ezkl.vecu64_to_float(witness['outputs'][0][2], out_scale)\n",
"arrow_y = ezkl.vecu64_to_float(witness['outputs'][0][3], out_scale)\n",
"arrow_x = ezkl.string_to_float(witness['outputs'][0][2], out_scale)\n",
"arrow_y = ezkl.string_to_float(witness['outputs'][0][3], out_scale)\n",
"pyplot.arrow(0, 0, arrow_x, arrow_y, width=0.02)"
]
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

install_ezkl_cli.sh (new file)
View File

@@ -0,0 +1,170 @@
#!/usr/bin/env bash
set -e
BASE_DIR=${XDG_CONFIG_HOME:-$HOME}
EZKL_DIR=${EZKL_DIR-"$BASE_DIR/.ezkl"}
# Create the .ezkl bin directory if it doesn't exist
mkdir -p $EZKL_DIR
# Store the correct profile file (e.g. .bashrc for bash or .zshenv for zsh).
case $SHELL in
*/zsh)
PROFILE=${ZDOTDIR-"$HOME"}/.zshenv
PREF_SHELL=zsh
;;
*/bash)
PROFILE=$HOME/.bashrc
PREF_SHELL=bash
;;
*/fish)
PROFILE=$HOME/.config/fish/config.fish
PREF_SHELL=fish
;;
*/ash)
PROFILE=$HOME/.profile
PREF_SHELL=ash
;;
*)
echo "NOTICE: Shell could not be detected, you will need to manually add ${EZKL_DIR} to your PATH."
esac
# Check for non standard installation of ezkl
if [ "$(which ezkl)s" != "s" ] && [ "$(which ezkl)" != "$EZKL_DIR/ezkl" ] ; then
echo "ezkl is installed in a non-standard directory, $(which ezkl). To use the automated installer, remove the existing ezkl from path to prevent conflicts"
exit 1
fi
if [[ ":$PATH:" != *":${EZKl_DIR}:"* ]]; then
# Add the ezkl directory to the path and ensure the old PATH variables remain.
echo >> $PROFILE && echo "export PATH=\"\$PATH:$EZKL_DIR\"" >> $PROFILE
fi
# Install latest ezkl version
# Get the right release URL
if [ -z "$1" ]
then
RELEASE_URL="https://api.github.com/repos/zkonduit/ezkl/releases/latest"
echo "No version tags provided, installing the latest ezkl version"
else
RELEASE_URL="https://api.github.com/repos/zkonduit/ezkl/releases/tags/$1"
echo "Installing ezkl version $1"
fi
PLATFORM=""
case "$(uname -s)" in
Darwin*)
PLATFORM="macos"
;;
Linux*Microsoft*)
PLATFORM="linux"
;;
Linux*)
PLATFORM="linux"
;;
CYGWIN*|MINGW*|MINGW32*|MSYS*)
PLATFORM="windows-msvc"
;;
*)
echo "Platform is not supported. If you would need support for the platform please submit an issue https://github.com/zkonduit/ezkl/issues/new/choose"
exit 1
;;
esac
# Check arch
ARCHITECTURE="$(uname -m)"
if [ "${ARCHITECTURE}" = "x86_64" ]; then
# Redirect stderr to /dev/null to avoid printing errors if non Rosetta.
if [ "$(sysctl -n sysctl.proc_translated 2>/dev/null)" = "1" ]; then
ARCHITECTURE="arm64" # Rosetta.
else
ARCHITECTURE="amd64" # Intel.
fi
elif [ "${ARCHITECTURE}" = "arm64" ] ||[ "${ARCHITECTURE}" = "aarch64" ]; then
ARCHITECTURE="aarch64" # Arm.
elif [ "${ARCHITECTURE}" = "amd64" ]; then
ARCHITECTURE="amd64" # Amd
else
echo "Architecture is not supported. If you would need support for the architecture please submit an issue https://github.com/zkonduit/ezkl/issues/new/choose"
exit 1
fi
# Remove existing ezkl
echo "Removing old ezkl binary if it exists"
[ -e "$EZKL_DIR/ezkl" ] && rm "$EZKL_DIR/ezkl"
# download the release and unpack the right tarball
if [ "$PLATFORM" == "windows-msvc" ]; then
JSON_RESPONSE=$(curl -s "$RELEASE_URL")
FILE_URL=$(echo "$JSON_RESPONSE" | grep -o 'https://github.com[^"]*' | grep "build-artifacts.ezkl-windows-msvc.tar.gz")
echo "Downloading package"
curl -L "$FILE_URL" -o "$EZKL_DIR/build-artifacts.ezkl-windows-msvc.tar.gz"
echo "Unpacking package"
tar -xzf "$EZKL_DIR/build-artifacts.ezkl-windows-msvc.tar.gz" -C "$EZKL_DIR"
echo "Cleaning up"
rm "$EZKL_DIR/build-artifacts.ezkl-windows-msvc.tar.gz"
elif [ "$PLATFORM" == "macos" ]; then
if [ "$ARCHITECTURE" == "aarch64" ] || [ "$ARCHITECTURE" == "arm64" ]; then
JSON_RESPONSE=$(curl -s "$RELEASE_URL")
FILE_URL=$(echo "$JSON_RESPONSE" | grep -o 'https://github.com[^"]*' | grep "build-artifacts.ezkl-macos-aarch64.tar.gz")
echo "Downloading package"
curl -L "$FILE_URL" -o "$EZKL_DIR/build-artifacts.ezkl-macos-aarch64.tar.gz"
echo "Unpacking package"
tar -xzf "$EZKL_DIR/build-artifacts.ezkl-macos-aarch64.tar.gz" -C "$EZKL_DIR"
echo "Cleaning up"
rm "$EZKL_DIR/build-artifacts.ezkl-macos-aarch64.tar.gz"
else
JSON_RESPONSE=$(curl -s "$RELEASE_URL")
FILE_URL=$(echo "$JSON_RESPONSE" | grep -o 'https://github.com[^"]*' | grep "build-artifacts.ezkl-macos.tar.gz")
echo "Downloading package"
curl -L "$FILE_URL" -o "$EZKL_DIR/build-artifacts.ezkl-macos.tar.gz"
echo "Unpacking package"
tar -xzf "$EZKL_DIR/build-artifacts.ezkl-macos.tar.gz" -C "$EZKL_DIR"
echo "Cleaning up"
rm "$EZKL_DIR/build-artifacts.ezkl-macos.tar.gz"
fi
elif [ "$PLATFORM" == "linux" ]; then
if [ "${ARCHITECTURE}" = "amd64" ]; then
JSON_RESPONSE=$(curl -s "$RELEASE_URL")
FILE_URL=$(echo "$JSON_RESPONSE" | grep -o 'https://github.com[^"]*' | grep "build-artifacts.ezkl-linux-gnu.tar.gz")
echo "Downloading package"
curl -L "$FILE_URL" -o "$EZKL_DIR/build-artifacts.ezkl-linux-gnu.tar.gz"
echo "Unpacking package"
tar -xzf "$EZKL_DIR/build-artifacts.ezkl-linux-gnu.tar.gz" -C "$EZKL_DIR"
echo "Cleaning up"
rm "$EZKL_DIR/build-artifacts.ezkl-linux-gnu.tar.gz"
else
echo "ARM architectures are not supported for Linux at the moment. If you would need support for the ARM architectures on linux please submit an issue https://github.com/zkonduit/ezkl/issues/new/choose"
exit 1
fi
else
echo "Platform and Architecture is not supported. If you would need support for the platform and architecture please submit an issue https://github.com/zkonduit/ezkl/issues/new/choose"
exit 1
fi
echo && echo "Successfully downloaded ezkl at ${EZKL_DIR}"
echo "We detected that your preferred shell is ${PREF_SHELL} and added ezkl to PATH. Run 'source ${PROFILE}' or start a new terminal session to use ezkl."

View File

@@ -219,7 +219,7 @@ mod tests {
};
let prover = halo2_proofs::dev::MockProver::run(K as u32, &circuit, vec![]).unwrap();
assert_eq!(prover.verify_par(), Ok(()))
assert_eq!(prover.verify(), Ok(()))
}
}
@@ -240,6 +240,6 @@ mod tests {
message: message.into(),
};
let prover = halo2_proofs::dev::MockProver::run(K as u32, &circuit, vec![]).unwrap();
assert_eq!(prover.verify_par(), Ok(()))
assert_eq!(prover.verify(), Ok(()))
}
}

View File

@@ -499,7 +499,7 @@ mod tests {
_spec: PhantomData,
};
let prover = halo2_proofs::dev::MockProver::run(k, &circuit, output).unwrap();
assert_eq!(prover.verify_par(), Ok(()))
assert_eq!(prover.verify(), Ok(()))
}
#[test]
@@ -518,7 +518,7 @@ mod tests {
_spec: PhantomData,
};
let prover = halo2_proofs::dev::MockProver::run(k, &circuit, output).unwrap();
assert_eq!(prover.verify_par(), Ok(()))
assert_eq!(prover.verify(), Ok(()))
}
#[test]
@@ -551,7 +551,7 @@ mod tests {
};
let prover = halo2_proofs::dev::MockProver::run(k, &circuit, output).unwrap();
assert_eq!(prover.verify_par(), Ok(()))
assert_eq!(prover.verify(), Ok(()))
}
}
@@ -573,6 +573,6 @@ mod tests {
_spec: PhantomData,
};
let prover = halo2_proofs::dev::MockProver::run(k, &circuit, output).unwrap();
assert_eq!(prover.verify_par(), Ok(()))
assert_eq!(prover.verify(), Ok(()))
}
}

View File

@@ -19,7 +19,10 @@ use serde::{Deserialize, Serialize};
use crate::{
circuit::ops::base::BaseOp,
circuit::{table::Table, utils},
circuit::{
table::{Range, RangeCheck, Table},
utils,
},
tensor::{Tensor, TensorType, ValTensor, VarTensor},
};
use std::{collections::BTreeMap, error::Error, marker::PhantomData};
@@ -176,6 +179,10 @@ pub struct BaseConfig<F: PrimeField + TensorType + PartialOrd> {
pub lookup_selectors: BTreeMap<(LookupOp, usize, usize), Selector>,
///
pub tables: BTreeMap<LookupOp, Table<F>>,
///
pub range_checks: BTreeMap<Range, RangeCheck<F>>,
/// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many lookup ops.
pub range_check_selectors: BTreeMap<(Range, usize, usize), Selector>,
/// Activate sanity checks
pub check_mode: CheckMode,
_marker: PhantomData<F>,
@@ -194,7 +201,9 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
lookup_index: dummy_var,
selectors: BTreeMap::new(),
lookup_selectors: BTreeMap::new(),
range_check_selectors: BTreeMap::new(),
tables: BTreeMap::new(),
range_checks: BTreeMap::new(),
check_mode: CheckMode::SAFE,
_marker: PhantomData,
}
@@ -325,11 +334,13 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
Self {
selectors,
lookup_selectors: BTreeMap::new(),
range_check_selectors: BTreeMap::new(),
inputs: inputs.to_vec(),
lookup_input: VarTensor::Empty,
lookup_output: VarTensor::Empty,
lookup_index: VarTensor::Empty,
tables: BTreeMap::new(),
range_checks: BTreeMap::new(),
output: output.clone(),
check_mode,
_marker: PhantomData,
@@ -344,7 +355,7 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
input: &VarTensor,
output: &VarTensor,
index: &VarTensor,
lookup_range: (i128, i128),
lookup_range: Range,
logrows: usize,
nl: &LookupOp,
) -> Result<(), Box<dyn Error>>
@@ -482,6 +493,74 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
Ok(())
}
/// Configures and creates range check selectors
#[allow(clippy::too_many_arguments)]
pub fn configure_range_check(
&mut self,
cs: &mut ConstraintSystem<F>,
input: &VarTensor,
range: Range,
) -> Result<(), Box<dyn Error>>
where
F: Field,
{
let mut selectors = BTreeMap::new();
if !input.is_advice() {
return Err("wrong input type for lookup input".into());
}
// we borrow mutably twice so we need to do this dance
let range_check = if !self.range_checks.contains_key(&range) {
// as all tables have the same input we see if there's another table whose input we can reuse
let range_check = RangeCheck::<F>::configure(cs, range);
self.range_checks.insert(range, range_check.clone());
range_check
} else {
return Ok(());
};
for x in 0..input.num_blocks() {
for y in 0..input.num_inner_cols() {
let single_col_sel = cs.complex_selector();
cs.lookup("", |cs| {
let mut res = vec![];
let sel = cs.query_selector(single_col_sel);
let input_query = match &input {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
};
let default_x = range_check.get_first_element();
let not_sel = Expression::Constant(F::ONE) - sel.clone();
res.extend([(
sel.clone() * input_query.clone()
+ not_sel.clone() * Expression::Constant(default_x),
range_check.input,
)]);
res
});
selectors.insert((range, x, y), single_col_sel);
}
}
self.range_check_selectors.extend(selectors);
// if we haven't previously initialized the input/output, do so now
if let VarTensor::Empty = self.lookup_input {
debug!("assigning lookup input");
self.lookup_input = input.clone();
}
Ok(())
}
/// layout_tables must be called before layout.
pub fn layout_tables(&mut self, layouter: &mut impl Layouter<F>) -> Result<(), Box<dyn Error>> {
for (i, table) in self.tables.values_mut().enumerate() {
@@ -500,6 +579,20 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
Ok(())
}
/// layout_range_checks must be called before layout.
pub fn layout_range_checks(
&mut self,
layouter: &mut impl Layouter<F>,
) -> Result<(), Box<dyn Error>> {
for range_check in self.range_checks.values_mut() {
if !range_check.is_assigned {
debug!("laying out range check for {:?}", range_check.range);
range_check.layout(layouter)?;
}
}
Ok(())
}
/// Assigns variables to the regions created when calling `configure`.
/// # Arguments
/// * `values` - The explicit values to the operations.
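
How `configure_range_check` above wires things up: for every (block, inner column) pair of the advice tensor it allocates one complex selector and one lookup argument, and the gated expression `sel * input + (1 - sel) * default` sends disabled rows to the table's first element, which is in range by construction. Only explicitly enabled cells are therefore constrained. The identity, checked over plain integers:

```rust
// Minimal sketch of the selector-gating identity used by the lookup
// argument above, evaluated over i128 rather than field elements.
fn gated(sel: bool, input: i128, default: i128) -> i128 {
    let s = sel as i128;
    s * input + (1 - s) * default
}

fn main() {
    let default = -8180; // get_first_element(): low end of the range
    assert_eq!(gated(true, 42, default), 42); // enabled cells are checked
    assert_eq!(gated(false, 42, default), default); // disabled rows pass trivially
}
```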

View File

@@ -19,7 +19,7 @@ use super::{
};
use crate::{
circuit::{ops::base::BaseOp, utils},
fieldutils::i128_to_felt,
fieldutils::{felt_to_i128, i128_to_felt},
tensor::{
get_broadcasted_shape,
ops::{accumulated, add, mult, sub},
@@ -51,6 +51,66 @@ pub fn overflowed_len(starting_idx: usize, mut total_len: usize, column_len: usi
total_len
}
/// Div accumulated layout
pub fn div<F: PrimeField + TensorType + PartialOrd>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
value: &[ValTensor<F>; 1],
div: F,
) -> Result<ValTensor<F>, Box<dyn Error>> {
let input = value[0].clone();
let input_dims = input.dims();
let range_check_bracket = felt_to_i128(div) - 1;
let mut divisor = Tensor::from(vec![ValType::Constant(div)].into_iter());
divisor.set_visibility(&crate::graph::Visibility::Fixed);
let divisor = region.assign(&config.inputs[1], &divisor.into())?;
region.increment(divisor.len());
let is_assigned = !input.any_unknowns()? && !divisor.any_unknowns()?;
let mut claimed_output: ValTensor<F> = if is_assigned {
let input_evals = input.get_int_evals()?;
let divisor_evals = divisor.get_int_evals()?;
tensor::ops::div(&[input_evals.clone(), divisor_evals.clone()])?
.iter()
.map(|x| Ok(Value::known(i128_to_felt(*x))))
.collect::<Result<Tensor<Value<F>>, Box<dyn Error>>>()?
.into()
} else {
Tensor::new(
Some(&vec![Value::<F>::unknown(); input.len()]),
&[input.len()],
)?
.into()
};
claimed_output.reshape(input_dims)?;
let product = pairwise(
config,
region,
&[claimed_output.clone(), divisor.clone()],
BaseOp::Mult,
)?;
let diff_with_input = pairwise(
config,
region,
&[product.clone(), input.clone()],
BaseOp::Sub,
)?;
range_check(
config,
region,
&[diff_with_input],
&(-range_check_bracket, range_check_bracket),
)?;
Ok(claimed_output)
}
/// Dot product accumulated layout
pub fn dot<F: PrimeField + TensorType + PartialOrd>(
config: &BaseConfig<F>,
@@ -1837,15 +1897,6 @@ pub fn deconv<F: PrimeField + TensorType + PartialOrd + std::marker::Send + std:
)));
}
if has_bias {
let bias = &inputs[2];
if (bias.dims().len() != 1) || (bias.dims()[0] != kernel.dims()[0]) {
return Err(Box::new(TensorError::DimMismatch(
"deconv bias".to_string(),
)));
}
}
let (kernel_height, kernel_width) = (kernel.dims()[2], kernel.dims()[3]);
let null_val = ValType::Constant(F::ZERO);
@@ -2313,6 +2364,50 @@ pub fn enforce_equality<F: PrimeField + TensorType + PartialOrd>(
Ok(output)
}
/// layout for range check.
pub fn range_check<F: PrimeField + TensorType + PartialOrd>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
range: &crate::circuit::table::Range,
) -> Result<ValTensor<F>, Box<dyn Error>> {
// time the entire operation
let timer = instant::Instant::now();
let x = values[0].clone();
let w = region.assign(&config.lookup_input, &x)?;
let assigned_len = x.len();
let is_dummy = region.is_dummy();
if !is_dummy {
(0..assigned_len)
.map(|i| {
let (x, y, z) = config
.lookup_input
.cartesian_coord(region.linear_coord() + i);
let selector = config.range_check_selectors.get(&(range.clone(), x, y));
region.enable(selector, z)?;
Ok(())
})
.collect::<Result<Vec<_>, Box<dyn Error>>>()?;
}
region.increment(assigned_len);
let elapsed = timer.elapsed();
trace!(
"range check {:?} layout took {:?}, row: {:?}",
range,
elapsed,
region.row()
);
Ok(w)
}
/// layout for nonlinearity check.
pub fn nonlinearity<F: PrimeField + TensorType + PartialOrd>(
config: &BaseConfig<F>,

View File

@@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize};
use std::error::Error;
use crate::{
circuit::{layouts, utils},
circuit::{layouts, table::Range, utils},
fieldutils::{felt_to_i128, i128_to_felt},
graph::{multiplier_to_scale, scale_to_multiplier},
tensor::{self, Tensor, TensorError, TensorType},
@@ -57,7 +57,7 @@ pub enum LookupOp {
impl LookupOp {
/// Returns the range of values that can be represented by the table
pub fn bit_range(max_len: usize) -> (i128, i128) {
pub fn bit_range(max_len: usize) -> Range {
let range = (max_len - 1) as f64 / 2_f64;
let range = range as i128;
(-range, range)
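
`bit_range` keeps its arithmetic and only changes its return type to the new `Range` alias: a table of `max_len` rows is centred on zero, so the representable interval is roughly ±(max_len - 1)/2. A quick sanity check, reproducing the function from the diff:

```rust
pub type Range = (i128, i128);

// Same arithmetic as the diff above.
pub fn bit_range(max_len: usize) -> Range {
    let range = (max_len - 1) as f64 / 2_f64;
    (-(range as i128), range as i128)
}

fn main() {
    // A 2^16-row table represents values in [-32767, 32767].
    assert_eq!(bit_range(1 << 16), (-32767, 32767));
}
```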

View File

@@ -10,6 +10,8 @@ use halo2curves::ff::PrimeField;
use self::{lookup::LookupOp, region::RegionCtx};
use super::table::Range;
///
pub mod base;
///
@@ -60,6 +62,11 @@ pub trait Op<F: PrimeField + TensorType + PartialOrd>: std::fmt::Debug + Send +
vec![]
}
/// Returns the range checks required by the operation.
fn required_range_checks(&self) -> Vec<Range> {
vec![]
}
/// Returns true if the operation is an input.
fn is_input(&self) -> bool {
false

View File

@@ -237,7 +237,7 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
}
PolyOp::Neg => layouts::neg(config, region, values[..].try_into()?)?,
PolyOp::Iff => layouts::iff(config, region, values[..].try_into()?)?,
PolyOp::Einsum { equation } => layouts::einsum(config, region, &values, equation)?,
PolyOp::Einsum { equation } => layouts::einsum(config, region, values, equation)?,
PolyOp::Sum { axes } => {
layouts::sum_axes(config, region, values[..].try_into()?, axes)?
}
@@ -290,12 +290,7 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
fn out_scale(&self, in_scales: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> {
let scale = match self {
PolyOp::MultiBroadcastTo { .. } => in_scales[0],
PolyOp::Xor | PolyOp::Or | PolyOp::And | PolyOp::Not => 0,
PolyOp::Neg => in_scales[0],
PolyOp::MoveAxis { .. } => in_scales[0],
PolyOp::Downsample { .. } => in_scales[0],
PolyOp::Resize { .. } => in_scales[0],
PolyOp::Iff => in_scales[1],
PolyOp::Einsum { .. } => {
let mut scale = in_scales[0];
@@ -339,14 +334,9 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
scale += in_scales[1];
scale
}
PolyOp::Identity => in_scales[0],
PolyOp::Reshape(_) | PolyOp::Flatten(_) => in_scales[0],
PolyOp::Pad(_) => in_scales[0],
PolyOp::Pow(pow) => in_scales[0] * (*pow as crate::Scale),
PolyOp::Pack(_, _) => in_scales[0],
PolyOp::GlobalSumPool => in_scales[0],
PolyOp::Concat { axis: _ } => in_scales[0],
PolyOp::Slice { .. } => in_scales[0],
_ => in_scales[0],
};
Ok(scale)
}

View File

@@ -19,6 +19,9 @@ use crate::circuit::lookup::LookupOp;
use super::Op;
/// The range of the lookup table.
pub type Range = (i128, i128);
/// The safety factor for the range of the lookup table.
pub const RANGE_MULTIPLIER: i128 = 2;
/// The safety factor offset for the number of rows in the lookup table.
@@ -91,7 +94,7 @@ pub struct Table<F: PrimeField> {
/// Flags if table has been previously assigned to.
pub is_assigned: bool,
/// Number of bits used in lookup table.
pub range: (i128, i128),
pub range: Range,
_marker: PhantomData<F>,
}
@@ -129,7 +132,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
}
///
pub fn num_cols_required(range: (i128, i128), col_size: usize) -> usize {
pub fn num_cols_required(range: Range, col_size: usize) -> usize {
// double it to be safe
let range_len = range.1 - range.0;
// number of cols needed to store the range
@@ -141,7 +144,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
/// Configures the table.
pub fn configure(
cs: &mut ConstraintSystem<F>,
range: (i128, i128),
range: Range,
logrows: usize,
nonlinearity: &LookupOp,
preexisting_inputs: Option<Vec<TableColumn>>,
@@ -257,3 +260,86 @@ impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
Ok(())
}
}
/// Halo2 range check column
#[derive(Clone, Debug)]
pub struct RangeCheck<F: PrimeField> {
/// Input to table.
pub input: TableColumn,
/// selector constructor
pub selector_constructor: SelectorConstructor<F>,
/// Flags if table has been previously assigned to.
pub is_assigned: bool,
/// Number of bits used in lookup table.
pub range: Range,
_marker: PhantomData<F>,
}
impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
/// get first_element of column
pub fn get_first_element(&self) -> F {
i128_to_felt(self.range.0)
}
///
pub fn cal_col_size(logrows: usize, reserved_blinding_rows: usize) -> usize {
2usize.pow(logrows as u32) - reserved_blinding_rows
}
///
pub fn cal_bit_range(bits: usize, reserved_blinding_rows: usize) -> usize {
2usize.pow(bits as u32) - reserved_blinding_rows
}
}
impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
/// Configures the table.
pub fn configure(cs: &mut ConstraintSystem<F>, range: Range) -> RangeCheck<F> {
log::debug!("range check range: {:?}", range);
let inputs = cs.lookup_table_column();
RangeCheck {
input: inputs,
is_assigned: false,
selector_constructor: SelectorConstructor::new(2),
range,
_marker: PhantomData,
}
}
/// Assigns values to the constraints generated when calling `configure`.
pub fn layout(&mut self, layouter: &mut impl Layouter<F>) -> Result<(), Box<dyn Error>> {
if self.is_assigned {
return Err(Box::new(CircuitError::TableAlreadyAssigned));
}
let smallest = self.range.0;
let largest = self.range.1;
let inputs: Tensor<F> = Tensor::from(smallest..=largest).map(|x| i128_to_felt(x));
self.is_assigned = true;
layouter.assign_table(
|| "range check table",
|mut table| {
let _ = inputs
.iter()
.enumerate()
.map(|(row_offset, input)| {
table.assign_cell(
|| format!("rc_i_col row {}", row_offset),
self.input,
row_offset,
|| Value::known(*input),
)?;
Ok(())
})
.collect::<Result<Vec<()>, halo2_proofs::plonk::Error>>()?;
Ok(())
},
)?;
Ok(())
}
}
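A minimal usage sketch for the new RangeCheck (hypothetical driver code, not part of this diff), assuming a ConstraintSystem<Fp> named `cs` at configure time and a Layouter<Fp> named `layouter` at synthesis time:

// configure: allocate the lookup table column for values in [-127, 127]
let mut rc = RangeCheck::<Fp>::configure(&mut cs, (-127, 127));
// synthesize: populate the table exactly once; a second call returns
// CircuitError::TableAlreadyAssigned
rc.layout(&mut layouter)?;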

View File

@@ -90,7 +90,7 @@ mod matmul {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -165,7 +165,7 @@ mod matmul_col_overflow_double_col {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -239,7 +239,7 @@ mod matmul_col_overflow {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -327,7 +327,7 @@ mod matmul_col_ultra_overflow_double_col {
halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme<halo2curves::bn256::Bn256>,
F,
MatmulCircuit<F>,
>(&circuit, &params)
>(&circuit, &params, true)
.unwrap();
let prover = crate::pfsys::create_proof_circuit_kzg(
@@ -441,7 +441,7 @@ mod matmul_col_ultra_overflow {
halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme<halo2curves::bn256::Bn256>,
F,
MatmulCircuit<F>,
>(&circuit, &params)
>(&circuit, &params, true)
.unwrap();
let prover = crate::pfsys::create_proof_circuit_kzg(
@@ -543,7 +543,7 @@ mod dot {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -620,7 +620,7 @@ mod dot_col_overflow_triple_col {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -693,7 +693,7 @@ mod dot_col_overflow {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -762,7 +762,7 @@ mod sum {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -832,7 +832,7 @@ mod sum_col_overflow_double_col {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -901,7 +901,7 @@ mod sum_col_overflow {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -994,7 +994,7 @@ mod composition {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -1095,7 +1095,7 @@ mod conv {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
#[test]
@@ -1133,7 +1133,7 @@ mod conv {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -1240,7 +1240,7 @@ mod conv_col_ultra_overflow {
halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme<halo2curves::bn256::Bn256>,
F,
ConvCircuit<F>,
>(&circuit, &params)
>(&circuit, &params, true)
.unwrap();
let prover = crate::pfsys::create_proof_circuit_kzg(
@@ -1390,7 +1390,7 @@ mod conv_relu_col_ultra_overflow {
halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme<halo2curves::bn256::Bn256>,
F,
ConvCircuit<F>,
>(&circuit, &params)
>(&circuit, &params, true)
.unwrap();
let prover = crate::pfsys::create_proof_circuit_kzg(
@@ -1484,7 +1484,7 @@ mod add_w_shape_casting {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -1551,7 +1551,7 @@ mod add {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -1618,7 +1618,7 @@ mod add_with_overflow {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -1727,7 +1727,7 @@ mod add_with_overflow_and_poseidon {
let prover =
MockProver::run(K as u32, &circuit, vec![vec![commitment_a, commitment_b]]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
#[test]
@@ -1822,7 +1822,7 @@ mod sub {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -1889,7 +1889,7 @@ mod mult {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -1954,7 +1954,7 @@ mod pow {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -2023,7 +2023,7 @@ mod pack {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -2116,7 +2116,7 @@ mod matmul_relu {
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -2222,7 +2222,7 @@ mod rangecheckpercent {
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
{
let inp = Tensor::new(Some(&[Value::<F>::known(F::from(200_u64))]), &[1]).unwrap();
@@ -2233,7 +2233,7 @@ mod rangecheckpercent {
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
// Unsuccessful case
@@ -2328,7 +2328,7 @@ mod relu {
};
let prover = MockProver::run(4_u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}
@@ -2421,7 +2421,7 @@ mod lookup_ultra_overflow {
halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme<halo2curves::bn256::Bn256>,
F,
ReLUCircuit<F>,
>(&circuit, &params)
>(&circuit, &params, true)
.unwrap();
let prover = crate::pfsys::create_proof_circuit_kzg(
@@ -2557,6 +2557,6 @@ mod softmax {
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied_par();
prover.assert_satisfied();
}
}

View File

@@ -59,6 +59,8 @@ pub const DEFAULT_SOL_CODE_DA: &str = "evm_deploy_da.sol";
pub const DEFAULT_CONTRACT_ADDRESS: &str = "contract.address";
/// Default contract address for data attestation
pub const DEFAULT_CONTRACT_ADDRESS_DA: &str = "contract_da.address";
/// Default contract address for vk
pub const DEFAULT_CONTRACT_ADDRESS_VK: &str = "contract_vk.address";
/// Default check mode
pub const DEFAULT_CHECKMODE: &str = "safe";
/// Default calibration target
@@ -73,6 +75,16 @@ pub const DEFAULT_FUZZ_RUNS: &str = "10";
pub const DEFAULT_CALIBRATION_FILE: &str = "calibration.json";
/// Default lookup safety margin
pub const DEFAULT_LOOKUP_SAFETY_MARGIN: &str = "2";
/// Default for compressing selectors
pub const DEFAULT_COMPRESS_SELECTORS: &str = "false";
/// Default for rendering the vk separately
pub const DEFAULT_RENDER_VK_SEPERATELY: &str = "false";
/// Default VK sol path
pub const DEFAULT_VK_SOL: &str = "vk.sol";
/// Default VK abi path
pub const DEFAULT_VK_ABI: &str = "vk.abi";
/// Default scale rebase multipliers for calibration
pub const DEFAULT_SCALE_REBASE_MULTIPLIERS: &str = "1,2,10";
impl std::fmt::Display for TranscriptType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
@@ -313,9 +325,20 @@ pub enum Commands {
/// Optional scales to specifically try for calibration. Example, --scales 0,4
#[arg(long, value_delimiter = ',', allow_hyphen_values = true)]
scales: Option<Vec<crate::Scale>>,
/// Optional scale rebase multipliers to specifically try for calibration. This is the multiple of the input scale at which intermediate values are divided back down to the input scale. Example: --scale-rebase-multiplier 1,2
#[arg(
long,
value_delimiter = ',',
allow_hyphen_values = true,
default_value = DEFAULT_SCALE_REBASE_MULTIPLIERS
)]
scale_rebase_multiplier: Vec<u32>,
/// max logrows to use for calibration, 26 is the max public SRS size
#[arg(long)]
max_logrows: Option<u32>,
/// Whether to fix the div_rebasing value during calibration (this changes how we rebase scales). If unset, both rebasing modes are tried.
#[arg(long)]
div_rebasing: Option<bool>,
},
/// Generates a dummy SRS
@@ -389,6 +412,9 @@ pub enum Commands {
/// whether the accumulated are segments of a larger proof
#[arg(long, default_value = DEFAULT_SPLIT)]
split_proofs: bool,
/// compress selectors
#[arg(long, default_value = DEFAULT_COMPRESS_SELECTORS)]
compress_selectors: bool,
},
/// Aggregates proofs :)
Aggregate {
@@ -451,6 +477,9 @@ pub enum Commands {
/// The graph witness (optional - used to override fixed values in the circuit)
#[arg(short = 'W', long)]
witness: Option<PathBuf>,
/// compress selectors
#[arg(long, default_value = DEFAULT_COMPRESS_SELECTORS)]
compress_selectors: bool,
},
#[cfg(not(target_arch = "wasm32"))]
@@ -473,6 +502,9 @@ pub enum Commands {
/// number of fuzz iterations
#[arg(long, default_value = DEFAULT_FUZZ_RUNS)]
num_runs: usize,
/// compress selectors
#[arg(long, default_value = DEFAULT_COMPRESS_SELECTORS)]
compress_selectors: bool,
},
#[cfg(not(target_arch = "wasm32"))]
/// Deploys a test contract that the data attester reads from and creates a data-attestation-formatted input.json file that contains calldata information
@@ -573,6 +605,31 @@ pub enum Commands {
/// The path to output the Solidity verifier ABI
#[arg(long, default_value = DEFAULT_VERIFIER_ABI)]
abi_path: PathBuf,
/// Whether the verifier key should be rendered as a separate contract.
/// We recommend disabling selector compression if this is enabled.
/// To save the verifier key as a separate contract, set this to true and then call the create-evm-vk command.
#[arg(long, default_value = DEFAULT_RENDER_VK_SEPERATELY)]
render_vk_seperately: bool,
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates a separate EVM verifying key (VK) contract for a single proof
#[command(name = "create-evm-vk")]
CreateEVMVK {
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
#[arg(long)]
srs_path: Option<PathBuf>,
/// The path to load circuit settings .json file from (generated using the gen-settings command)
#[arg(short = 'S', long, default_value = DEFAULT_SETTINGS)]
settings_path: PathBuf,
/// The path to load the desired verification key file
#[arg(long, default_value = DEFAULT_VK)]
vk_path: PathBuf,
/// The path to output the Solidity code
#[arg(long, default_value = DEFAULT_VK_SOL)]
sol_code_path: PathBuf,
/// The path to output the Solidity verifier ABI
#[arg(long, default_value = DEFAULT_VK_ABI)]
abi_path: PathBuf,
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates an EVM verifier that attests to on-chain inputs for a single proof
@@ -618,6 +675,11 @@ pub enum Commands {
// logrows used for aggregation circuit
#[arg(long, default_value = DEFAULT_AGGREGATED_LOGROWS)]
logrows: u32,
/// Whether the verifier key should be rendered as a separate contract.
/// We recommend disabling selector compression if this is enabled.
/// To save the verifier key as a separate contract, set this to true and then call the create-evm-vk command.
#[arg(long, default_value = DEFAULT_RENDER_VK_SEPERATELY)]
render_vk_seperately: bool,
},
/// Verifies a proof, returning accept or reject
Verify {
@@ -669,6 +731,25 @@ pub enum Commands {
private_key: Option<String>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Deploys an evm verifier that is generated by ezkl
DeployEvmVK {
/// The path to the Solidity code (generated using the create-evm-verifier command)
#[arg(long, default_value = DEFAULT_VK_SOL)]
sol_code_path: PathBuf,
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
#[arg(short = 'U', long)]
rpc_url: Option<String>,
#[arg(long, default_value = DEFAULT_CONTRACT_ADDRESS_VK)]
/// The path to output the contract address
addr_path: PathBuf,
/// The optimizer runs to set on the verifier. Lower values optimize for deployment cost, while higher values optimize for gas cost.
#[arg(long, default_value = DEFAULT_OPTIMIZER_RUNS)]
optimizer_runs: usize,
/// Private secp256K1 key in hex format, 64 chars, no 0x prefix, of the account signing transactions. If None the private key will be generated by Anvil
#[arg(short = 'P', long)]
private_key: Option<String>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Deploys an evm verifier that allows for data attestation
#[command(name = "deploy-evm-da")]
DeployEvmDataAttestation {
@@ -710,6 +791,9 @@ pub enum Commands {
/// does the verifier use data attestation ?
#[arg(long)]
addr_da: Option<H160>,
/// The address of the separately rendered vk contract, if the vk was rendered separately
#[arg(long)]
addr_vk: Option<H160>,
},
/// Print the proof in hexadecimal

View File

@@ -101,17 +101,18 @@ pub async fn setup_eth_backend(
}
///
pub async fn deploy_verifier_via_solidity(
pub async fn deploy_contract_via_solidity(
sol_code_path: PathBuf,
rpc_url: Option<&str>,
runs: usize,
private_key: Option<&str>,
contract_name: &str,
) -> Result<ethers::types::Address, Box<dyn Error>> {
// anvil instance must be alive at least until the factory completes the deploy
let (anvil, client) = setup_eth_backend(rpc_url, private_key).await?;
let (abi, bytecode, runtime_bytecode) =
get_contract_artifacts(sol_code_path, "Halo2Verifier", runs)?;
get_contract_artifacts(sol_code_path, contract_name, runs)?;
let factory = get_sol_contract_factory(abi, bytecode, runtime_bytecode, client.clone())?;
let contract = factory.deploy(())?.send().await?;
@@ -335,11 +336,16 @@ pub async fn update_account_calls(
pub async fn verify_proof_via_solidity(
proof: Snark<Fr, G1Affine>,
addr: ethers::types::Address,
addr_vk: Option<H160>,
rpc_url: Option<&str>,
) -> Result<bool, Box<dyn Error>> {
let flattened_instances = proof.instances.into_iter().flatten();
let encoded = encode_calldata(None, &proof.proof, &flattened_instances.collect::<Vec<_>>());
let encoded = encode_calldata(
addr_vk.as_ref().map(|x| x.0),
&proof.proof,
&flattened_instances.collect::<Vec<_>>(),
);
info!("encoded: {:#?}", hex::encode(&encoded));
let (anvil, client) = setup_eth_backend(rpc_url, None).await?;
@@ -439,6 +445,7 @@ pub async fn verify_proof_with_data_attestation(
proof: Snark<Fr, G1Affine>,
addr_verifier: ethers::types::Address,
addr_da: ethers::types::Address,
addr_vk: Option<H160>,
rpc_url: Option<&str>,
) -> Result<bool, Box<dyn Error>> {
use ethers::abi::{Function, Param, ParamType, StateMutability, Token};
@@ -452,8 +459,11 @@ pub async fn verify_proof_with_data_attestation(
public_inputs.push(u);
}
let encoded_verifier =
encode_calldata(None, &proof.proof, &flattened_instances.collect::<Vec<_>>());
let encoded_verifier = encode_calldata(
addr_vk.as_ref().map(|x| x.0),
&proof.proof,
&flattened_instances.collect::<Vec<_>>(),
);
info!("encoded: {:#?}", hex::encode(&encoded_verifier));

View File

@@ -3,7 +3,7 @@ use crate::circuit::CheckMode;
use crate::commands::CalibrationTarget;
use crate::commands::Commands;
#[cfg(not(target_arch = "wasm32"))]
use crate::eth::{deploy_da_verifier_via_solidity, deploy_verifier_via_solidity};
use crate::eth::{deploy_contract_via_solidity, deploy_da_verifier_via_solidity};
#[cfg(not(target_arch = "wasm32"))]
#[allow(unused_imports)]
use crate::eth::{fix_da_sol, get_contract_artifacts, verify_proof_via_solidity};
@@ -140,8 +140,14 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
compiled_circuit,
transcript,
num_runs,
} => fuzz(compiled_circuit, witness, transcript, num_runs),
compress_selectors,
} => fuzz(
compiled_circuit,
witness,
transcript,
num_runs,
compress_selectors,
),
Commands::GenSrs { srs_path, logrows } => gen_srs_cmd(srs_path, logrows as u32),
#[cfg(not(target_arch = "wasm32"))]
Commands::GetSrs {
@@ -170,7 +176,9 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
target,
lookup_safety_margin,
scales,
scale_rebase_multiplier,
max_logrows,
div_rebasing,
} => calibrate(
model,
data,
@@ -178,6 +186,8 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
target,
lookup_safety_margin,
scales,
scale_rebase_multiplier,
div_rebasing,
max_logrows,
)
.map(|e| serde_json::to_string(&e).unwrap()),
@@ -198,7 +208,22 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
settings_path,
sol_code_path,
abi_path,
} => create_evm_verifier(vk_path, srs_path, settings_path, sol_code_path, abi_path),
render_vk_seperately,
} => create_evm_verifier(
vk_path,
srs_path,
settings_path,
sol_code_path,
abi_path,
render_vk_seperately,
),
Commands::CreateEVMVK {
vk_path,
srs_path,
settings_path,
sol_code_path,
abi_path,
} => create_evm_vk(vk_path, srs_path, settings_path, sol_code_path, abi_path),
#[cfg(not(target_arch = "wasm32"))]
Commands::CreateEVMDataAttestation {
settings_path,
@@ -214,6 +239,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
abi_path,
aggregation_settings,
logrows,
render_vk_seperately,
} => create_evm_aggregate_verifier(
vk_path,
srs_path,
@@ -221,6 +247,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
abi_path,
aggregation_settings,
logrows,
render_vk_seperately,
),
Commands::CompileCircuit {
model,
@@ -233,7 +260,15 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
vk_path,
pk_path,
witness,
} => setup(compiled_circuit, srs_path, vk_path, pk_path, witness),
compress_selectors,
} => setup(
compiled_circuit,
srs_path,
vk_path,
pk_path,
witness,
compress_selectors,
),
#[cfg(not(target_arch = "wasm32"))]
Commands::SetupTestEVMData {
data,
@@ -296,6 +331,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
srs_path,
logrows,
split_proofs,
compress_selectors,
} => setup_aggregate(
sample_snarks,
vk_path,
@@ -303,6 +339,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
srs_path,
logrows,
split_proofs,
compress_selectors,
),
Commands::Aggregate {
proof_path,
@@ -352,6 +389,25 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
addr_path,
optimizer_runs,
private_key,
"Halo2Verifier",
)
.await
}
#[cfg(not(target_arch = "wasm32"))]
Commands::DeployEvmVK {
sol_code_path,
rpc_url,
addr_path,
optimizer_runs,
private_key,
} => {
deploy_evm(
sol_code_path,
rpc_url,
addr_path,
optimizer_runs,
private_key,
"Halo2VerifyingKey",
)
.await
}
@@ -382,7 +438,8 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
addr_verifier,
rpc_url,
addr_da,
} => verify_evm(proof_path, addr_verifier, rpc_url, addr_da).await,
addr_vk,
} => verify_evm(proof_path, addr_verifier, rpc_url, addr_da, addr_vk).await,
Commands::PrintProofHex { proof_path } => print_proof_hex(proof_path),
}
}
@@ -432,7 +489,7 @@ async fn fetch_srs(uri: &str) -> Result<Vec<u8>, Box<dyn Error>> {
#[cfg(not(target_arch = "wasm32"))]
fn check_srs_hash(logrows: u32, srs_path: Option<PathBuf>) -> Result<String, Box<dyn Error>> {
let path = get_srs_path(logrows, srs_path);
let hash = sha256::digest(&std::fs::read(path.clone())?);
let hash = sha256::digest(std::fs::read(path.clone())?);
info!("SRS hash: {}", hash);
let predefined_hash = match { crate::srs_sha::PUBLIC_SRS_SHA256_HASHES.get(&logrows) } {
@@ -440,7 +497,7 @@ fn check_srs_hash(logrows: u32, srs_path: Option<PathBuf>) -> Result<String, Box
None => return Err(format!("SRS (k={}) hash not found in public set", logrows).into()),
};
if hash != predefined_hash.to_string() {
if hash != *predefined_hash {
// delete file
warn!("removing SRS file at {}", path.display());
std::fs::remove_file(path)?;
@@ -662,8 +719,7 @@ impl AccuracyResults {
let error = (original.clone() - calibrated.clone())?;
let abs_error = error.map(|x| x.abs());
let squared_error = error.map(|x| x.powi(2));
let percentage_error =
error.enum_map(|i, x| Ok::<_, TensorError>(x / original[i].clone()))?;
let percentage_error = error.enum_map(|i, x| Ok::<_, TensorError>(x / original[i]))?;
let abs_percentage_error = percentage_error.map(|x| x.abs());
errors.extend(error.into_iter());
@@ -679,29 +735,25 @@ impl AccuracyResults {
abs_percentage_errors.iter().sum::<f32>() / abs_percentage_errors.len() as f32;
let mean_error = errors.iter().sum::<f32>() / errors.len() as f32;
let median_error = errors[errors.len() / 2];
let max_error = errors
let max_error = *errors
.iter()
.max_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap()
.clone();
let min_error = errors
.unwrap();
let min_error = *errors
.iter()
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap()
.clone();
.unwrap();
let mean_abs_error = abs_errors.iter().sum::<f32>() / abs_errors.len() as f32;
let median_abs_error = abs_errors[abs_errors.len() / 2];
let max_abs_error = abs_errors
let max_abs_error = *abs_errors
.iter()
.max_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap()
.clone();
let min_abs_error = abs_errors
.unwrap();
let min_abs_error = *abs_errors
.iter()
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap()
.clone();
.unwrap();
let mean_squared_error = squared_errors.iter().sum::<f32>() / squared_errors.len() as f32;
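The min/max refactors above replace .clone() with a dereference, which suffices because f32 is Copy. A self-contained illustration (as in the code above, partial_cmp(...).unwrap() panics if a NaN sneaks in):

let errors = vec![0.5_f32, -2.0, 3.25];
let max_error = *errors
    .iter()
    .max_by(|a, b| a.partial_cmp(b).unwrap())
    .unwrap();
assert_eq!(max_error, 3.25);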
@@ -731,6 +783,8 @@ pub(crate) fn calibrate(
target: CalibrationTarget,
lookup_safety_margin: i128,
scales: Option<Vec<crate::Scale>>,
scale_rebase_multiplier: Vec<u32>,
div_rebasing: Option<bool>,
max_logrows: Option<u32>,
) -> Result<GraphSettings, Box<dyn Error>> {
use std::collections::HashMap;
@@ -774,9 +828,13 @@ pub(crate) fn calibrate(
}
};
let mut found_params: Vec<GraphSettings> = vec![];
let div_rebasing = if let Some(div_rebasing) = div_rebasing {
vec![div_rebasing]
} else {
vec![true, false]
};
let scale_rebase_multiplier = [1, 2, 10];
let mut found_params: Vec<GraphSettings> = vec![];
// input scale x param scale grid
let range_grid = range
@@ -813,15 +871,21 @@ pub(crate) fn calibrate(
.map(|(a, b)| (*a, *b))
.collect::<Vec<((crate::Scale, crate::Scale), u32)>>();
let range_grid = range_grid
.iter()
.cartesian_product(div_rebasing.iter())
.map(|(a, b)| (*a, *b))
.collect::<Vec<(((crate::Scale, crate::Scale), u32), bool)>>();
let mut forward_pass_res = HashMap::new();
let pb = init_bar(range_grid.len() as u64);
pb.set_message("calibrating...");
for ((input_scale, param_scale), scale_rebase_multiplier) in range_grid {
for (((input_scale, param_scale), scale_rebase_multiplier), div_rebasing) in range_grid {
pb.set_message(format!(
"input scale: {}, param scale: {}, scale rebase multiplier: {}",
input_scale, param_scale, scale_rebase_multiplier
"input scale: {}, param scale: {}, scale rebase multiplier: {}, div rebasing: {}",
input_scale, param_scale, scale_rebase_multiplier, div_rebasing
));
#[cfg(unix)]
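The calibration sweep is now a four-way cartesian product: input scales x param scales x rebase multipliers x the two div-rebasing modes. A self-contained illustration of the resulting grid size with itertools (the candidate values here are made up):

use itertools::Itertools;

let scales = vec![0, 4];
let multipliers = vec![1u32, 2, 10];
let div_rebasing = vec![true, false];
let grid: Vec<_> = scales
    .iter()
    .cartesian_product(scales.iter())
    .cartesian_product(multipliers.iter())
    .cartesian_product(div_rebasing.iter())
    .collect();
// 2 input scales * 2 param scales * 3 multipliers * 2 modes
assert_eq!(grid.len(), 24);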
@@ -834,7 +898,6 @@ pub(crate) fn calibrate(
Ok(r) => Some(r),
Err(_) => None,
};
let key = (input_scale, param_scale, scale_rebase_multiplier);
forward_pass_res.insert(key, vec![]);
@@ -842,12 +905,21 @@ pub(crate) fn calibrate(
input_scale,
param_scale,
scale_rebase_multiplier,
div_rebasing,
..settings.run_args.clone()
};
let mut circuit = match GraphCircuit::from_run_args(&local_run_args, &model_path) {
Ok(c) => c,
Err(_) => return Err(format!("failed to create circuit from run args").into()),
Err(e) => {
// drop the gag
#[cfg(unix)]
std::mem::drop(_r);
#[cfg(unix)]
std::mem::drop(_q);
debug!("circuit creation from run args failed: {:?}", e);
continue;
}
};
chunks
@@ -874,16 +946,18 @@ pub(crate) fn calibrate(
.collect::<Result<Vec<()>, String>>()?;
let min_lookup_range = forward_pass_res
.get(&key)
.unwrap()
.iter()
.map(|x| x.1.iter().map(|x| x.min_lookup_inputs))
.flatten()
.map(|x| x.min_lookup_inputs)
.min()
.unwrap_or(0);
let max_lookup_range = forward_pass_res
.get(&key)
.unwrap()
.iter()
.map(|x| x.1.iter().map(|x| x.max_lookup_inputs))
.flatten()
.map(|x| x.max_lookup_inputs)
.max()
.unwrap_or(0);
@@ -906,6 +980,7 @@ pub(crate) fn calibrate(
let found_run_args = RunArgs {
input_scale: new_settings.run_args.input_scale,
param_scale: new_settings.run_args.param_scale,
div_rebasing: new_settings.run_args.div_rebasing,
lookup_range: new_settings.run_args.lookup_range,
logrows: new_settings.run_args.logrows,
scale_rebase_multiplier: new_settings.run_args.scale_rebase_multiplier,
@@ -915,6 +990,7 @@ pub(crate) fn calibrate(
let found_settings = GraphSettings {
run_args: found_run_args,
required_lookups: new_settings.required_lookups,
required_range_checks: new_settings.required_range_checks,
model_output_scales: new_settings.model_output_scales,
model_input_scales: new_settings.model_input_scales,
num_rows: new_settings.num_rows,
@@ -930,7 +1006,7 @@ pub(crate) fn calibrate(
found_settings.as_json()?.to_colored_json_auto()?
);
} else {
debug!("calibration failed");
debug!("calibration failed {}", res.err().unwrap());
}
pb.inc(1);
@@ -1023,7 +1099,7 @@ pub(crate) fn calibrate(
let tear_sheet_table = Table::new(vec![accuracy_res]);
println!(
warn!(
"\n\n <------------- Numerical Fidelity Report (input_scale: {}, param_scale: {}, scale_input_multiplier: {}) ------------->\n\n{}\n\n",
best_params.run_args.input_scale,
best_params.run_args.param_scale,
@@ -1087,7 +1163,7 @@ pub(crate) fn mock(
)
.map_err(Box::<dyn Error>::from)?;
prover
.verify_par()
.verify()
.map_err(|e| Box::<dyn Error>::from(ExecutionError::VerifyError(e)))?;
Ok(String::new())
}
@@ -1132,6 +1208,7 @@ pub(crate) fn create_evm_verifier(
settings_path: PathBuf,
sol_code_path: PathBuf,
abi_path: PathBuf,
render_vk_seperately: bool,
) -> Result<String, Box<dyn Error>> {
check_solc_requirement();
let circuit_settings = GraphSettings::load(&settings_path)?;
@@ -1149,7 +1226,11 @@ pub(crate) fn create_evm_verifier(
halo2_solidity_verifier::BatchOpenScheme::Bdfg21,
num_instance,
);
let verifier_solidity = generator.render()?;
let verifier_solidity = if render_vk_seperately {
generator.render_separately()?.0 // ignore the rendered vk for now and generate it in create_evm_vk
} else {
generator.render()?
};
File::create(sol_code_path.clone())?.write_all(verifier_solidity.as_bytes())?;
@@ -1161,6 +1242,43 @@ pub(crate) fn create_evm_verifier(
Ok(String::new())
}
#[cfg(not(target_arch = "wasm32"))]
pub(crate) fn create_evm_vk(
vk_path: PathBuf,
srs_path: Option<PathBuf>,
settings_path: PathBuf,
sol_code_path: PathBuf,
abi_path: PathBuf,
) -> Result<String, Box<dyn Error>> {
check_solc_requirement();
let circuit_settings = GraphSettings::load(&settings_path)?;
let params = load_params_cmd(srs_path, circuit_settings.run_args.logrows)?;
let num_instance = circuit_settings.total_instances();
let num_instance: usize = num_instance.iter().sum::<usize>();
let vk = load_vk::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(vk_path, circuit_settings)?;
trace!("params computed");
let generator = halo2_solidity_verifier::SolidityGenerator::new(
&params,
&vk,
halo2_solidity_verifier::BatchOpenScheme::Bdfg21,
num_instance,
);
let vk_solidity = generator.render_separately()?.1;
File::create(sol_code_path.clone())?.write_all(vk_solidity.as_bytes())?;
// fetch abi of the contract
let (abi, _, _) = get_contract_artifacts(sol_code_path, "Halo2VerifyingKey", 0)?;
// save abi to file
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
Ok(String::new())
}
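As used here and in create_evm_verifier, render_separately is assumed to return a (verifier, vk) pair of Solidity sources; create_evm_verifier keeps index 0 and this function keeps index 1. A short sketch of which contract each artifact compiles to, matching the names used elsewhere in this diff:

let (verifier_solidity, vk_solidity) = generator.render_separately()?;
// verifier_solidity -> "Halo2Verifier"      (deployed by deploy-evm-verifier)
// vk_solidity       -> "Halo2VerifyingKey"  (deployed by deploy-evm-vk)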
#[cfg(not(target_arch = "wasm32"))]
pub(crate) fn create_evm_data_attestation(
settings_path: PathBuf,
@@ -1256,13 +1374,15 @@ pub(crate) async fn deploy_evm(
addr_path: PathBuf,
runs: usize,
private_key: Option<String>,
contract_name: &str,
) -> Result<String, Box<dyn Error>> {
check_solc_requirement();
let contract_address = deploy_verifier_via_solidity(
let contract_address = deploy_contract_via_solidity(
sol_code_path,
rpc_url.as_deref(),
runs,
private_key.as_deref(),
contract_name,
)
.await?;
@@ -1279,6 +1399,7 @@ pub(crate) async fn verify_evm(
addr_verifier: H160,
rpc_url: Option<String>,
addr_da: Option<H160>,
addr_vk: Option<H160>,
) -> Result<String, Box<dyn Error>> {
use crate::eth::verify_proof_with_data_attestation;
check_solc_requirement();
@@ -1290,11 +1411,12 @@ pub(crate) async fn verify_evm(
proof.clone(),
addr_verifier,
addr_da,
addr_vk,
rpc_url.as_deref(),
)
.await?
} else {
verify_proof_via_solidity(proof.clone(), addr_verifier, rpc_url.as_deref()).await?
verify_proof_via_solidity(proof.clone(), addr_verifier, addr_vk, rpc_url.as_deref()).await?
};
info!("Solidity verification result: {}", result);
@@ -1314,6 +1436,7 @@ pub(crate) fn create_evm_aggregate_verifier(
abi_path: PathBuf,
circuit_settings: Vec<PathBuf>,
logrows: u32,
render_vk_seperately: bool,
) -> Result<String, Box<dyn Error>> {
check_solc_requirement();
let srs_path = get_srs_path(logrows, srs_path);
@@ -1352,7 +1475,11 @@ pub(crate) fn create_evm_aggregate_verifier(
generator = generator.set_acc_encoding(Some(acc_encoding));
let verifier_solidity = generator.render()?;
let verifier_solidity = if render_vk_seperately {
generator.render_separately()?.0 // ignore the rendered vk for now and generate it in create_evm_vk
} else {
generator.render()?
};
File::create(sol_code_path.clone())?.write_all(verifier_solidity.as_bytes())?;
@@ -1381,6 +1508,7 @@ pub(crate) fn setup(
vk_path: PathBuf,
pk_path: PathBuf,
witness: Option<PathBuf>,
compress_selectors: bool,
) -> Result<String, Box<dyn Error>> {
// these aren't real values so the sanity checks are mostly meaningless
let mut circuit = GraphCircuit::load(compiled_circuit)?;
@@ -1391,8 +1519,12 @@ pub(crate) fn setup(
let params = load_params_cmd(srs_path, circuit.settings().run_args.logrows)?;
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(&circuit, &params)
.map_err(Box::<dyn Error>::from)?;
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
&circuit,
&params,
compress_selectors,
)
.map_err(Box::<dyn Error>::from)?;
save_vk::<KZGCommitmentScheme<Bn256>>(&vk_path, pk.get_vk())?;
save_pk::<KZGCommitmentScheme<Bn256>>(&pk_path, &pk)?;
@@ -1531,6 +1663,7 @@ pub(crate) fn fuzz(
data_path: PathBuf,
transcript: TranscriptType,
num_runs: usize,
compress_selectors: bool,
) -> Result<String, Box<dyn Error>> {
check_solc_requirement();
let passed = AtomicBool::new(true);
@@ -1546,8 +1679,12 @@ pub(crate) fn fuzz(
let data = GraphWitness::from_path(data_path)?;
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(&circuit, &params)
.map_err(Box::<dyn Error>::from)?;
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
&circuit,
&params,
compress_selectors,
)
.map_err(Box::<dyn Error>::from)?;
circuit.load_graph_witness(&data)?;
@@ -1563,9 +1700,12 @@ pub(crate) fn fuzz(
let fuzz_pk = || {
let new_params = gen_srs::<KZGCommitmentScheme<Bn256>>(logrows);
let bad_pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(&circuit, &new_params)
.map_err(|_| ())?;
let bad_pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
&circuit,
&new_params,
compress_selectors,
)
.map_err(|_| ())?;
let bad_proof = create_proof_circuit_kzg(
circuit.clone(),
@@ -1636,9 +1776,12 @@ pub(crate) fn fuzz(
let fuzz_vk = || {
let new_params = gen_srs::<KZGCommitmentScheme<Bn256>>(logrows);
let bad_pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(&circuit, &new_params)
.map_err(|_| ())?;
let bad_pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
&circuit,
&new_params,
compress_selectors,
)
.map_err(|_| ())?;
let bad_vk = bad_pk.get_vk();
@@ -1798,7 +1941,7 @@ pub(crate) fn mock_aggregate(
let prover = halo2_proofs::dev::MockProver::run(logrows, &circuit, vec![circuit.instances()])
.map_err(Box::<dyn Error>::from)?;
prover
.verify_par()
.verify()
.map_err(|e| Box::<dyn Error>::from(ExecutionError::VerifyError(e)))?;
#[cfg(not(target_arch = "wasm32"))]
pb.finish_with_message("Done.");
@@ -1812,6 +1955,7 @@ pub(crate) fn setup_aggregate(
srs_path: Option<PathBuf>,
logrows: u32,
split_proofs: bool,
compress_selectors: bool,
) -> Result<String, Box<dyn Error>> {
// the K used for the aggregation circuit
let params = load_params_cmd(srs_path, logrows)?;
@@ -1822,8 +1966,11 @@ pub(crate) fn setup_aggregate(
}
let agg_circuit = AggregationCircuit::new(&params.get_g()[0].into(), snarks, split_proofs)?;
let agg_pk =
create_keys::<KZGCommitmentScheme<Bn256>, Fr, AggregationCircuit>(&agg_circuit, &params)?;
let agg_pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, AggregationCircuit>(
&agg_circuit,
&params,
compress_selectors,
)?;
let agg_vk = agg_pk.get_vk();

View File

@@ -617,13 +617,13 @@ impl ToPyObject for DataSource {
}
#[cfg(feature = "python-bindings")]
use crate::pfsys::field_to_vecu64_montgomery;
use crate::pfsys::field_to_string_montgomery;
#[cfg(feature = "python-bindings")]
impl ToPyObject for FileSourceInner {
fn to_object(&self, py: Python) -> PyObject {
match self {
FileSourceInner::Field(data) => field_to_vecu64_montgomery(data).to_object(py),
FileSourceInner::Field(data) => field_to_string_montgomery(data).to_object(py),
FileSourceInner::Bool(data) => data.to_object(py),
FileSourceInner::Float(data) => data.to_object(py),
}

View File

@@ -23,7 +23,7 @@ use self::input::{FileSource, GraphData};
use self::modules::{GraphModules, ModuleConfigs, ModuleForwardResult, ModuleSizes};
use crate::circuit::lookup::LookupOp;
use crate::circuit::modules::ModulePlanner;
use crate::circuit::table::{Table, RESERVED_BLINDING_ROWS_PAD};
use crate::circuit::table::{Range, Table, RESERVED_BLINDING_ROWS_PAD};
use crate::circuit::{CheckMode, InputType};
use crate::fieldutils::felt_to_f64;
use crate::pfsys::PrettyElements;
@@ -53,7 +53,7 @@ pub use utilities::*;
pub use vars::*;
#[cfg(feature = "python-bindings")]
use crate::pfsys::field_to_vecu64_montgomery;
use crate::pfsys::field_to_string_montgomery;
/// The safety factor for the range of the lookup table.
pub const RANGE_MULTIPLIER: i128 = 2;
@@ -332,16 +332,16 @@ impl ToPyObject for GraphWitness {
let dict_params = PyDict::new(py);
let dict_outputs = PyDict::new(py);
let inputs: Vec<Vec<[u64; 4]>> = self
let inputs: Vec<Vec<String>> = self
.inputs
.iter()
.map(|x| x.iter().map(field_to_vecu64_montgomery).collect())
.map(|x| x.iter().map(field_to_string_montgomery).collect())
.collect();
let outputs: Vec<Vec<[u64; 4]>> = self
let outputs: Vec<Vec<String>> = self
.outputs
.iter()
.map(|x| x.iter().map(field_to_vecu64_montgomery).collect())
.map(|x| x.iter().map(field_to_string_montgomery).collect())
.collect();
dict.set_item("inputs", inputs).unwrap();
@@ -389,9 +389,9 @@ impl ToPyObject for GraphWitness {
#[cfg(feature = "python-bindings")]
fn insert_poseidon_hash_pydict(pydict: &PyDict, poseidon_hash: &Vec<Fp>) -> Result<(), PyErr> {
let poseidon_hash: Vec<[u64; 4]> = poseidon_hash
let poseidon_hash: Vec<String> = poseidon_hash
.iter()
.map(field_to_vecu64_montgomery)
.map(field_to_string_montgomery)
.collect();
pydict.set_item("poseidon_hash", poseidon_hash)?;
@@ -431,6 +431,8 @@ pub struct GraphSettings {
pub module_sizes: ModuleSizes,
/// required_lookups
pub required_lookups: Vec<LookupOp>,
/// required range_checks
pub required_range_checks: Vec<Range>,
/// check mode
pub check_mode: CheckMode,
/// ezkl version used
@@ -639,7 +641,7 @@ impl GraphCircuit {
}
// dummy module settings, must load from GraphData after
let mut settings = model.gen_params(run_args, CheckMode::UNSAFE)?;
let mut settings = model.gen_params(run_args, run_args.check_mode)?;
let mut num_params = 0;
if !model.const_shapes().is_empty() {
@@ -763,18 +765,18 @@ impl GraphCircuit {
if self.settings().run_args.input_visibility.is_public() {
public_inputs.rescaled_inputs = elements.rescaled_inputs.clone();
public_inputs.inputs = elements.inputs.clone();
} else if let Some(_) = &data.processed_inputs {
} else if data.processed_inputs.is_some() {
public_inputs.processed_inputs = elements.processed_inputs.clone();
}
if let Some(_) = &data.processed_params {
if data.processed_params.is_some() {
public_inputs.processed_params = elements.processed_params.clone();
}
if self.settings().run_args.output_visibility.is_public() {
public_inputs.rescaled_outputs = elements.rescaled_outputs.clone();
public_inputs.outputs = elements.outputs.clone();
} else if let Some(_) = &data.processed_outputs {
} else if data.processed_outputs.is_some() {
public_inputs.processed_outputs = elements.processed_outputs.clone();
}
@@ -960,19 +962,20 @@ impl GraphCircuit {
min_lookup_inputs: i128,
max_lookup_inputs: i128,
lookup_safety_margin: i128,
) -> (i128, i128) {
) -> Range {
let mut margin = (
lookup_safety_margin * min_lookup_inputs,
lookup_safety_margin * max_lookup_inputs,
);
if lookup_safety_margin == 1 {
margin.0 -= 1;
margin.0 += 1;
margin.1 += 1;
}
margin
}
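A worked example of the margin arithmetic, as a sketch (calc_safe_lookup_range is a private helper, so this only illustrates the math): with a safety margin of 2 and observed lookup inputs in [-100, 150], the bounds are doubled on both sides.

assert_eq!(GraphCircuit::calc_safe_lookup_range(-100, 150, 2), (-200, 300));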
fn calc_num_cols(safe_range: (i128, i128), max_logrows: u32) -> usize {
fn calc_num_cols(safe_range: Range, max_logrows: u32) -> usize {
let max_col_size = Table::<Fp>::cal_col_size(
max_logrows as usize,
Self::reserved_blinding_rows() as usize,
@@ -999,7 +1002,6 @@ impl GraphCircuit {
|| min_lookup_inputs < -MAX_LOOKUP_ABS / lookup_safety_margin
{
let err_string = format!("max lookup input ({}) is too large", max_lookup_inputs);
error!("{}", err_string);
return Err(err_string.into());
}
@@ -1134,7 +1136,7 @@ impl GraphCircuit {
while (1 << extended_k) < (n * quotient_poly_degree) {
extended_k += 1;
if !(extended_k <= bn256::Fr::S) {
if extended_k > bn256::Fr::S {
return false;
}
}
@@ -1457,6 +1459,7 @@ impl Circuit<Fp> for GraphCircuit {
params.run_args.lookup_range,
params.run_args.logrows as usize,
params.required_lookups,
params.required_range_checks,
params.check_mode,
)
.unwrap();

View File

@@ -6,6 +6,7 @@ use super::GraphError;
use super::GraphSettings;
use crate::circuit::hybrid::HybridOp;
use crate::circuit::region::RegionCtx;
use crate::circuit::table::Range;
use crate::circuit::Input;
use crate::circuit::InputType;
use crate::circuit::Unknown;
@@ -240,6 +241,13 @@ impl NodeType {
NodeType::SubGraph { model, .. } => model.required_lookups(),
}
}
/// Returns the range checks required by a graph
pub fn required_range_checks(&self) -> Vec<Range> {
match self {
NodeType::Node(n) => n.opkind.required_range_checks(),
NodeType::SubGraph { model, .. } => model.required_range_checks(),
}
}
/// Returns the scales of the node's output.
pub fn out_scales(&self) -> Vec<crate::Scale> {
match self {
@@ -432,6 +440,15 @@ impl Model {
.collect_vec()
}
///
fn required_range_checks(&self) -> Vec<Range> {
self.graph
.nodes
.values()
.flat_map(|n| n.required_range_checks())
.collect_vec()
}
/// Creates a `Model` from a specified path to an Onnx file.
/// # Arguments
/// * `reader` - A reader for an Onnx file.
@@ -489,6 +506,8 @@ impl Model {
// extract the requisite lookup ops from the model
let mut lookup_ops: Vec<LookupOp> = self.required_lookups();
// extract the requisite range checks from the model
let mut range_checks: Vec<Range> = self.required_range_checks();
// if we're using percentage tolerance, we need to add the necessary range check ops for it.
@@ -504,6 +523,9 @@ impl Model {
let set: HashSet<_> = lookup_ops.drain(..).collect(); // dedup
lookup_ops.extend(set.into_iter().sorted());
let set: HashSet<_> = range_checks.drain(..).collect(); // dedup
range_checks.extend(set.into_iter().sorted());
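The drain-into-a-HashSet-then-sort idiom above dedups while restoring a deterministic order. A self-contained sketch with Range tuples:

use itertools::Itertools;
use std::collections::HashSet;

let mut range_checks: Vec<(i128, i128)> = vec![(0, 3), (-1, 1), (0, 3)];
let set: HashSet<_> = range_checks.drain(..).collect(); // dedup
range_checks.extend(set.into_iter().sorted());
assert_eq!(range_checks, vec![(-1, 1), (0, 3)]);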
Ok(GraphSettings {
run_args: run_args.clone(),
model_instance_shapes: instance_shapes,
@@ -511,6 +533,7 @@ impl Model {
num_rows,
total_assignments: linear_coord,
required_lookups: lookup_ops,
required_range_checks: range_checks,
model_output_scales: self.graph.get_output_scales()?,
model_input_scales: self.graph.get_input_scales(),
total_const_size,
@@ -611,7 +634,7 @@ impl Model {
debug!("intermediate min lookup inputs: {}", min);
}
debug!(
"------------ output node int {}: {} \n ------------ float: {} \n ------------ max: {} \n ------------ min: {} ------------ scale: {}",
"------------ output node int {}: {} \n ------------ float: {} \n ------------ max: {} \n ------------ min: {} \n ------------ scale: {}",
idx,
res.output.map(crate::fieldutils::felt_to_i32).show(),
res.output
@@ -1042,6 +1065,7 @@ impl Model {
&run_args.param_visibility,
i,
symbol_values,
run_args.div_rebasing,
)?;
if let Some(ref scales) = override_input_scales {
if let Some(inp) = n.opkind.get_input() {
@@ -1058,9 +1082,20 @@ impl Model {
if scales.contains_key(&i) {
let scale_diff = n.out_scale - scales[&i];
n.opkind = if scale_diff > 0 {
RebaseScale::rebase(n.opkind, scales[&i], n.out_scale, 1)
RebaseScale::rebase(
n.opkind,
scales[&i],
n.out_scale,
1,
run_args.div_rebasing,
)
} else {
RebaseScale::rebase_up(n.opkind, scales[&i], n.out_scale)
RebaseScale::rebase_up(
n.opkind,
scales[&i],
n.out_scale,
run_args.div_rebasing,
)
};
n.out_scale = scales[&i];
}
@@ -1155,9 +1190,10 @@ impl Model {
pub fn configure(
meta: &mut ConstraintSystem<Fp>,
vars: &ModelVars<Fp>,
lookup_range: (i128, i128),
lookup_range: Range,
logrows: usize,
required_lookups: Vec<LookupOp>,
required_range_checks: Vec<Range>,
check_mode: CheckMode,
) -> Result<PolyConfig<Fp>, Box<dyn Error>> {
info!("configuring model");
@@ -1176,6 +1212,10 @@ impl Model {
base_gate.configure_lookup(meta, input, output, index, lookup_range, logrows, &op)?;
}
for range in required_range_checks {
base_gate.configure_range_check(meta, input, range)?;
}
Ok(base_gate)
}
@@ -1216,6 +1256,7 @@ impl Model {
let instance_idx = vars.get_instance_idx();
config.base.layout_tables(layouter)?;
config.base.layout_range_checks(layouter)?;
let mut num_rows = 0;
let mut linear_coord = 0;

View File

@@ -132,6 +132,8 @@ pub struct RebaseScale {
pub target_scale: i32,
/// The original scale of the operation's inputs.
pub original_scale: i32,
/// If true, the rebasing division is done with a lookup table; if false, it is done multiplicatively with a range check on the remainder
pub div_rebasing: bool,
}
impl RebaseScale {
@@ -141,6 +143,7 @@ impl RebaseScale {
global_scale: crate::Scale,
op_out_scale: crate::Scale,
scale_rebase_multiplier: u32,
div_rebasing: bool,
) -> SupportedOp {
if (op_out_scale > (global_scale * scale_rebase_multiplier as i32))
&& !inner.is_constant()
@@ -154,6 +157,7 @@ impl RebaseScale {
target_scale: op.target_scale,
multiplier: op.multiplier * multiplier,
original_scale: op.original_scale,
div_rebasing,
})
} else {
SupportedOp::RebaseScale(RebaseScale {
@@ -161,6 +165,7 @@ impl RebaseScale {
target_scale: global_scale * scale_rebase_multiplier as i32,
multiplier,
original_scale: op_out_scale,
div_rebasing,
})
}
} else {
@@ -173,6 +178,7 @@ impl RebaseScale {
inner: SupportedOp,
target_scale: crate::Scale,
op_out_scale: crate::Scale,
div_rebasing: bool,
) -> SupportedOp {
if (op_out_scale < (target_scale)) && !inner.is_constant() && !inner.is_input() {
let multiplier = scale_to_multiplier(op_out_scale - target_scale);
@@ -182,6 +188,7 @@ impl RebaseScale {
target_scale: op.target_scale,
multiplier: op.multiplier * multiplier,
original_scale: op.original_scale,
div_rebasing,
})
} else {
SupportedOp::RebaseScale(RebaseScale {
@@ -189,12 +196,22 @@ impl RebaseScale {
target_scale,
multiplier,
original_scale: op_out_scale,
div_rebasing,
})
}
} else {
inner
}
}
/// Calculates the required range bracket for the operation
fn range_bracket(&self) -> i128 {
if self.div_rebasing {
0
} else {
self.multiplier as i128 - 1
}
}
}
impl Op<Fp> for RebaseScale {
@@ -203,19 +220,28 @@ impl Op<Fp> for RebaseScale {
}
fn f(&self, x: &[Tensor<Fp>]) -> Result<crate::circuit::ForwardResult<Fp>, TensorError> {
let mut res = Op::<Fp>::f(&*self.inner, x)?;
let ri = res.output.map(felt_to_i128);
let rescaled = crate::tensor::ops::nonlinearities::const_div(&ri, self.multiplier);
res.output = rescaled.map(i128_to_felt);
res.intermediate_lookups.push(ri);
if self.div_rebasing {
let ri = res.output.map(felt_to_i128);
let rescaled = crate::tensor::ops::nonlinearities::const_div(&ri, self.multiplier);
res.output = rescaled.map(i128_to_felt);
res.intermediate_lookups.push(ri);
} else {
let ri = res.output.map(felt_to_i128);
let divisor = Tensor::from(vec![self.multiplier as i128].into_iter());
let rescaled = crate::tensor::ops::div(&[ri, divisor.clone()])?;
res.output = rescaled.map(i128_to_felt);
res.intermediate_lookups.extend([-divisor.clone(), divisor]);
}
Ok(res)
}
fn as_string(&self) -> String {
format!(
"REBASED (div={:?}) ({})",
"REBASED (div={:?}, div_r={}) ({})",
self.multiplier,
self.div_rebasing,
self.inner.as_string()
)
}
@@ -225,13 +251,24 @@ impl Op<Fp> for RebaseScale {
}
fn required_lookups(&self) -> Vec<LookupOp> {
let mut lookups = self.inner.required_lookups();
lookups.push(LookupOp::Div {
denom: crate::circuit::utils::F32(self.multiplier as f32),
});
let mut lookups: Vec<LookupOp> = self.inner.required_lookups();
if self.div_rebasing {
lookups.push(LookupOp::Div {
denom: crate::circuit::utils::F32(self.multiplier as f32),
});
}
lookups
}
fn required_range_checks(&self) -> Vec<crate::circuit::table::Range> {
let mut range_checks = self.inner.required_range_checks();
if !self.div_rebasing {
let bracket = self.range_bracket();
range_checks.push((-bracket, bracket));
}
range_checks
}
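When div rebasing is off, the layout divides by the multiplier m exactly up to a remainder, and the emitted (-(m-1), m-1) bracket is what constrains that remainder. A self-contained sketch of the invariant (the actual circuit encoding lives in layouts::div):

let m: i128 = 8; // multiplier
for x in [-17i128, -1, 0, 5, 23] {
    let q = x / m; // truncating division, as in the rebased witness
    let r = x - m * q; // remainder covered by the range check
    assert!(-(m - 1) <= r && r <= m - 1);
}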
fn layout(
&self,
config: &mut crate::circuit::BaseConfig<Fp>,
@@ -243,14 +280,23 @@ impl Op<Fp> for RebaseScale {
.layout(config, region, values)?
.ok_or("no layout")?;
Ok(Some(crate::circuit::layouts::nonlinearity(
config,
region,
&[original_res],
&LookupOp::Div {
denom: crate::circuit::utils::F32(self.multiplier as f32),
},
)?))
if !self.div_rebasing {
Ok(Some(crate::circuit::layouts::div(
config,
region,
&[original_res],
Fp::from(self.multiplier as u64),
)?))
} else {
Ok(Some(crate::circuit::layouts::nonlinearity(
config,
region,
&[original_res],
&LookupOp::Div {
denom: crate::circuit::utils::F32(self.multiplier as f32),
},
)?))
}
}
fn clone_dyn(&self) -> Box<dyn Op<Fp>> {
@@ -437,6 +483,10 @@ impl Op<Fp> for SupportedOp {
self.as_op().required_lookups()
}
fn required_range_checks(&self) -> Vec<crate::circuit::table::Range> {
self.as_op().required_range_checks()
}
fn out_scale(&self, in_scales: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> {
self.as_op().out_scale(in_scales)
}
@@ -477,6 +527,7 @@ impl Tabled for Node {
"inputs",
"out_dims",
"required_lookups",
"required_range_checks",
] {
headers.push(std::borrow::Cow::Borrowed(i));
}
@@ -498,6 +549,10 @@ impl Tabled for Node {
.map(<LookupOp as Op<Fp>>::as_string)
.collect_vec()
)));
fields.push(std::borrow::Cow::Owned(format!(
"{:?}",
self.opkind.required_range_checks()
)));
fields
}
}
@@ -527,6 +582,7 @@ impl Node {
param_visibility: &Visibility,
idx: usize,
symbol_values: &SymbolValues,
div_rebasing: bool,
) -> Result<Self, Box<dyn Error>> {
use log::warn;
@@ -631,7 +687,13 @@ impl Node {
let mut out_scale = opkind.out_scale(in_scales.clone())?;
// rescale the inputs if necessary to get consistent fixed points, we select the largest scale (highest precision)
let global_scale = scales.get_max();
opkind = RebaseScale::rebase(opkind, global_scale, out_scale, scales.rebase_multiplier);
opkind = RebaseScale::rebase(
opkind,
global_scale,
out_scale,
scales.rebase_multiplier,
div_rebasing,
);
out_scale = opkind.out_scale(in_scales)?;

View File

@@ -71,8 +71,7 @@ pub fn quantize_float(elem: &f64, shift: f64, scale: crate::Scale) -> Result<i12
pub fn dequantize(felt: Fp, scale: crate::Scale, shift: f64) -> f64 {
let int_rep = crate::fieldutils::felt_to_i128(felt);
let multiplier = scale_to_multiplier(scale);
let float_rep = int_rep as f64 / multiplier - shift;
float_rep
int_rep as f64 / multiplier - shift
}
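A round-trip sketch of the cleaned-up expression, assuming i128_to_felt from crate::fieldutils: with scale 7 the multiplier is 2^7 = 128.

let felt = i128_to_felt(256_i128);
assert_eq!(dequantize(felt, 7, 0.0), 2.0); // 256 / 128 - 0.0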
/// Converts a scale (log base 2) to a fixed point multiplier.
@@ -717,8 +716,8 @@ pub fn new_op_from_onnx(
}
}
"Recip" => {
// Extract the slope layer hyperparams
let in_scale = inputs[0].out_scales()[0];
// If the input scale is larger than the params scale
let scale_diff = std::cmp::max(scales.input, scales.params) - inputs[0].out_scales()[0];
let additional_scale = if scale_diff > 0 {
scale_to_multiplier(scale_diff)

View File

@@ -29,7 +29,7 @@
//! A library for turning computational graphs, such as neural networks, into ZK-circuits.
//!
use circuit::Tolerance;
use circuit::{table::Range, CheckMode, Tolerance};
use clap::Args;
use graph::Visibility;
use serde::{Deserialize, Serialize};
@@ -91,7 +91,7 @@ pub struct RunArgs {
pub scale_rebase_multiplier: u32,
/// The min and max elements in the lookup table input column
#[arg(short = 'B', long, value_parser = parse_tuple::<i128>, default_value = "(-32768,32768)")]
pub lookup_range: (i128, i128),
pub lookup_range: Range,
/// The log_2 number of rows
#[arg(short = 'K', long, default_value = "17")]
pub logrows: u32,
@@ -110,6 +110,12 @@ pub struct RunArgs {
/// Flags whether params are public, private, hashed
#[arg(long, default_value = "private")]
pub param_visibility: Visibility,
#[arg(long, default_value = "false")]
/// Whether to rebase scales with a division lookup instead of the default multiplicative rebasing
pub div_rebasing: bool,
/// check mode (safe or unsafe)
#[arg(long, default_value = "unsafe")]
pub check_mode: CheckMode,
}
impl Default for RunArgs {
@@ -126,6 +132,8 @@ impl Default for RunArgs {
input_visibility: Visibility::Private,
output_visibility: Visibility::Public,
param_visibility: Visibility::Private,
div_rebasing: false,
check_mode: CheckMode::UNSAFE,
}
}
}

View File

@@ -11,7 +11,7 @@ use crate::tensor::TensorType;
use clap::ValueEnum;
use halo2_proofs::circuit::Value;
use halo2_proofs::plonk::{
create_proof, keygen_pk, keygen_vk, verify_proof, Circuit, ProvingKey, VerifyingKey,
create_proof, keygen_pk, keygen_vk_custom, verify_proof, Circuit, ProvingKey, VerifyingKey,
};
use halo2_proofs::poly::commitment::{CommitmentScheme, Params, ParamsProver, Prover, Verifier};
use halo2_proofs::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG};
@@ -167,8 +167,8 @@ impl ToPyObject for TranscriptType {
#[cfg(feature = "python-bindings")]
///
pub fn g1affine_to_pydict(g1affine_dict: &PyDict, g1affine: &G1Affine) {
let g1affine_x = field_to_vecu64_montgomery(&g1affine.x);
let g1affine_y = field_to_vecu64_montgomery(&g1affine.y);
let g1affine_x = field_to_string_montgomery(&g1affine.x);
let g1affine_y = field_to_string_montgomery(&g1affine.y);
g1affine_dict.set_item("x", g1affine_x).unwrap();
g1affine_dict.set_item("y", g1affine_y).unwrap();
}
@@ -178,24 +178,24 @@ use halo2curves::bn256::G1;
#[cfg(feature = "python-bindings")]
///
pub fn g1_to_pydict(g1_dict: &PyDict, g1: &G1) {
let g1_x = field_to_vecu64_montgomery(&g1.x);
let g1_y = field_to_vecu64_montgomery(&g1.y);
let g1_z = field_to_vecu64_montgomery(&g1.z);
let g1_x = field_to_string_montgomery(&g1.x);
let g1_y = field_to_string_montgomery(&g1.y);
let g1_z = field_to_string_montgomery(&g1.z);
g1_dict.set_item("x", g1_x).unwrap();
g1_dict.set_item("y", g1_y).unwrap();
g1_dict.set_item("z", g1_z).unwrap();
}
/// converts fp into a `String` in Montgomery form
pub fn field_to_vecu64_montgomery<F: PrimeField + SerdeObject + Serialize>(fp: &F) -> [u64; 4] {
pub fn field_to_string_montgomery<F: PrimeField + SerdeObject + Serialize>(fp: &F) -> String {
let repr = serde_json::to_string(&fp).unwrap();
let b: [u64; 4] = serde_json::from_str(&repr).unwrap();
let b: String = serde_json::from_str(&repr).unwrap();
b
}
/// converts a `String` in Montgomery form into fp
pub fn vecu64_to_field_montgomery<F: PrimeField + SerdeObject + Serialize + DeserializeOwned>(
b: &[u64; 4],
pub fn string_to_field_montgomery<F: PrimeField + SerdeObject + Serialize + DeserializeOwned>(
b: &String,
) -> F {
let repr = serde_json::to_string(&b).unwrap();
let fp: F = serde_json::from_str(&repr).unwrap();
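The renamed helpers keep the same serde round trip, just with a String payload instead of [u64; 4] limbs. A hedged sketch of the expected behaviour:

let f = Fr::from(42u64);
let s: String = field_to_string_montgomery(&f);
let back: Fr = string_to_field_montgomery(&s);
assert_eq!(f, back);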
@@ -256,10 +256,10 @@ where
{
fn to_object(&self, py: Python) -> PyObject {
let dict = PyDict::new(py);
let field_elems: Vec<Vec<[u64; 4]>> = self
let field_elems: Vec<Vec<String>> = self
.instances
.iter()
.map(|x| x.iter().map(|fp| field_to_vecu64_montgomery(fp)).collect())
.map(|x| x.iter().map(|fp| field_to_string_montgomery(fp)).collect())
.collect::<Vec<_>>();
dict.set_item("instances", field_elems).unwrap();
let hex_proof = hex::encode(&self.proof);
@@ -306,6 +306,12 @@ where
}
}
/// create hex proof from proof
pub fn create_hex_proof(&mut self) {
let hex_proof = hex::encode(&self.proof);
self.hex_proof = Some(format!("0x{}", hex_proof));
}
/// Saves the Proof to a specified `proof_path`.
pub fn save(&self, proof_path: &PathBuf) -> Result<(), Box<dyn Error>> {
let file = std::fs::File::create(proof_path)?;
@@ -427,6 +433,7 @@ where
pub fn create_keys<Scheme: CommitmentScheme, F: PrimeField + TensorType, C: Circuit<F>>(
circuit: &C,
params: &'_ Scheme::ParamsProver,
compress_selectors: bool,
) -> Result<ProvingKey<Scheme::Curve>, halo2_proofs::plonk::Error>
where
C: Circuit<Scheme::Scalar>,
@@ -438,7 +445,7 @@ where
// Initialize verifying key
let now = Instant::now();
trace!("preparing VK");
let vk = keygen_vk(params, &empty_circuit)?;
let vk = keygen_vk_custom(params, &empty_circuit, compress_selectors)?;
let elapsed = now.elapsed();
info!("VK took {}.{}", elapsed.as_secs(), elapsed.subsec_millis());
@@ -594,6 +601,7 @@ where
let mut snark_new = snark.clone();
// swap the proof bytes for the new ones
snark_new.proof[..proof_first_bytes.len()].copy_from_slice(&proof_first_bytes);
snark_new.create_hex_proof();
Ok(snark_new)
}

View File

@@ -30,7 +30,7 @@ use std::str::FromStr;
use std::{fs::File, path::PathBuf};
use tokio::runtime::Runtime;
type PyFelt = [u64; 4];
type PyFelt = String;
#[pyclass]
#[derive(Debug, Clone)]
@@ -65,9 +65,9 @@ struct PyG1 {
impl From<G1> for PyG1 {
fn from(g1: G1) -> Self {
PyG1 {
x: crate::pfsys::field_to_vecu64_montgomery::<Fq>(&g1.x),
y: crate::pfsys::field_to_vecu64_montgomery::<Fq>(&g1.y),
z: crate::pfsys::field_to_vecu64_montgomery::<Fq>(&g1.z),
x: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.x),
y: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.y),
z: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.z),
}
}
}
@@ -75,9 +75,9 @@ impl From<G1> for PyG1 {
impl From<PyG1> for G1 {
fn from(val: PyG1) -> Self {
G1 {
x: crate::pfsys::vecu64_to_field_montgomery::<Fq>(&val.x),
y: crate::pfsys::vecu64_to_field_montgomery::<Fq>(&val.y),
z: crate::pfsys::vecu64_to_field_montgomery::<Fq>(&val.z),
x: crate::pfsys::string_to_field_montgomery::<Fq>(&val.x),
y: crate::pfsys::string_to_field_montgomery::<Fq>(&val.y),
z: crate::pfsys::string_to_field_montgomery::<Fq>(&val.z),
}
}
}
@@ -108,8 +108,8 @@ pub struct PyG1Affine {
impl From<G1Affine> for PyG1Affine {
fn from(g1: G1Affine) -> Self {
PyG1Affine {
x: crate::pfsys::field_to_vecu64_montgomery::<Fq>(&g1.x),
y: crate::pfsys::field_to_vecu64_montgomery::<Fq>(&g1.y),
x: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.x),
y: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.y),
}
}
}
@@ -117,8 +117,8 @@ impl From<G1Affine> for PyG1Affine {
impl From<PyG1Affine> for G1Affine {
fn from(val: PyG1Affine) -> Self {
G1Affine {
x: crate::pfsys::vecu64_to_field_montgomery::<Fq>(&val.x),
y: crate::pfsys::vecu64_to_field_montgomery::<Fq>(&val.y),
x: crate::pfsys::string_to_field_montgomery::<Fq>(&val.x),
y: crate::pfsys::string_to_field_montgomery::<Fq>(&val.y),
}
}
}
@@ -146,7 +146,7 @@ struct PyRunArgs {
#[pyo3(get, set)]
pub scale_rebase_multiplier: u32,
#[pyo3(get, set)]
pub lookup_range: (i128, i128),
pub lookup_range: crate::circuit::table::Range,
#[pyo3(get, set)]
pub logrows: u32,
#[pyo3(get, set)]
@@ -159,6 +159,10 @@ struct PyRunArgs {
pub param_visibility: Visibility,
#[pyo3(get, set)]
pub variables: Vec<(String, usize)>,
#[pyo3(get, set)]
pub div_rebasing: bool,
#[pyo3(get, set)]
pub check_mode: CheckMode,
}
/// default instantiation of PyRunArgs
@@ -185,6 +189,8 @@ impl From<PyRunArgs> for RunArgs {
output_visibility: py_run_args.output_visibility,
param_visibility: py_run_args.param_visibility,
variables: py_run_args.variables,
div_rebasing: py_run_args.div_rebasing,
check_mode: py_run_args.check_mode,
}
}
}
@@ -203,6 +209,8 @@ impl Into<PyRunArgs> for RunArgs {
output_visibility: self.output_visibility,
param_visibility: self.param_visibility,
variables: self.variables,
div_rebasing: self.div_rebasing,
check_mode: self.check_mode,
}
}
}
@@ -211,10 +219,10 @@ impl Into<PyRunArgs> for RunArgs {
#[pyfunction(signature = (
array,
))]
fn vecu64_to_felt(array: PyFelt) -> PyResult<String> {
fn string_to_felt(array: PyFelt) -> PyResult<String> {
Ok(format!(
"{:?}",
crate::pfsys::vecu64_to_field_montgomery::<Fr>(&array)
crate::pfsys::string_to_field_montgomery::<Fr>(&array)
))
}
@@ -222,8 +230,8 @@ fn vecu64_to_felt(array: PyFelt) -> PyResult<String> {
#[pyfunction(signature = (
array,
))]
fn vecu64_to_int(array: PyFelt) -> PyResult<i128> {
let felt = crate::pfsys::vecu64_to_field_montgomery::<Fr>(&array);
fn string_to_int(array: PyFelt) -> PyResult<i128> {
let felt = crate::pfsys::string_to_field_montgomery::<Fr>(&array);
let int_rep = felt_to_i128(felt);
Ok(int_rep)
}
@@ -233,8 +241,8 @@ fn vecu64_to_int(array: PyFelt) -> PyResult<i128> {
array,
scale
))]
fn vecu64_to_float(array: PyFelt, scale: crate::Scale) -> PyResult<f64> {
let felt = crate::pfsys::vecu64_to_field_montgomery::<Fr>(&array);
fn string_to_float(array: PyFelt, scale: crate::Scale) -> PyResult<f64> {
let felt = crate::pfsys::string_to_field_montgomery::<Fr>(&array);
let int_rep = felt_to_i128(felt);
let multiplier = scale_to_multiplier(scale);
let float_rep = int_rep as f64 / multiplier;
@@ -246,11 +254,11 @@ fn vecu64_to_float(array: PyFelt, scale: crate::Scale) -> PyResult<f64> {
input,
scale
))]
fn float_to_vecu64(input: f64, scale: crate::Scale) -> PyResult<PyFelt> {
fn float_to_string(input: f64, scale: crate::Scale) -> PyResult<PyFelt> {
let int_rep = quantize_float(&input, 0.0, scale)
.map_err(|_| PyIOError::new_err("Failed to quantize input"))?;
let felt = i128_to_felt(int_rep);
Ok(crate::pfsys::field_to_vecu64_montgomery::<Fr>(&felt))
Ok(crate::pfsys::field_to_string_montgomery::<Fr>(&felt))
}
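Together, the renamed helpers round-trip floats, integers, and string-encoded field elements. A small sketch; expected values assume scale_to_multiplier(7) == 2^7:

import ezkl

felt = ezkl.float_to_string(1.5, 7)       # quantize 1.5 at scale 7
assert ezkl.string_to_int(felt) == 192    # 1.5 * 2^7
assert ezkl.string_to_float(felt, 7) == 1.5
print(ezkl.string_to_felt(felt))          # hex Debug rendering of the felt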
/// Converts a buffer to a vector of field elements encoded as strings
@@ -318,7 +326,7 @@ fn buffer_to_felts(buffer: Vec<u8>) -> PyResult<Vec<String>> {
fn poseidon_hash(message: Vec<PyFelt>) -> PyResult<Vec<PyFelt>> {
let message: Vec<Fr> = message
.iter()
.map(crate::pfsys::vecu64_to_field_montgomery::<Fr>)
.map(crate::pfsys::string_to_field_montgomery::<Fr>)
.collect::<Vec<_>>();
let output =
@@ -329,7 +337,7 @@ fn poseidon_hash(message: Vec<PyFelt>) -> PyResult<Vec<PyFelt>> {
let hash = output[0]
.iter()
.map(crate::pfsys::field_to_vecu64_montgomery::<Fr>)
.map(crate::pfsys::field_to_string_montgomery::<Fr>)
.collect::<Vec<_>>();
Ok(hash)
}
@@ -337,8 +345,8 @@ fn poseidon_hash(message: Vec<PyFelt>) -> PyResult<Vec<PyFelt>> {
/// Generate a kzg commitment.
#[pyfunction(signature = (
message,
vk_path,
settings_path,
vk_path=PathBuf::from(DEFAULT_VK),
settings_path=PathBuf::from(DEFAULT_SETTINGS),
srs_path=None
))]
fn kzg_commit(
@@ -349,7 +357,7 @@ fn kzg_commit(
) -> PyResult<Vec<PyG1Affine>> {
let message: Vec<Fr> = message
.iter()
.map(crate::pfsys::vecu64_to_field_montgomery::<Fr>)
.map(crate::pfsys::string_to_field_montgomery::<Fr>)
.collect::<Vec<_>>();
let settings = GraphSettings::load(&settings_path)
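Since vk_path and settings_path now default to DEFAULT_VK and DEFAULT_SETTINGS, a commitment can be taken with only the message. A hedged sketch (srs_path=None is assumed to fall back to the standard SRS location):

import ezkl

message = [ezkl.float_to_string(x, 7) for x in [1.0, 2.0, 3.0, 4.0]]
commitments = ezkl.kzg_commit(message)    # DEFAULT_VK / DEFAULT_SETTINGS supply the rest
first = commitments[0]                    # a PyG1Affine; x/y are string-encoded felts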
@@ -387,9 +395,9 @@ fn swap_proof_commitments(proof_path: PathBuf, witness_path: PathBuf) -> PyResul
/// Generates a vk from a pk for a model circuit and saves it to a file
#[pyfunction(signature = (
path_to_pk,
circuit_settings_path,
vk_output_path
path_to_pk=PathBuf::from(DEFAULT_PK),
circuit_settings_path=PathBuf::from(DEFAULT_SETTINGS),
vk_output_path=PathBuf::from(DEFAULT_VK),
))]
fn gen_vk_from_pk_single(
path_to_pk: PathBuf,
@@ -413,8 +421,8 @@ fn gen_vk_from_pk_single(
/// Generates a vk from a pk for an aggregate circuit and saves it to a file
#[pyfunction(signature = (
path_to_pk,
vk_output_path
path_to_pk=PathBuf::from(DEFAULT_PK_AGGREGATED),
vk_output_path=PathBuf::from(DEFAULT_VK_AGGREGATED),
))]
fn gen_vk_from_pk_aggr(path_to_pk: PathBuf, vk_output_path: PathBuf) -> PyResult<bool> {
let pk = load_pk::<KZGCommitmentScheme<Bn256>, Fr, AggregationCircuit>(path_to_pk, ())
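With defaults on both functions, a verifying key can be re-derived from a proving key without arguments. A sketch assuming the DEFAULT_* artifacts exist from an earlier setup run:

import ezkl

ezkl.gen_vk_from_pk_single()   # DEFAULT_PK + DEFAULT_SETTINGS -> DEFAULT_VK
ezkl.gen_vk_from_pk_aggr()     # DEFAULT_PK_AGGREGATED -> DEFAULT_VK_AGGREGATED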
@@ -511,7 +519,9 @@ fn gen_settings(
target = CalibrationTarget::default(), // default is "resources"
lookup_safety_margin = DEFAULT_LOOKUP_SAFETY_MARGIN.parse().unwrap(),
scales = None,
scale_rebase_multiplier = DEFAULT_SCALE_REBASE_MULTIPLIERS.split(",").map(|x| x.parse().unwrap()).collect(),
max_logrows = None,
div_rebasing = None,
))]
fn calibrate_settings(
data: PathBuf,
@@ -520,7 +530,9 @@ fn calibrate_settings(
target: CalibrationTarget,
lookup_safety_margin: i128,
scales: Option<Vec<crate::Scale>>,
scale_rebase_multiplier: Vec<u32>,
max_logrows: Option<u32>,
div_rebasing: Option<bool>,
) -> Result<bool, PyErr> {
crate::execute::calibrate(
model,
@@ -529,6 +541,8 @@ fn calibrate_settings(
target,
lookup_safety_margin,
scales,
scale_rebase_multiplier,
div_rebasing,
max_logrows,
)
.map_err(|e| {
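scale_rebase_multiplier is now a list of candidates searched during calibration (defaulting to DEFAULT_SCALE_REBASE_MULTIPLIERS), and div_rebasing is an optional override. A sketch relying on the default data/model/settings paths; passing None for div_rebasing is assumed to let calibration choose:

import ezkl

ezkl.calibrate_settings(
    target="resources",
    scales=[2, 7],
    scale_rebase_multiplier=[1, 2, 10],
    div_rebasing=None,   # pin to True/False to force one mode
)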
@@ -543,7 +557,7 @@ fn calibrate_settings(
#[pyfunction(signature = (
data=PathBuf::from(DEFAULT_DATA),
model=PathBuf::from(DEFAULT_MODEL),
output=None,
output=PathBuf::from(DEFAULT_WITNESS),
vk_path=None,
srs_path=None,
))]
@@ -604,7 +618,8 @@ fn mock_aggregate(
vk_path=PathBuf::from(DEFAULT_VK),
pk_path=PathBuf::from(DEFAULT_PK),
srs_path=None,
witness_path = None
witness_path = None,
compress_selectors=DEFAULT_COMPRESS_SELECTORS.parse().unwrap(),
))]
fn setup(
model: PathBuf,
@@ -612,8 +627,17 @@ fn setup(
pk_path: PathBuf,
srs_path: Option<PathBuf>,
witness_path: Option<PathBuf>,
compress_selectors: bool,
) -> Result<bool, PyErr> {
crate::execute::setup(model, srs_path, vk_path, pk_path, witness_path).map_err(|e| {
crate::execute::setup(
model,
srs_path,
vk_path,
pk_path,
witness_path,
compress_selectors,
)
.map_err(|e| {
let err_str = format!("Failed to run setup: {}", e);
PyRuntimeError::new_err(err_str)
})?;
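compress_selectors now threads through crate::execute::setup into key generation, so selector compression can be toggled per call. For example, with vk_path/pk_path left at their DEFAULT_VK/DEFAULT_PK fallbacks:

import ezkl

ezkl.setup("network.compiled", compress_selectors=False)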
@@ -682,7 +706,8 @@ fn verify(
pk_path=PathBuf::from(DEFAULT_PK_AGGREGATED),
logrows=DEFAULT_AGGREGATED_LOGROWS.parse().unwrap(),
split_proofs = false,
srs_path = None
srs_path = None,
compress_selectors=DEFAULT_COMPRESS_SELECTORS.parse().unwrap(),
))]
fn setup_aggregate(
sample_snarks: Vec<PathBuf>,
@@ -691,6 +716,7 @@ fn setup_aggregate(
logrows: u32,
split_proofs: bool,
srs_path: Option<PathBuf>,
compress_selectors: bool,
) -> Result<bool, PyErr> {
crate::execute::setup_aggregate(
sample_snarks,
@@ -699,6 +725,7 @@ fn setup_aggregate(
srs_path,
logrows,
split_proofs,
compress_selectors,
)
.map_err(|e| {
let err_str = format!("Failed to setup aggregate: {}", e);
@@ -794,6 +821,7 @@ fn verify_aggr(
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE),
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
srs_path=None,
render_vk_seperately = DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap(),
))]
fn create_evm_verifier(
vk_path: PathBuf,
@@ -801,12 +829,20 @@ fn create_evm_verifier(
sol_code_path: PathBuf,
abi_path: PathBuf,
srs_path: Option<PathBuf>,
render_vk_seperately: bool,
) -> Result<bool, PyErr> {
crate::execute::create_evm_verifier(vk_path, srs_path, settings_path, sol_code_path, abi_path)
.map_err(|e| {
let err_str = format!("Failed to run create_evm_verifier: {}", e);
PyRuntimeError::new_err(err_str)
})?;
crate::execute::create_evm_verifier(
vk_path,
srs_path,
settings_path,
sol_code_path,
abi_path,
render_vk_seperately,
)
.map_err(|e| {
let err_str = format!("Failed to run create_evm_verifier: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
}
@@ -872,7 +908,7 @@ fn setup_test_evm_witness(
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE),
rpc_url=None,
optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
private_key=None
private_key=None,
))]
fn deploy_evm(
addr_path: PathBuf,
@@ -889,6 +925,39 @@ fn deploy_evm(
addr_path,
optimizer_runs,
private_key,
"Halo2Verifier",
))
.map_err(|e| {
let err_str = format!("Failed to run deploy_evm: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
}
#[pyfunction(signature = (
addr_path,
sol_code_path=PathBuf::from(DEFAULT_VK_SOL),
rpc_url=None,
optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
private_key=None,
))]
fn deploy_vk_evm(
addr_path: PathBuf,
sol_code_path: PathBuf,
rpc_url: Option<String>,
optimizer_runs: usize,
private_key: Option<String>,
) -> Result<bool, PyErr> {
Runtime::new()
.unwrap()
.block_on(crate::execute::deploy_evm(
sol_code_path,
rpc_url,
addr_path,
optimizer_runs,
private_key,
"Halo2VerifyingKey",
))
.map_err(|e| {
let err_str = format!("Failed to run deploy_evm: {}", e);
@@ -940,12 +1009,14 @@ fn deploy_da_evm(
proof_path=PathBuf::from(DEFAULT_PROOF),
rpc_url=None,
addr_da = None,
addr_vk = None,
))]
fn verify_evm(
addr_verifier: &str,
proof_path: PathBuf,
rpc_url: Option<String>,
addr_da: Option<&str>,
addr_vk: Option<&str>,
) -> Result<bool, PyErr> {
let addr_verifier = H160::from_str(addr_verifier).map_err(|e| {
let err_str = format!("address is invalid: {}", e);
@@ -960,6 +1031,15 @@ fn verify_evm(
} else {
None
};
let addr_vk = if let Some(addr_vk) = addr_vk {
let addr_vk = H160::from_str(addr_vk).map_err(|e| {
let err_str = format!("address is invalid: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Some(addr_vk)
} else {
None
};
Runtime::new()
.unwrap()
@@ -968,6 +1048,7 @@ fn verify_evm(
addr_verifier,
rpc_url,
addr_da,
addr_vk,
))
.map_err(|e| {
let err_str = format!("Failed to run verify_evm: {}", e);
@@ -985,6 +1066,7 @@ fn verify_evm(
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
logrows=DEFAULT_AGGREGATED_LOGROWS.parse().unwrap(),
srs_path=None,
render_vk_seperately = DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap(),
))]
fn create_evm_verifier_aggr(
aggregation_settings: Vec<PathBuf>,
@@ -993,6 +1075,7 @@ fn create_evm_verifier_aggr(
abi_path: PathBuf,
logrows: u32,
srs_path: Option<PathBuf>,
render_vk_seperately: bool,
) -> Result<bool, PyErr> {
crate::execute::create_evm_aggregate_verifier(
vk_path,
@@ -1001,6 +1084,7 @@ fn create_evm_verifier_aggr(
abi_path,
aggregation_settings,
logrows,
render_vk_seperately,
)
.map_err(|e| {
let err_str = format!("Failed to run create_evm_verifier_aggr: {}", e);
@@ -1022,19 +1106,18 @@ fn print_proof_hex(proof_path: PathBuf) -> Result<String, PyErr> {
// Python Module
#[pymodule]
fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
// NOTE: DeployVerifierEVM and SendProofEVM will be implemented in python in pyezkl
pyo3_log::init();
m.add_class::<PyRunArgs>()?;
m.add_class::<PyG1Affine>()?;
m.add_class::<PyG1>()?;
m.add_class::<PyTestDataSource>()?;
m.add_function(wrap_pyfunction!(vecu64_to_felt, m)?)?;
m.add_function(wrap_pyfunction!(vecu64_to_int, m)?)?;
m.add_function(wrap_pyfunction!(vecu64_to_float, m)?)?;
m.add_function(wrap_pyfunction!(string_to_felt, m)?)?;
m.add_function(wrap_pyfunction!(string_to_int, m)?)?;
m.add_function(wrap_pyfunction!(string_to_float, m)?)?;
m.add_function(wrap_pyfunction!(kzg_commit, m)?)?;
m.add_function(wrap_pyfunction!(swap_proof_commitments, m)?)?;
m.add_function(wrap_pyfunction!(poseidon_hash, m)?)?;
m.add_function(wrap_pyfunction!(float_to_vecu64, m)?)?;
m.add_function(wrap_pyfunction!(float_to_string, m)?)?;
m.add_function(wrap_pyfunction!(buffer_to_felts, m)?)?;
m.add_function(wrap_pyfunction!(gen_vk_from_pk_aggr, m)?)?;
m.add_function(wrap_pyfunction!(gen_vk_from_pk_single, m)?)?;
@@ -1055,6 +1138,7 @@ fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
m.add_function(wrap_pyfunction!(verify_aggr, m)?)?;
m.add_function(wrap_pyfunction!(create_evm_verifier, m)?)?;
m.add_function(wrap_pyfunction!(deploy_evm, m)?)?;
m.add_function(wrap_pyfunction!(deploy_vk_evm, m)?)?;
m.add_function(wrap_pyfunction!(deploy_da_evm, m)?)?;
m.add_function(wrap_pyfunction!(verify_evm, m)?)?;
m.add_function(wrap_pyfunction!(print_proof_hex, m)?)?;

View File

@@ -30,11 +30,11 @@ use halo2_proofs::{
poly::Rotation,
};
use itertools::Itertools;
use std::cmp::max;
use std::error::Error;
use std::fmt::Debug;
use std::iter::Iterator;
use std::ops::{Add, Deref, DerefMut, Div, Mul, Neg, Range, Sub};
use std::{cmp::max, ops::Rem};
use thiserror::Error;
/// A wrapper for tensor related errors.
#[derive(Debug, Error)]
@@ -1452,6 +1452,43 @@ impl<T: TensorType + Div<Output = T> + std::marker::Send + std::marker::Sync> Di
}
}
// implement remainder
impl<T: TensorType + Rem<Output = T> + std::marker::Send + std::marker::Sync> Rem for Tensor<T> {
type Output = Result<Tensor<T>, TensorError>;
/// Elementwise remainder of a tensor with another tensor.
/// # Arguments
/// * `self` - Tensor
/// * `rhs` - Tensor
/// # Examples
/// ```
/// use ezkl::tensor::Tensor;
/// use std::ops::Rem;
/// let x = Tensor::<i32>::new(
/// Some(&[4, 1, 4, 1, 1, 4]),
/// &[2, 3],
/// ).unwrap();
/// let y = Tensor::<i32>::new(
/// Some(&[2, 1, 2, 1, 1, 1]),
/// &[2, 3],
/// ).unwrap();
/// let result = x.rem(y).unwrap();
/// let expected = Tensor::<i32>::new(Some(&[0, 0, 0, 0, 0, 0]), &[2, 3]).unwrap();
/// assert_eq!(result, expected);
/// ```
fn rem(self, rhs: Self) -> Self::Output {
let broadcasted_shape = get_broadcasted_shape(self.dims(), rhs.dims()).unwrap();
let mut lhs = self.expand(&broadcasted_shape).unwrap();
let rhs = rhs.expand(&broadcasted_shape).unwrap();
lhs.par_iter_mut().zip(rhs).for_each(|(o, r)| {
*o = o.clone() % r;
});
Ok(lhs)
}
}
/// Returns the broadcasted shape of two tensors
/// ```
/// use ezkl::tensor::get_broadcasted_shape;

View File

@@ -950,8 +950,7 @@ pub fn neg<T: TensorType + Neg<Output = T> + std::marker::Send + std::marker::Sy
/// Elementwise multiplies multiple tensors.
/// # Arguments
///
/// * `a` - Tensor
/// * `b` - Tensor
/// * `t` - Tensors
/// # Examples
/// ```
/// use ezkl::tensor::Tensor;
@@ -993,6 +992,45 @@ pub fn mult<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::S
Ok(output)
}
/// Divides multiple tensors.
/// # Arguments
/// * `t` - Tensors
/// # Examples
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::tensor::ops::div;
/// let x = Tensor::<i128>::new(
/// Some(&[2, 1, 2, 1, 1, 1]),
/// &[2, 3],
/// ).unwrap();
/// let k = Tensor::<i128>::new(
/// Some(&[2, 3, 2, 1, 1, 1]),
/// &[2, 3],
/// ).unwrap();
/// let result = div(&[x, k]).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 0, 1, 1, 1, 1]), &[2, 3]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn div<
T: TensorType
+ Div<Output = T>
+ Mul<Output = T>
+ From<u64>
+ std::marker::Send
+ std::marker::Sync,
>(
t: &[Tensor<T>],
) -> Result<Tensor<T>, TensorError> {
// calculate value of output
let mut output: Tensor<T> = t[0].clone();
for e in t[1..].iter() {
output = (output / e.clone())?;
}
Ok(output)
}
/// Rescale a tensor with a const integer (similar to const_mult).
/// # Arguments
///

View File

@@ -72,7 +72,7 @@ pub fn encodeVerifierCalldata(
/// Converts a serialized field element to a felt hex string
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn vecU64ToFelt(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
pub fn stringToFelt(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
Ok(format!("{:?}", felt))
@@ -81,7 +81,7 @@ pub fn vecU64ToFelt(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsE
/// Converts a serialized field element directly to an integer
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn vecU64ToInt(
pub fn stringToInt(
array: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
@@ -95,7 +95,7 @@ pub fn vecU64ToInt(
/// Converts a serialized field element directly to a (rescaled from fixed point scaling) floating point
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn vecU64ToFloat(
pub fn stringToFloat(
array: wasm_bindgen::Clamped<Vec<u8>>,
scale: crate::Scale,
) -> Result<f64, JsError> {
@@ -109,23 +109,23 @@ pub fn vecU64ToFloat(
/// Converts a floating point element to a string-serialized fixed point field element
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn floatToVecU64(
pub fn floatTostring(
input: f64,
scale: crate::Scale,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let int_rep =
quantize_float(&input, 0.0, scale).map_err(|e| JsError::new(&format!("{}", e)))?;
let felt = i128_to_felt(int_rep);
let vec = crate::pfsys::field_to_vecu64_montgomery::<halo2curves::bn256::Fr>(&felt);
let vec = crate::pfsys::field_to_string_montgomery::<halo2curves::bn256::Fr>(&felt);
Ok(wasm_bindgen::Clamped(serde_json::to_vec(&vec).map_err(
|e| JsError::new(&format!("Failed to serialize vecu64_montgomery{}", e)),
|e| JsError::new(&format!("Failed to serialize string_montgomery{}", e)),
)?))
}
/// Converts a buffer to a vector of string-serialized fixed point field elements
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn bufferToVecOfVecU64(
pub fn bufferToVecOfstring(
buffer: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
// Convert the buffer to a slice
@@ -224,6 +224,7 @@ pub fn genWitness(
pub fn genVk(
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
params_ser: wasm_bindgen::Clamped<Vec<u8>>,
compress_selectors: bool,
) -> Result<Vec<u8>, JsError> {
// Read in kzg params
let mut reader = std::io::BufReader::new(&params_ser[..]);
@@ -235,9 +236,13 @@ pub fn genVk(
.map_err(|e| JsError::new(&format!("Failed to deserialize compiled model: {}", e)))?;
// Create verifying key
let vk = create_vk_wasm::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(&circuit, &params)
.map_err(Box::<dyn std::error::Error>::from)
.map_err(|e| JsError::new(&format!("Failed to create verifying key: {}", e)))?;
let vk = create_vk_wasm::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
&circuit,
&params,
compress_selectors,
)
.map_err(Box::<dyn std::error::Error>::from)
.map_err(|e| JsError::new(&format!("Failed to create verifying key: {}", e)))?;
let mut serialized_vk = Vec::new();
vk.write(&mut serialized_vk, halo2_proofs::SerdeFormat::RawBytes)
@@ -497,6 +502,7 @@ pub fn srsValidation(srs: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsErro
pub fn create_vk_wasm<Scheme: CommitmentScheme, F: PrimeField + TensorType, C: Circuit<F>>(
circuit: &C,
params: &'_ Scheme::ParamsProver,
compress_selectors: bool,
) -> Result<VerifyingKey<Scheme::Curve>, halo2_proofs::plonk::Error>
where
C: Circuit<Scheme::Scalar>,
@@ -506,7 +512,7 @@ where
let empty_circuit = <C as Circuit<F>>::without_witnesses(circuit);
// Initialize the verifying key
let vk = keygen_vk(params, &empty_circuit)?;
let vk = keygen_vk_custom(params, &empty_circuit, compress_selectors)?;
Ok(vk)
}
/// Creates a [ProvingKey] from a [VerifyingKey] for a [GraphCircuit] (`circuit`) with specific [CommitmentScheme] parameters (`params`) for the WASM target

View File

@@ -170,9 +170,9 @@ mod native_tests {
}
}
const PF_FAILURE: &str = "examples/test_failure.proof";
const PF_FAILURE: &str = "examples/test_failure_proof.json";
const PF_FAILURE_AGGR: &str = "examples/test_failure_aggr.proof";
const PF_FAILURE_AGGR: &str = "examples/test_failure_aggr_proof.json";
const LARGE_TESTS: [&str; 5] = [
"self_attention",
@@ -360,7 +360,7 @@ mod native_tests {
#[cfg(feature = "icicle")]
const TESTS_AGGR: [&str; 3] = ["1l_mlp", "1l_flatten", "1l_average"];
const TESTS_EVM: [&str; 21] = [
const TESTS_EVM: [&str; 23] = [
"1l_mlp",
"1l_flatten",
"1l_average",
@@ -376,12 +376,14 @@ mod native_tests {
"1l_tanh",
"2l_relu_sigmoid_small",
"2l_relu_small",
"2l_relu_fc",
"min",
"max",
"1l_max_pool",
"idolmodel",
"1l_identity",
"lstm",
"rnn",
"quantize_dequantize",
];
const TESTS_EVM_AGGR: [&str; 18] = [
@@ -510,13 +512,23 @@ mod native_tests {
test_dir.close().unwrap();
}
#(#[test_case(TESTS[N])])*
fn accuracy_measurement_div_rebase_(test: &str) {
crate::native_tests::init_binary();
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 2.6, true);
test_dir.close().unwrap();
}
#(#[test_case(TESTS[N])])*
fn accuracy_measurement_public_outputs_(test: &str) {
crate::native_tests::init_binary();
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 2.6);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 2.6, false);
test_dir.close().unwrap();
}
@@ -526,7 +538,7 @@ mod native_tests {
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "private", "fixed", "private", 1, "accuracy", 2.6);
accuracy_measurement(path, test.to_string(), "private", "fixed", "private", 1, "accuracy", 2.6 , false);
test_dir.close().unwrap();
}
@@ -536,7 +548,7 @@ mod native_tests {
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "public", "private", "private", 1, "accuracy", 2.6);
accuracy_measurement(path, test.to_string(), "public", "private", "private", 1, "accuracy", 2.6, false);
test_dir.close().unwrap();
}
@@ -547,7 +559,7 @@ mod native_tests {
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "resources", 18.0);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "resources", 18.0, false);
test_dir.close().unwrap();
}
@@ -824,10 +836,10 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
env_logger::init();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
kzg_prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, Some(vec![0,1]), true, "single");
kzg_prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, None, true, "single");
#[cfg(not(feature = "icicle"))]
run_js_tests(path, test.to_string(), "testWasm");
test_dir.close().unwrap();
// test_dir.close().unwrap();
}
#(#[test_case(WASM_TESTS[N])])*
@@ -837,7 +849,7 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
env_logger::init();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
kzg_prove_and_verify(path, test.to_string(), "safe", "private", "fixed", "public", 1, Some(vec![0,1]), true, "single");
kzg_prove_and_verify(path, test.to_string(), "safe", "private", "fixed", "public", 1, None, true, "single");
#[cfg(not(feature = "icicle"))]
run_js_tests(path, test.to_string(), "testWasm");
test_dir.close().unwrap();
@@ -853,7 +865,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
kzg_prove_and_verify(path, test.to_string(), "unsafe", "private", "fixed", "public", 1, Some(vec![0,6]), false, "single");
kzg_prove_and_verify(path, test.to_string(), "unsafe", "private", "fixed", "public", 1, None, false, "single");
test_dir.close().unwrap();
}
@@ -863,7 +875,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", Some(vec![0,6]));
mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", None);
test_dir.close().unwrap();
}
});
@@ -880,7 +892,8 @@ mod native_tests {
use crate::native_tests::TESTS_EVM_AGGR;
use test_case::test_case;
use crate::native_tests::kzg_evm_prove_and_verify;
use crate::native_tests::run_js_tests;
use crate::native_tests::kzg_evm_prove_and_verify_render_seperately;
use crate::native_tests::kzg_evm_on_chain_input_prove_and_verify;
use crate::native_tests::kzg_evm_aggr_prove_and_verify;
use crate::native_tests::kzg_fuzz;
@@ -955,7 +968,7 @@ mod native_tests {
});
seq!(N in 0..= 17 {
seq!(N in 0..=17 {
// these take a particularly long time to run
#(#[test_case(TESTS_EVM_AGGR[N])])*
#[ignore]
@@ -971,7 +984,7 @@ mod native_tests {
});
seq!(N in 0..= 20 {
seq!(N in 0..=22 {
#(#[test_case(TESTS_EVM[N])])*
fn kzg_evm_prove_and_verify_(test: &str) {
@@ -979,9 +992,22 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(false, Hardfork::Latest);
kzg_evm_prove_and_verify(path, test.to_string(), "private", "private", "public");
#[cfg(not(feature = "icicle"))]
run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
kzg_evm_prove_and_verify(2, path, test.to_string(), "private", "private", "public");
// #[cfg(not(feature = "icicle"))]
// run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
test_dir.close().unwrap();
}
#(#[test_case(TESTS_EVM[N])])*
fn kzg_evm_prove_and_verify_render_seperately_(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(false, Hardfork::Latest);
kzg_evm_prove_and_verify_render_seperately(2, path, test.to_string(), "private", "private", "public");
// #[cfg(not(feature = "icicle"))]
// run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
test_dir.close().unwrap();
}
@@ -993,9 +1019,9 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let mut _anvil_child = crate::native_tests::start_anvil(false, Hardfork::Latest);
kzg_evm_prove_and_verify(path, test.to_string(), "hashed", "private", "private");
#[cfg(not(feature = "icicle"))]
run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
kzg_evm_prove_and_verify(2, path, test.to_string(), "hashed", "private", "private");
// #[cfg(not(feature = "icicle"))]
// run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
test_dir.close().unwrap();
}
@@ -1010,9 +1036,9 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let mut _anvil_child = crate::native_tests::start_anvil(false, hardfork);
kzg_evm_prove_and_verify(path, test.to_string(), "kzgcommit", "private", "public");
#[cfg(not(feature = "icicle"))]
run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
kzg_evm_prove_and_verify(2, path, test.to_string(), "kzgcommit", "private", "public");
// #[cfg(not(feature = "icicle"))]
// run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
test_dir.close().unwrap();
}
@@ -1023,9 +1049,9 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(false, Hardfork::Latest);
kzg_evm_prove_and_verify(path, test.to_string(), "private", "hashed", "public");
#[cfg(not(feature = "icicle"))]
run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
kzg_evm_prove_and_verify(2, path, test.to_string(), "private", "hashed", "public");
// #[cfg(not(feature = "icicle"))]
// run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
test_dir.close().unwrap();
}
@@ -1036,9 +1062,9 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(false, Hardfork::Latest);
kzg_evm_prove_and_verify(path, test.to_string(), "private", "private", "hashed");
#[cfg(not(feature = "icicle"))]
run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
kzg_evm_prove_and_verify(2, path, test.to_string(), "private", "private", "hashed");
// #[cfg(not(feature = "icicle"))]
// run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
test_dir.close().unwrap();
}
@@ -1049,9 +1075,9 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(false, Hardfork::Latest);
kzg_evm_prove_and_verify(path, test.to_string(), "private", "kzgcommit", "public");
#[cfg(not(feature = "icicle"))]
run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
kzg_evm_prove_and_verify(2, path, test.to_string(), "private", "kzgcommit", "public");
// #[cfg(not(feature = "icicle"))]
// run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
test_dir.close().unwrap();
}
@@ -1062,9 +1088,9 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(false, Hardfork::Latest);
kzg_evm_prove_and_verify(path, test.to_string(), "private", "private", "kzgcommit");
#[cfg(not(feature = "icicle"))]
run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
kzg_evm_prove_and_verify(2, path, test.to_string(), "private", "private", "kzgcommit");
// #[cfg(not(feature = "icicle"))]
// run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
test_dir.close().unwrap();
}
@@ -1074,9 +1100,9 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(false, Hardfork::Latest);
kzg_evm_prove_and_verify(path, test.to_string(), "kzgcommit", "kzgcommit", "kzgcommit");
#[cfg(not(feature = "icicle"))]
run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
kzg_evm_prove_and_verify(2, path, test.to_string(), "kzgcommit", "kzgcommit", "kzgcommit");
// #[cfg(not(feature = "icicle"))]
// run_js_tests(path, test.to_string(), "testBrowserEvmVerify");
test_dir.close().unwrap();
}
@@ -1257,6 +1283,7 @@ mod native_tests {
cal_target,
scales_to_use,
2,
false,
);
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
@@ -1283,22 +1310,29 @@ mod native_tests {
cal_target: &str,
scales_to_use: Option<Vec<u32>>,
num_inner_columns: usize,
div_rebasing: bool,
) {
let mut args = vec![
"gen-settings".to_string(),
"-M".to_string(),
format!("{}/{}/network.onnx", test_dir, example_name),
format!(
"--settings-path={}/{}/settings.json",
test_dir, example_name
),
format!("--variables=batch_size={}", batch_size),
format!("--input-visibility={}", input_visibility),
format!("--param-visibility={}", param_visibility),
format!("--output-visibility={}", output_visibility),
format!("--num-inner-cols={}", num_inner_columns),
];
if div_rebasing {
args.push("--div-rebasing".to_string());
};
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"gen-settings",
"-M",
format!("{}/{}/network.onnx", test_dir, example_name).as_str(),
&format!(
"--settings-path={}/{}/settings.json",
test_dir, example_name
),
&format!("--variables=batch_size={}", batch_size),
&format!("--input-visibility={}", input_visibility),
&format!("--param-visibility={}", param_visibility),
&format!("--output-visibility={}", output_visibility),
&format!("--num-inner-cols={}", num_inner_columns),
])
.args(args)
.stdout(std::process::Stdio::null())
.status()
.expect("failed to execute process");
@@ -1376,6 +1410,7 @@ mod native_tests {
batch_size: usize,
cal_target: &str,
target_perc: f32,
div_rebasing: bool,
) {
gen_circuit_settings_and_witness(
test_dir,
@@ -1387,6 +1422,7 @@ mod native_tests {
cal_target,
None,
2,
div_rebasing,
);
println!(
@@ -1645,6 +1681,7 @@ mod native_tests {
target_str,
scales_to_use,
num_inner_columns,
false,
);
let settings_path = format!("{}/{}/settings.json", test_dir, example_name);
@@ -1721,6 +1758,7 @@ mod native_tests {
"resources",
None,
2,
false,
);
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
@@ -1740,6 +1778,7 @@ mod native_tests {
// prove-serialize-verify, the usual full path
fn kzg_evm_prove_and_verify(
num_inner_columns: usize,
test_dir: &str,
example_name: String,
input_visibility: &str,
@@ -1755,7 +1794,7 @@ mod native_tests {
input_visibility,
param_visibility,
output_visibility,
2,
num_inner_columns,
None,
false,
"single",
@@ -1830,6 +1869,137 @@ mod native_tests {
assert!(!status.success());
}
// prove-serialize-verify, the usual full path
fn kzg_evm_prove_and_verify_render_seperately(
num_inner_columns: usize,
test_dir: &str,
example_name: String,
input_visibility: &str,
param_visibility: &str,
output_visibility: &str,
) {
let anvil_url = ANVIL_URL.as_str();
kzg_prove_and_verify(
test_dir,
example_name.clone(),
"safe",
input_visibility,
param_visibility,
output_visibility,
num_inner_columns,
None,
false,
"single",
);
let settings_path = format!("{}/{}/settings.json", test_dir, example_name);
init_params(settings_path.clone().into());
let vk_arg = format!("{}/{}/key.vk", test_dir, example_name);
let rpc_arg = format!("--rpc-url={}", anvil_url);
let addr_path_arg = format!("--addr-path={}/{}/addr.txt", test_dir, example_name);
let settings_arg = format!("--settings-path={}", settings_path);
let sol_arg = format!("--sol-code-path={}/{}/kzg.sol", test_dir, example_name);
// create the verifier
let args = vec![
"create-evm-verifier",
"--vk-path",
&vk_arg,
&settings_arg,
&sol_arg,
"--render-vk-seperately",
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
let addr_path_arg_vk = format!("--addr-path={}/{}/addr_vk.txt", test_dir, example_name);
let sol_arg_vk = format!("--sol-code-path={}/{}/vk.sol", test_dir, example_name);
// create the vk
let args = vec![
"create-evm-vk",
"--vk-path",
&vk_arg,
&settings_arg,
&sol_arg_vk,
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
// deploy the verifier
let args = vec![
"deploy-evm-verifier",
rpc_arg.as_str(),
addr_path_arg.as_str(),
sol_arg.as_str(),
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
// read in the address
let addr = std::fs::read_to_string(format!("{}/{}/addr.txt", test_dir, example_name))
.expect("failed to read address file");
let deployed_addr_arg = format!("--addr-verifier={}", addr);
// deploy the vk
let args = vec![
"deploy-evm-vk",
rpc_arg.as_str(),
addr_path_arg_vk.as_str(),
sol_arg_vk.as_str(),
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
// read in the address
let addr_vk = std::fs::read_to_string(format!("{}/{}/addr_vk.txt", test_dir, example_name))
.expect("failed to read address file");
let deployed_addr_arg_vk = format!("--addr-vk={}", addr_vk);
// now verify the proof
let pf_arg = format!("{}/{}/proof.pf", test_dir, example_name);
let mut args = vec![
"verify-evm",
"--proof-path",
pf_arg.as_str(),
rpc_arg.as_str(),
deployed_addr_arg.as_str(),
deployed_addr_arg_vk.as_str(),
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
// As sanity check, add example that should fail.
args[2] = PF_FAILURE;
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(args)
.status()
.expect("failed to execute process");
assert!(!status.success());
}
// run js browser evm verify tests for a given example
fn run_js_tests(test_dir: &str, example_name: String, js_test: &str) {
let status = Command::new("pnpm")
@@ -1864,6 +2034,7 @@ mod native_tests {
// we need the accuracy
Some(vec![7, 8]),
1,
false,
);
let model_path = format!("{}/{}/network.compiled", test_dir, example_name);

View File

@@ -12,7 +12,7 @@ def get_ezkl_output(witness_file, settings_file):
outputs = witness_output['outputs']
with open(settings_file) as f:
settings = json.load(f)
ezkl_outputs = [[ezkl.vecu64_to_float(
ezkl_outputs = [[ezkl.string_to_float(
outputs[i][j], settings['model_output_scales'][i]) for j in range(len(outputs[i]))] for i in range(len(outputs))]
return ezkl_outputs

View File

@@ -118,38 +118,38 @@ mod py_tests {
}
const TESTS: [&str; 32] = [
"proof_splitting.ipynb", // 0
"variance.ipynb",
"proof_splitting.ipynb",
"mnist_gan_proof_splitting.ipynb",
"mnist_gan.ipynb",
// "mnist_vae.ipynb",
"keras_simple_demo.ipynb",
"hashed_vis.ipynb",
"mnist_gan_proof_splitting.ipynb", // 4
"hashed_vis.ipynb", // 5
"simple_demo_all_public.ipynb",
"data_attest.ipynb",
"little_transformer.ipynb",
"simple_demo_aggregated_proofs.ipynb",
"ezkl_demo.ipynb",
"ezkl_demo.ipynb", // 10
"lstm.ipynb",
"set_membership.ipynb",
"decision_tree.ipynb",
"random_forest.ipynb",
"gradient_boosted_trees.ipynb",
"gradient_boosted_trees.ipynb", // 15
"xgboost.ipynb",
"lightgbm.ipynb",
"svm.ipynb",
"simple_demo_public_input_output.ipynb",
"simple_demo_public_network_output.ipynb",
"simple_demo_public_network_output.ipynb", // 20
"gcn.ipynb",
"linear_regression.ipynb",
"stacked_regression.ipynb",
"data_attest_hashed.ipynb",
"kzg_vis.ipynb",
"kzg_vis.ipynb", // 25
"kmeans.ipynb",
"solvency.ipynb",
"sklearn_mlp.ipynb",
"generalized_inverse.ipynb",
"mnist_classifier.ipynb",
"mnist_classifier.ipynb", // 30
"world_rotation.ipynb",
];

View File

@@ -56,9 +56,9 @@ def test_poseidon_hash():
Test for poseidon_hash
"""
message = [1.0, 2.0, 3.0, 4.0]
message = [ezkl.float_to_vecu64(x, 7) for x in message]
message = [ezkl.float_to_string(x, 7) for x in message]
res = ezkl.poseidon_hash(message)
assert ezkl.vecu64_to_felt(
assert ezkl.string_to_felt(
res[0]) == "0x0da7e5e5c8877242fa699f586baf770d731defd54f952d4adeb85047a0e32f45"
@@ -70,14 +70,14 @@ def test_field_serialization():
input = 890
scale = 7
felt = ezkl.float_to_vecu64(input, scale)
roundtrip_input = ezkl.vecu64_to_float(felt, scale)
felt = ezkl.float_to_string(input, scale)
roundtrip_input = ezkl.string_to_float(felt, scale)
assert input == roundtrip_input
input = -700
scale = 7
felt = ezkl.float_to_vecu64(input, scale)
roundtrip_input = ezkl.vecu64_to_float(felt, scale)
felt = ezkl.float_to_string(input, scale)
roundtrip_input = ezkl.string_to_float(felt, scale)
assert input == roundtrip_input

View File

@@ -8,10 +8,10 @@ mod wasm32 {
use ezkl::graph::GraphWitness;
use ezkl::pfsys;
use ezkl::wasm::{
bufferToVecOfVecU64, compiledCircuitValidation, encodeVerifierCalldata, genPk, genVk,
bufferToVecOfstring, compiledCircuitValidation, encodeVerifierCalldata, genPk, genVk,
genWitness, inputValidation, pkValidation, poseidonHash, printProofHex, proofValidation,
prove, settingsValidation, srsValidation, u8_array_to_u128_le, vecU64ToFelt, vecU64ToFloat,
vecU64ToInt, verify, vkValidation, witnessValidation,
prove, settingsValidation, srsValidation, stringToFelt, stringToFloat, stringToInt,
u8_array_to_u128_le, verify, vkValidation, witnessValidation,
};
use halo2_solidity_verifier::encode_calldata;
use halo2curves::bn256::{Fr, G1Affine};
@@ -26,7 +26,7 @@ mod wasm32 {
pub const NETWORK_COMPILED: &[u8] = include_bytes!("../tests/wasm/model.compiled");
pub const NETWORK: &[u8] = include_bytes!("../tests/wasm/network.onnx");
pub const INPUT: &[u8] = include_bytes!("../tests/wasm/input.json");
pub const PROOF: &[u8] = include_bytes!("../tests/wasm/test.proof");
pub const PROOF: &[u8] = include_bytes!("../tests/wasm/proof.json");
pub const SETTINGS: &[u8] = include_bytes!("../tests/wasm/settings.json");
pub const PK: &[u8] = include_bytes!("../tests/wasm/pk.key");
pub const VK: &[u8] = include_bytes!("../tests/wasm/vk.key");
@@ -78,19 +78,19 @@ mod wasm32 {
let serialized = serde_json::to_vec(&field_element).unwrap();
let clamped = wasm_bindgen::Clamped(serialized);
let scale = 2;
let floating_point = vecU64ToFloat(clamped.clone(), scale)
let floating_point = stringToFloat(clamped.clone(), scale)
.map_err(|_| "failed")
.unwrap();
assert_eq!(floating_point, (i as f64) / 4.0);
let integer: i128 = serde_json::from_slice(
&vecU64ToInt(clamped.clone()).map_err(|_| "failed").unwrap(),
&stringToInt(clamped.clone()).map_err(|_| "failed").unwrap(),
)
.unwrap();
assert_eq!(integer, i as i128);
let hex_string = format!("{:?}", field_element);
let returned_string = vecU64ToFelt(clamped).map_err(|_| "failed").unwrap();
let returned_string = stringToFelt(clamped).map_err(|_| "failed").unwrap();
assert_eq!(hex_string, returned_string);
}
}
@@ -101,7 +101,7 @@ mod wasm32 {
let mut buffer = string_high.clone().into_bytes();
let clamped = wasm_bindgen::Clamped(buffer.clone());
let field_elements_ser = bufferToVecOfVecU64(clamped).map_err(|_| "failed").unwrap();
let field_elements_ser = bufferToVecOfstring(clamped).map_err(|_| "failed").unwrap();
let field_elements: Vec<Fr> = serde_json::from_slice(&field_elements_ser[..]).unwrap();
@@ -118,7 +118,7 @@ mod wasm32 {
let buffer = string_sample.clone().into_bytes();
let clamped = wasm_bindgen::Clamped(buffer.clone());
let field_elements_ser = bufferToVecOfVecU64(clamped).map_err(|_| "failed").unwrap();
let field_elements_ser = bufferToVecOfstring(clamped).map_err(|_| "failed").unwrap();
let field_elements: Vec<Fr> = serde_json::from_slice(&field_elements_ser[..]).unwrap();
@@ -133,7 +133,7 @@ mod wasm32 {
let buffer = string_concat.into_bytes();
let clamped = wasm_bindgen::Clamped(buffer.clone());
let field_elements_ser = bufferToVecOfVecU64(clamped).map_err(|_| "failed").unwrap();
let field_elements_ser = bufferToVecOfstring(clamped).map_err(|_| "failed").unwrap();
let field_elements: Vec<Fr> = serde_json::from_slice(&field_elements_ser[..]).unwrap();
@@ -186,6 +186,7 @@ mod wasm32 {
let vk = genVk(
wasm_bindgen::Clamped(NETWORK_COMPILED.to_vec()),
wasm_bindgen::Clamped(SRS.to_vec()),
true,
)
.map_err(|_| "failed")
.unwrap();
@@ -206,6 +207,7 @@ mod wasm32 {
let vk = genVk(
wasm_bindgen::Clamped(NETWORK_COMPILED.to_vec()),
wasm_bindgen::Clamped(SRS.to_vec()),
true,
)
.map_err(|_| "failed")
.unwrap();
@@ -218,6 +220,7 @@ mod wasm32 {
let vk = genVk(
wasm_bindgen::Clamped(NETWORK_COMPILED.to_vec()),
wasm_bindgen::Clamped(SRS.to_vec()),
true,
)
.map_err(|_| "failed")
.unwrap();

Binary file not shown.

Binary file not shown.

File diff suppressed because one or more lines are too long

View File

@@ -1 +1,60 @@
{"run_args":{"tolerance":{"val":0.0,"scale":1.0},"input_scale":0,"param_scale":0,"scale_rebase_multiplier":10,"lookup_range":[-2,0],"logrows":6,"num_inner_cols":2,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Private"},"num_rows":16,"total_assignments":32,"total_const_size":8,"model_instance_shapes":[[1,4]],"model_output_scales":[0],"model_input_scales":[0],"module_sizes":{"kzg":[],"poseidon":[0,[0]]},"required_lookups":["ReLU"],"check_mode":"UNSAFE","version":"0.0.0","num_blinding_factors":null,"timestamp":1702474230544}
{
"run_args": {
"tolerance": {
"val": 0.0,
"scale": 1.0
},
"input_scale": 0,
"param_scale": 0,
"scale_rebase_multiplier": 10,
"lookup_range": [
-2,
0
],
"logrows": 6,
"num_inner_cols": 2,
"variables": [
[
"batch_size",
1
]
],
"input_visibility": "Private",
"output_visibility": "Public",
"param_visibility": "Private",
"div_rebasing": false,
"check_mode": "UNSAFE"
},
"num_rows": 16,
"total_assignments": 32,
"total_const_size": 8,
"model_instance_shapes": [
[
1,
4
]
],
"model_output_scales": [
0
],
"model_input_scales": [
0
],
"module_sizes": {
"kzg": [],
"poseidon": [
0,
[
0
]
]
},
"required_lookups": [
"ReLU"
],
"required_range_checks": [],
"check_mode": "UNSAFE",
"version": "0.0.0",
"num_blinding_factors": null,
"timestamp": 1702474230544
}

File diff suppressed because one or more lines are too long

View File

@@ -1,7 +1,6 @@
import * as fs from 'fs/promises';
import * as fsSync from 'fs'
import JSONBig from 'json-bigint';
import { vecU64ToFelt } from '@ezkljs/engine/nodejs'
const solc = require('solc');
// import os module

Binary file not shown.

View File

@@ -1 +1 @@
{"inputs":[[[6425625360762666998,7924344314350639699,14762033076929465436,2023505479389396574],[12436184717236109307,3962172157175319849,7381016538464732718,1011752739694698287],[12436184717236109307,3962172157175319849,7381016538464732718,1011752739694698287]]],"pretty_elements":{"rescaled_inputs":[["2","1","1"]],"inputs":[["0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000001"]],"processed_inputs":[],"processed_params":[],"processed_outputs":[],"rescaled_outputs":[["0","0","0","0"]],"outputs":[["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"]]},"outputs":[[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]],"processed_inputs":null,"processed_params":null,"processed_outputs":null,"max_lookup_inputs":0,"min_lookup_inputs":-1}
{"inputs":[["0200000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000"]],"pretty_elements":{"rescaled_inputs":[["2","1","1"]],"inputs":[["0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000001"]],"processed_inputs":[],"processed_params":[],"processed_outputs":[],"rescaled_outputs":[["0","0","0","0"]],"outputs":[["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"]]},"outputs":[["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000"]],"processed_inputs":null,"processed_params":null,"processed_outputs":null,"max_lookup_inputs":0,"min_lookup_inputs":-1}