149 Commits

Author SHA1 Message Date
Daniel Tehrani
4bf236a1a5 Merge pull request #45 from 0xisk/main
refactor: minor enhancements on the lib package
2023-12-05 14:12:43 -08:00
Daniel Tehrani
ede2b29cac fix: use tsconfig.build.json so vscode can annotate test files as well 2023-12-02 08:45:37 +09:00
0xisk
cb273002c0 refactor: destructure params, update package metadata, enhance file naming and structure. 2023-11-29 20:41:48 +01:00
Daniel Tehrani
a08876d1ff Merge pull request #44 from 0xisk/main
chore: refactor lib package exports to named for clarity
2023-11-25 13:42:03 +09:00
isk
8b0529f868 chore: refactor lib exports to named for clarity 2023-11-24 11:40:18 +01:00
Daniel Tehrani
3827e5de67 v2.3.1 2023-10-11 16:33:48 +09:00
Daniel Tehrani
53049c8cbe fix: this led to an unintentional breaking change 2023-10-11 16:32:19 +09:00
Daniel Tehrani
2fab170a15 Merge pull request #41 from AtHeartEngineer/patch-1
Update package.json
2023-10-11 11:27:15 +09:00
Daniel Tehrani
82518aabc3 Remove unused snarkjs installations 2023-10-11 11:22:57 +09:00
Daniel Tehrani
e8c95a975b Update yarn.lock 2023-10-11 11:10:44 +09:00
AtHeartEngineer
4cf4b36efd Update package.json
Bump snarkjs version to get rid of a vulnerability warning, even though it doesn't affect this project since spartan-ecdsa only uses the witness generation
2023-10-10 21:18:56 -04:00
Daniel Tehrani
63f534a23a v2.3.0 2023-10-05 19:24:51 +09:00
Daniel Tehrani
4d7c3c73df feat: allow specifying using a remote circuit for the prover 2023-10-05 19:13:59 +09:00
Daniel Tehrani
baedc727fb v2.2.0 2023-09-21 14:58:58 +09:00
Daniel Tehrani
eeb704a7d1 feat: allow specifying using the remote circuit 2023-09-21 14:53:35 +09:00
Dan Tehrani
5dae5e1aa4 Update README.md 2023-06-24 00:34:04 +09:00
Daniel Tehrani
3386b30d9b v2.1.4 2023-05-29 12:17:16 +02:00
Daniel Tehrani
19e1ddd4ef Add method verifyProof to Tree 2023-05-29 12:14:23 +02:00
Daniel Tehrani
85fc788204 Remove multi threading config (not doing multi threading at this moment) 2023-05-26 16:01:57 +02:00
Daniel Tehrani
5fa21ec9c3 Commit lock file 2023-05-12 16:17:33 +02:00
Daniel Tehrani
f05b40e7f3 v2.1.3 2023-05-12 13:36:30 +02:00
Daniel Tehrani
adb73907fd Fix WebAssembly instantiation 2023-05-12 13:34:38 +02:00
Daniel Tehrani
86338b7d4d v2.1.2 2023-05-05 13:28:48 +02:00
Daniel Tehrani
4b8acc430d feat: use lazy_static to load the constants 2023-05-05 12:49:40 +02:00
Daniel Tehrani
4d72a30b63 feat: only allow two inputs 2023-05-05 12:48:59 +02:00
Dan Tehrani
ebf2b5f6cc Update README.md 2023-04-30 11:02:21 +02:00
Daniel Tehrani
9da3bb96ab v2.1.1 2023-04-24 11:01:47 +02:00
Daniel Tehrani
84a54098b5 Fix the javascript wasm compile 2023-04-24 11:01:29 +02:00
Daniel Tehrani
b7e9c32e05 v2.1.0 2023-03-29 16:20:57 +09:00
Daniel Tehrani
bfef367bfb Merge branch 'main' of https://github.com/personaelabs/spartan-ecdsa 2023-03-29 13:59:14 +09:00
Dan Tehrani
feaad7e337 Attempt to fix failing build (3) 2023-03-29 13:55:49 +09:00
Daniel Tehrani
ef7e590464 Separate circuit bin file generator from spartan_wasm 2023-03-28 16:35:50 +09:00
Daniel Tehrani
3d0f6f5a01 Attempt to fix failing build (2) 2023-03-28 11:56:58 +09:00
Daniel Tehrani
885e92019e Attempt to fix failing build 2023-03-28 11:48:39 +09:00
Daniel Tehrani
70614aa862 Specify correct rust version 2023-03-28 11:40:30 +09:00
Daniel Tehrani
2061eacf8b Add Github Action workflow for deploying the npm package 2023-03-28 11:32:54 +09:00
Daniel Tehrani
4fa672024b Expose leaves and delete functions 2023-03-23 19:45:30 +09:00
Dan Tehrani
035689ac8f Merge pull request #31 from personaelabs/fix/apply-1e431e2
Fix/apply 1e431e2
2023-03-15 15:47:39 +09:00
Daniel Tehrani
1055716bf3 v2.0.0 2023-03-15 10:12:19 +09:00
Daniel Tehrani
a019e051f5 Apply commit 1e431e2 from microsoft/Spartan 2023-03-15 09:56:39 +09:00
Daniel Tehrani
40ddce1c01 Update wasm.js to the latest Spartan wasm compile 2023-03-15 09:54:41 +09:00
Daniel Tehrani
6bc45a95a1 Remove .gitignore in subdir since we only use the one in root 2023-03-15 09:54:15 +09:00
Daniel Tehrani
e0b9cad003 Add Spartan-secq to the workspace 2023-03-15 09:01:52 +09:00
Dan Tehrani
955f3a2344 Merge pull request #29 from personaelabs/lsankar/publish-circuits-npm
publish spartan-ecdsa-circuits on npm
2023-03-11 13:00:04 +09:00
Dan Tehrani
41b3a48a49 Merge pull request #30 from personaelabs/lsankar/fix-url
link to hosted blog post instead of localhost
2023-03-11 12:59:09 +09:00
lsankar4033
f40a2928b0 link to hosted blog post 2023-03-08 09:55:09 -08:00
lsankar4033
d5c3513286 publish spartan-ecdsa-circuits on npm 2023-03-05 20:17:04 -07:00
lsankar4033
62a666ccca add pragma version to poseidon_constants.circom 2023-03-05 12:31:18 -07:00
Dan Tehrani
6196feb45b Merge pull request #26 from personaelabs/lsankar/move-spartan-secq-in
Inline spartan-secq dependency
2023-02-24 15:00:16 +09:00
lsankar4033
b0996b6d1e remove github CI tests from spartan-secq 2023-02-23 07:06:04 -08:00
Daniel Tehrani
8da070d660 Remove .vscode from subdir since we have it in the root dir 2023-02-23 16:37:33 +09:00
lsankar4033
edb72871f6 update deps 2023-02-22 20:08:43 -08:00
lsankar4033
ac30f8e4dd import spartan-secq in 2023-02-22 19:48:39 -08:00
Vivek Bhupatiraju
a8532b4173 fixed some latex 2023-02-21 18:31:29 -08:00
Vivek Bhupatiraju
4f64f4e857 rewrote constraint breakdown for correctness 2023-02-21 18:30:14 -08:00
Vivek Bhupatiraju
4f9185b0be added constraint breakdown to show ECC operations aren't bottleneck 2023-02-20 13:36:21 -08:00
Lakshman Sankar
6d6f8306ff point to correct blog post url 2023-02-03 09:59:22 -08:00
Dan Tehrani
fea05fd139 Update README.md 2023-02-03 09:20:53 +09:00
Dan Tehrani
ea71962d40 Merge pull request #22 from personaelabs/test/ecc_circuits
Testing the ECC circuits
2023-02-02 15:39:07 +09:00
Daniel Tehrani
099c73edfd Merge branch 'main' into test/ecc_circuits 2023-02-02 15:33:18 +09:00
Daniel Tehrani
a8adbb1cc2 Missed these changes when fixing verification 2023-02-02 15:31:32 +09:00
Daniel Tehrani
d3a68a993b Merge branch 'main' into test/ecc_circuits 2023-02-02 15:27:42 +09:00
Daniel Tehrani
4891c26056 Small fix 2023-02-02 15:23:29 +09:00
Daniel Tehrani
49789bd291 v1.0.2 2023-02-02 15:13:11 +09:00
Dan Tehrani
65f9e3ea0d Merge pull request #23 from personaelabs/dan/fix/pub-input-verify
Dan/fix/pub input verify
2023-02-02 15:11:08 +09:00
Daniel Tehrani
d0471f5866 Rename file 2023-02-02 15:08:16 +09:00
Daniel Tehrani
994b1583df Update Readme 2023-02-02 15:07:31 +09:00
Daniel Tehrani
ed0993a0a5 Fix verification: Add public input validation 2023-02-02 15:06:39 +09:00
Vivek Bhupatiraju
3a14e75fd7 change added to commit 2023-02-02 00:02:32 -05:00
Vivek Bhupatiraju
e53a1dd8ec annotated circuits and tests with comments, reviewed code carefully and it matches the Zcash code 2023-02-01 22:30:08 -05:00
Daniel Tehrani
fcb816816e Correct comment 2023-02-02 10:56:42 +09:00
Daniel Tehrani
627c769e99 v1.0.1 2023-02-02 10:45:43 +09:00
Daniel Tehrani
209dd75d94 Update lib README 2023-02-02 10:40:07 +09:00
Daniel Tehrani
63c82256f3 Throw when a URL is passed as reference to circuit/wasm in Node.js env 2023-02-02 10:36:17 +09:00
Daniel Tehrani
83a100b432 Publish 0.0.5 2023-02-02 10:17:02 +09:00
Daniel Tehrani
5680aa3b9b Add disclaimers 2023-02-02 10:15:50 +09:00
Dan Tehrani
c9271eb9f3 Merge pull request #20 from personaelabs/dan/embed-spartan-wasm
Embed spartan_wasm.wasm & Remove circuit artifacts from lib
2023-02-02 10:01:28 +09:00
Daniel Tehrani
99274dc167 Update README.md 2023-02-01 18:33:36 +09:00
Daniel Tehrani
75cb788f46 Add MIT license for other packages 2023-02-01 18:16:57 +09:00
Daniel Tehrani
09439545c2 Add GPL-3.0 LICENSE for circuits and lib
circuits/ and lib/ use circomlib and snarkjs respectively, which are licensed under GPL-3.0
2023-02-01 18:08:09 +09:00
Daniel Tehrani
9ddf979a46 Only initialize wasm once 2023-02-01 17:49:46 +09:00
Daniel Tehrani
12dfe56fb5 Merge branch 'main' into dan/embed-spartan-wasm 2023-01-31 21:25:47 +09:00
Daniel Tehrani
d895639b5d Add tests re: invalid proofs 2023-01-31 21:23:35 +09:00
Daniel Tehrani
209570aee7 Add/update READMEs 2023-01-31 20:48:41 +09:00
Daniel Tehrani
b954c2c497 Refactor 2023-01-31 20:47:22 +09:00
Daniel Tehrani
d00d44b1e4 Move hashtofile.rs 2023-01-31 20:45:18 +09:00
Daniel Tehrani
11d81929c0 Refactor & Add comments 2023-01-31 20:44:24 +09:00
Daniel Tehrani
e75dc8530e Add mention about the Merkle tree depth 2023-01-31 20:12:27 +09:00
Dan Tehrani
be0df0a8a3 Update membership_prover.ts 2023-01-31 19:53:44 +09:00
Dan Tehrani
d1d8b00397 Update membership_verifier.ts 2023-01-31 19:53:23 +09:00
Dan Tehrani
7b51926a2b Update README.md 2023-01-31 19:50:32 +09:00
Daniel Tehrani
cdf22bc00d Include download link to default config warning 2023-01-31 19:45:30 +09:00
Daniel Tehrani
c4e6822e70 Add circuit download links in the README 2023-01-31 19:38:40 +09:00
Daniel Tehrani
e650bfe7e3 Warn default config use 2023-01-31 19:15:15 +09:00
Daniel Tehrani
c83f97cec0 Update README.md 2023-01-31 14:55:38 +09:00
Daniel Tehrani
221150b28d Small change 2023-01-31 14:25:44 +09:00
Daniel Tehrani
2a3c867b78 Rename loadWasm -> embedWasmBytes 2023-01-31 14:23:08 +09:00
Daniel Tehrani
b72f288d9d Don't include circuit artifacts in the build
The circuit artifacts are too large for publishing to the npm registry. (possible, but not desirable)
2023-01-31 14:22:02 +09:00
Daniel Tehrani
6e0eaafd07 Small fix 2023-01-31 14:19:59 +09:00
Daniel Tehrani
5d54fe98d1 Output the built wasm under spartan_wasm
Simpler to have the build files under its own source dir
2023-01-31 13:54:36 +09:00
Daniel Tehrani
e15985f103 Fix merge conflicts 2023-01-31 13:46:12 +09:00
Daniel Tehrani
f16ebd418d Merge branch 'main' into dan/embed-spartan-wasm 2023-01-31 13:36:47 +09:00
Dan Tehrani
0697208cb5 Merge pull request #19 from personaelabs/dan/hashtocurve
Dan/hashtocurve
2023-01-31 13:25:06 +09:00
Daniel Tehrani
96cf0f21f1 Refactor 2023-01-31 13:20:34 +09:00
Daniel Tehrani
42f827bc8f Fix 2023-01-31 12:02:10 +09:00
Daniel Tehrani
0331001a51 Refactor 2023-01-31 11:59:27 +09:00
Daniel Tehrani
a6709d5725 Rename file 2023-01-31 11:37:24 +09:00
Daniel Tehrani
6246d39dbe Modify to be compatible with from_uniform_bytes 2023-01-31 11:31:29 +09:00
Dan Tehrani
1fde1c218a Merge pull request #15 from personaelabs/lsankar/membership-verifier
membership verifier in ts lib
2023-01-31 09:55:26 +09:00
Daniel Tehrani
0d488ef219 Enable profiler in web benchmark 2023-01-31 09:51:28 +09:00
lsankar4033
1395e70ed6 typo 2023-01-30 13:19:27 -08:00
Lakshman Sankar
a677536d3e Merge branch 'main' into lsankar/membership-verifier 2023-01-30 13:16:25 -08:00
lsankar4033
972d08391a add verification to node benchmarks 2023-01-30 13:09:55 -08:00
lsankar4033
aa8a9a4721 add verification to web benchmark 2023-01-30 12:59:53 -08:00
lsankar4033
9c2f1cbfe4 fix typo 2023-01-30 10:25:22 -08:00
lsankar4033
dd80fd146c fix lerna build 2023-01-30 09:45:11 -08:00
Daniel Tehrani
9482932284 Fix warnings 2023-01-30 16:32:55 +09:00
Daniel Tehrani
714bf04ba7 Fix 2023-01-30 16:30:52 +09:00
Daniel Tehrani
fed4d553ce Merge branch 'main' into dan/hashtocurve 2023-01-30 13:52:32 +09:00
Dan Tehrani
d4c7e77955 Merge pull request #16 from personaelabs/dan/refactor/secpq
Dan/refactor/secpq
2023-01-30 13:27:30 +09:00
Daniel Tehrani
4d4e21c14c Paste code used to read r1cs from Nova-Scotia & detach Nova-Scotia from deps 2023-01-30 11:43:29 +09:00
Daniel Tehrani
e487f1cb24 Comment out Neptune which is causing build issues 2023-01-30 11:42:16 +09:00
Daniel Tehrani
5f8fcd0be0 Fix tests 2023-01-30 10:34:18 +09:00
Daniel Tehrani
40dfab4328 Detach secpq_curves 2023-01-30 09:32:58 +09:00
Daniel Tehrani
398a3409f3 Use secq256k1 instead of secpq_curves 2023-01-30 09:31:44 +09:00
Daniel Tehrani
0228738d1c Re-export the Group trait 2023-01-30 09:30:50 +09:00
Dan Tehrani
bab4cc9d0c Paste file link 2023-01-30 08:59:35 +09:00
Daniel Tehrani
4e21aabf0c Update README.md 2023-01-29 21:38:04 +09:00
Daniel Tehrani
7286f5be97 Fix 2023-01-29 21:33:12 +09:00
Daniel Tehrani
d1f54a6a8c Hardcode wasm into ts file & add script for embedded wasm ops 2023-01-29 21:10:50 +09:00
Daniel Tehrani
f6ffbb4fc8 Update package name 2023-01-29 20:20:19 +09:00
Daniel Tehrani
d62305aeee Config lib for the npm registry 2023-01-29 15:27:27 +09:00
Daniel Tehrani
151000f250 Update lib name 2023-01-29 14:47:20 +09:00
Daniel Tehrani
64b80a128c Throw err with message when wasm isn't initialized 2023-01-29 14:32:48 +09:00
Daniel Tehrani
0570665f26 Add build script 2023-01-29 14:29:55 +09:00
Daniel Tehrani
50f60758a4 Use correct poseidon constants 2023-01-29 14:27:10 +09:00
Daniel Tehrani
6de547856d Add doc re: poseidon params 2023-01-29 13:33:26 +09:00
Daniel Tehrani
a290a1936b Fix poseidon param generation 2023-01-29 13:32:58 +09:00
Daniel Tehrani
82fab20975 Add sage script to check poseidon params security inequalities 2023-01-29 13:32:41 +09:00
Daniel Tehrani
f41768f14a Return correct public input from prover 2023-01-29 13:16:05 +09:00
lsankar4033
663ecb0dfe add test task 2023-01-28 20:05:03 -08:00
lsankar4033
bc7be8d232 typo 2023-01-28 20:04:58 -08:00
lsankar4033
35c4725462 paste in simple tests 2023-01-28 18:07:52 -08:00
lsankar4033
b856f41279 Options -> Config to be consistent with prover 2023-01-28 17:55:25 -08:00
lsankar4033
e44dd69ae7 verify function (no tests) 2023-01-28 17:33:47 -08:00
Lakshman Sankar
e62385ec97 Merge pull request #14 from personaelabs/lsankar/fix-lerna-build
Fix lerna build
2023-01-27 07:53:34 -08:00
Daniel Tehrani
4b2cbbecec Fix constants 2023-01-27 11:35:43 +09:00
Daniel Tehrani
31299a465d Implement hash2curve for both secp and secq 2023-01-26 13:58:45 +09:00
Daniel Tehrani
2f1c6c9867 Add sage scripts to generate the params 2023-01-26 13:54:18 +09:00
147 changed files with 14,904 additions and 5,388 deletions

.cargo/config

@@ -1,5 +1,5 @@
[target.wasm32-unknown-unknown]
rustflags = ["-C", "target-feature=+atomics,+bulk-memory,+mutable-globals", "-C", "link-arg=--max-memory=4294967296"]
rustflags = ["-C", "link-arg=--max-memory=4294967296"]
[unstable]
build-std = ["panic_abort", "std"]

.eslintignore (new file, +1)

@@ -0,0 +1 @@
wasm_bytes.ts

.github/workflows/publish.yaml (vendored, new file, +42)

@@ -0,0 +1,42 @@
name: Publish Package to npmjs
on:
release:
types: [published]
workflow_dispatch:
jobs:
publish:
runs-on: macos-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.ref_name }}
# Setup Node.js
- uses: actions/setup-node@v3
with:
node-version: 18
registry-url: "https://registry.npmjs.org"
# Setup Rust
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-10-31
- run: rustup component add rust-src
- run: rustup target add x86_64-apple-darwin
# Install circom-secq
- uses: GuillaumeFalourd/clone-github-repo-action@v2
with:
owner: "DanTehrani"
repository: "circom-secq"
- run: cd circom-secq && cargo build --release && cargo install --path circom
# Install wasm-pack
- uses: jetli/wasm-pack-action@v0.4.0
with:
version: "v0.10.3"
- run: cargo test --release
- run: yarn
- run: yarn build
- run: yarn test
- run: npm publish
working-directory: ./packages/lib
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

.gitignore (vendored, 12 lines changed)

@@ -2,9 +2,6 @@
# will have compiled files and executables
/target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
@@ -26,12 +23,19 @@ circom_witness.wtns
*.ptau
build/
dist/
*.r1cs
*.sym
!test_circuit.r1cs
packages/prover/test_circuit/test_circuit_js/
#input files
packages/prover/test_circuit/*.json
packages/lib/src/circuits/
wasmBytes.ts
**/sage/*.sage.py
packages/lib/src/circuits/
packages/lib/example/

.prettierignore (new file, +1)

@@ -0,0 +1 @@
wasm_bytes.ts

.vscode/settings.json

@@ -1,6 +1,7 @@
{
"editor.formatOnSave": true,
"cSpell.words": [
"merkle",
"NIZK"
]
}

Cargo.lock (generated, new file, +1398; diff suppressed because it is too large)

Cargo.toml

@@ -1,7 +1,8 @@
[workspace]
members = [
"packages/spartan_wasm",
"packages/secpq_curves",
"packages/secq256k1",
"packages/poseidon",
"packages/Spartan-secq",
"packages/circuit_reader",
]

README.md

@@ -1,23 +1,70 @@
-# spartan-ecdsa
+# Spartan-ecdsa
-Efficient in-browser zero-knowledge ECDSA membership proving
+Spartan-ecdsa is, to our knowledge, the fastest open-source method to verify ECDSA (secp256k1) signatures in zero-knowledge. It can prove ECDSA group membership 10 times faster than [efficient-zk-ecdsa](https://github.com/personaelabs/efficient-zk-ecdsa), our previous implementation of fast ECDSA signature proving. Please refer to [this blog post](https://personaelabs.org/posts/spartan-ecdsa/) for further information.
-## What we use
+## Constraint breakdown
-**Spartan**
+Spartan-ecdsa achieves the phenomenal result of **hashing becoming the bottleneck instead of ECC operations** for the `pubkey_membership.circom` circuit. In particular, there are **3,039** constraints for efficient ECDSA signature verification, and **5,037** constraints for a depth-20 Merkle tree membership check + 1 Poseidon hash of the ECDSA public key. The drop from the original 1.5 million constraints of [circom-ecdsa](https://github.com/0xPARC/circom-ecdsa) comes primarily from doing right-field arithmetic with secq and avoiding SNARK-unfriendly range checks and big-integer math.
-- We use a fork of Spartan that operates over the secq256k1 curve.
+We also use [efficient ECDSA signatures](https://personaelabs.org/posts/efficient-ecdsa-1/) instead of standard ECDSA signatures to save an additional **14,505** constraints. To review, the standard ECDSA signature consists of $(r, s)$ for a public key $Q_a$ and message $m$, where $r$ is the x-coordinate of a random elliptic curve point $R$. Standard ECDSA signature verification checks if
-**Nova-Scotia**
+```math
+R == m s^{-1} * G + r s^{-1} * Q_a
+```
-- We use a fork of Nova-Scotia to compile Circom circuits into a binary format that Spartan can process. We slightly modify Nova-Scotia to be compatible with secq256k1.
+where $G$ is the generator point of the curve. The efficient ECDSA signature consists of $s$ as well as $T = r^{-1} * R$ and $U = -r^{-1} * m * G$, which can both be computed outside of the SNARK without breaking correctness. Efficient ECDSA signature verification checks if
-## About proving
+```math
+s * T + U == Q_a
+```
-**Witness generation**
+Thus, verifying a standard ECDSA signature instead of the efficient ECDSA signature requires (1) computing $s^{-1}$, $r \cdot s^{-1}$, and $m \cdot s^{-1}$, and (2) an extra ECC scalar multiplication to compute $m s^{-1} * G$. The former computations happen in the scalar field of secp, which is unequal to the scalar field of secq, so we incur 11,494 additional constraints for the wrong-field math. The latter can use the `Secp256k1Mul` subroutine and incurs 3,011 additional constraints.
-- We use the wasm witness generator generated by Circom to compute the witness. More specifically, the witness generation is done by running `snarkJs.wtns.calculate` (the actual code [here](https://github.com/personaelabs/spartan-ecdsa/blob/bdc0ce4a5aa75934b9ca604b1cac9dbfc51d0134/packages/browser_benchmark/lib/prover/spartan.ts#L4)).
+## Benchmarks
-**Proof generation**
+Proving membership to a group of ECDSA public keys:
-- The prover is a SpartanNIZK prover in wasm, which reads a circuit compiled by Nova-Scotia.
+| Benchmark | # |
+| :--------------------------: | :---: |
+| Constraints | 8,076 |
+| Proving time in browser | 4s |
+| Proving time in Node.js | 2s |
+| Verification time in browser | 1s |
+| Verification time in Node.js | 300ms |
+| Proof size | 16 KB |
+- Measured on an M1 MacBook Pro with 80 Mbps internet speed.
+- Both proving and verification time in browser include the time to download the circuit.
+## Disclaimers
+- Spartan-ecdsa is unaudited. Please use it at your own risk.
+- Usage on mobile browsers isn't currently supported.
+## Install
+```sh
+yarn add @personaelabs/spartan-ecdsa
+```
+## Development
+### Node.js
+v18 or later
+### Build
+1. Install Circom with secq256k1 support
+```sh
+git clone https://github.com/DanTehrani/circom-secq
+cd circom-secq && cargo build --release && cargo install --path circom
+```
+2. Install [wasm-pack](https://rustwasm.github.io/wasm-pack/installer/)
+3. Install dependencies & build all packages
+```sh
+yarn && yarn build
+```
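To make the efficient-ECDSA identity above concrete, here is a standalone Rust sketch of the check. It is illustrative only: spartan-ecdsa performs this verification inside its Circom circuits, whereas the sketch assumes the `k256` and `rand_core` crates (not dependencies of this repo) and treats `r` as an abstract nonzero scalar rather than the reduced x-coordinate of $R$.

```rust
use k256::elliptic_curve::Field; // Scalar::random / invert
use k256::{ProjectivePoint, Scalar};
use rand_core::OsRng;

fn main() {
    let g = ProjectivePoint::GENERATOR;

    // Toy signing flow, only to exercise the algebra.
    let d = Scalar::random(&mut OsRng); // secret key
    let q_a = g * d; // public key Q_a
    let m = Scalar::random(&mut OsRng); // message hash
    let k = Scalar::random(&mut OsRng); // nonce
    let big_r = g * k; // R = k * G
    let r = Scalar::random(&mut OsRng); // stand-in for R's x-coordinate
    let s = k.invert().unwrap() * (m + r * d); // standard ECDSA s

    // T and U are computed outside the SNARK...
    let r_inv = r.invert().unwrap();
    let t = big_r * r_inv; // T = r^{-1} * R
    let u = g * (-(r_inv * m)); // U = -r^{-1} * m * G

    // ...so the circuit only needs the single check s * T + U == Q_a.
    assert_eq!(t * s + u, q_a);
    println!("efficient-ECDSA identity holds");
}
```

Moving the $r^{-1}$ computations outside the proof is exactly what removes the wrong-field inversions counted in the constraint breakdown above.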

package.json

@@ -5,6 +5,10 @@
"main": "index.js",
"repository": "https://github.com/DanTehrani/spartan-wasm.git",
"author": "Daniel Tehrani <contact@dantehrani.com>",
"scripts": {
"build": "sh ./scripts/build.sh && lerna run build",
"test": "sh ./scripts/test.sh"
},
"devDependencies": {
"@types/jest": "^29.2.4",
"@typescript-eslint/eslint-plugin": "5.49.0",
@@ -12,8 +16,7 @@
"eslint-plugin-react": "7.32.1",
"eslint-plugin-react-hooks": "4.6.0",
"eslint-plugin-security": "1.7.0",
"lerna": "^6.4.0",
"snarkjs": "^0.5.0"
"lerna": "^6.4.0"
},
"workspaces": [
"packages/lib",

packages/Spartan-secq/CODE_OF_CONDUCT.md

@@ -0,0 +1,9 @@
# Microsoft Open Source Code of Conduct
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
Resources:
- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns

packages/Spartan-secq/CONTRIBUTING.md

@@ -0,0 +1,12 @@
This project welcomes contributions and suggestions. Most contributions require you to
agree to a Contributor License Agreement (CLA) declaring that you have the right to,
and actually do, grant us the rights to use your contribution. For details, visit
https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need
to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the
instructions provided by the bot. You will only need to do this once across all repositories using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

packages/Spartan-secq/Cargo.toml

@@ -0,0 +1,57 @@
[package]
name = "spartan"
version = "0.7.1"
authors = ["Srinath Setty <srinath@microsoft.com>"]
edition = "2021"
description = "High-speed zkSNARKs without trusted setup"
documentation = "https://docs.rs/spartan/"
readme = "README.md"
repository = "https://github.com/microsoft/Spartan"
license-file = "LICENSE"
keywords = ["zkSNARKs", "cryptography", "proofs"]
[dependencies]
num-bigint-dig = "^0.7"
secq256k1 = { path = "../secq256k1" }
merlin = "3.0.0"
rand = "0.7.3"
digest = "0.8.1"
sha3 = "0.8.2"
byteorder = "1.3.4"
rayon = { version = "1.3.0", optional = true }
serde = { version = "1.0.106", features = ["derive"] }
bincode = "1.2.1"
subtle = { version = "2.4", default-features = false }
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "1", default-features = false }
itertools = "0.10.0"
colored = "2.0.0"
flate2 = "1.0.14"
thiserror = "1.0"
num-traits = "0.2.15"
hex-literal = { version = "0.3" }
multiexp = "0.2.2"
[dev-dependencies]
criterion = "0.3.1"
[lib]
name = "libspartan"
path = "src/lib.rs"
crate-type = ["cdylib", "rlib"]
[[bin]]
name = "snark"
path = "profiler/snark.rs"
[[bin]]
name = "nizk"
path = "profiler/nizk.rs"
[[bench]]
name = "snark"
harness = false
[[bench]]
name = "nizk"
harness = false

packages/Spartan-secq/LICENSE

@@ -0,0 +1,21 @@
MIT License
Copyright (c) Microsoft Corporation.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE

packages/Spartan-secq/README.md

@@ -0,0 +1,10 @@
## Fork of [Spartan](https://github.com/microsoft/Spartan)
_This fork is still under development._
Modify Spartan to operate over the **base field** of secp256k1.
### Changes from the original Spartan
- Use the secq256k1 crate instead of curve25519-dalek
- Modify values in scalar.rs (originally ristretto255.rs)
Please refer to [spartan-ecdsa](https://github.com/personaelabs/spartan-ecdsa) for development status.
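As a footnote to the base-field remark above: secq256k1 is secp256k1 with the roles of the base-field prime p and the group order n exchanged, which is why this fork's scalar field can natively host secp256k1 coordinate arithmetic. A minimal standalone sketch pinning down the two primes, assuming the num-bigint crate (the fork itself uses num-bigint-dig):

```rust
use num_bigint::BigUint;

fn main() {
    // secp256k1 base-field prime p: also secq256k1's group order. This is the
    // modulus printed by the Montgomery-constants generator later in this diff.
    let p = BigUint::parse_bytes(
        b"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
        16,
    )
    .unwrap();
    // secp256k1 group order n: also secq256k1's base-field prime.
    let n = BigUint::parse_bytes(
        b"fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141",
        16,
    )
    .unwrap();
    // The primes differ, so arithmetic mod p inside a proof system whose native
    // field is mod n (or vice versa) is the "wrong-field math" the root README counts.
    assert_ne!(p, n);
    println!("p - n = {:x}", &p - &n);
}
```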

packages/Spartan-secq/SECURITY.md

@@ -0,0 +1,41 @@
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.3 BLOCK -->
## Security
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below.
## Reporting Security Issues
**Please do not report security vulnerabilities through public GitHub issues.**
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report).
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc).
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue
This information will help us triage your report more quickly.
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs.
## Preferred Languages
We prefer all communications to be in English.
## Policy
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd).
<!-- END MICROSOFT SECURITY.MD BLOCK -->

packages/Spartan-secq/benches/nizk.rs

@@ -0,0 +1,92 @@
#![allow(clippy::assertions_on_result_states)]
extern crate byteorder;
extern crate core;
extern crate criterion;
extern crate digest;
extern crate libspartan;
extern crate merlin;
extern crate rand;
extern crate sha3;
use libspartan::{Instance, NIZKGens, NIZK};
use merlin::Transcript;
use criterion::*;
fn nizk_prove_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("NIZK_prove_benchmark");
group.plot_config(plot_config);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
let name = format!("NIZK_prove_{}", num_vars);
group.bench_function(&name, move |b| {
b.iter(|| {
let mut prover_transcript = Transcript::new(b"example");
NIZK::prove(
black_box(&inst),
black_box(vars.clone()),
black_box(&inputs),
black_box(&gens),
black_box(&mut prover_transcript),
);
});
});
group.finish();
}
}
fn nizk_verify_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("NIZK_verify_benchmark");
group.plot_config(plot_config);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
// produce a proof of satisfiability
let mut prover_transcript = Transcript::new(b"example");
let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);
let name = format!("NIZK_verify_{}", num_cons);
group.bench_function(&name, move |b| {
b.iter(|| {
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(
black_box(&inst),
black_box(&inputs),
black_box(&mut verifier_transcript),
black_box(&gens)
)
.is_ok());
});
});
group.finish();
}
}
fn set_duration() -> Criterion {
Criterion::default().sample_size(10)
}
criterion_group! {
name = benches_nizk;
config = set_duration();
targets = nizk_prove_benchmark, nizk_verify_benchmark
}
criterion_main!(benches_nizk);

packages/Spartan-secq/benches/snark.rs

@@ -0,0 +1,131 @@
#![allow(clippy::assertions_on_result_states)]
extern crate libspartan;
extern crate merlin;
use libspartan::{Instance, SNARKGens, SNARK};
use merlin::Transcript;
use criterion::*;
fn snark_encode_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("SNARK_encode_benchmark");
group.plot_config(plot_config);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, _vars, _inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// produce a commitment to R1CS instance
let name = format!("SNARK_encode_{}", num_cons);
group.bench_function(&name, move |b| {
b.iter(|| {
SNARK::encode(black_box(&inst), black_box(&gens));
});
});
group.finish();
}
}
fn snark_prove_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("SNARK_prove_benchmark");
group.plot_config(plot_config);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// produce a commitment to R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof
let name = format!("SNARK_prove_{}", num_cons);
group.bench_function(&name, move |b| {
b.iter(|| {
let mut prover_transcript = Transcript::new(b"example");
SNARK::prove(
black_box(&inst),
black_box(&comm),
black_box(&decomm),
black_box(vars.clone()),
black_box(&inputs),
black_box(&gens),
black_box(&mut prover_transcript),
);
});
});
group.finish();
}
}
fn snark_verify_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("SNARK_verify_benchmark");
group.plot_config(plot_config);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// produce a commitment to R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof of satisfiability
let mut prover_transcript = Transcript::new(b"example");
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
vars,
&inputs,
&gens,
&mut prover_transcript,
);
// verify the proof
let name = format!("SNARK_verify_{}", num_cons);
group.bench_function(&name, move |b| {
b.iter(|| {
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(
black_box(&comm),
black_box(&inputs),
black_box(&mut verifier_transcript),
black_box(&gens)
)
.is_ok());
});
});
group.finish();
}
}
fn set_duration() -> Criterion {
Criterion::default().sample_size(10)
}
criterion_group! {
name = benches_snark;
config = set_duration();
targets = snark_encode_benchmark, snark_prove_benchmark, snark_verify_benchmark
}
criterion_main!(benches_snark);

packages/Spartan-secq/examples/cubic.rs

@@ -0,0 +1,147 @@
//! Demonstrates how to produce a proof for the canonical cubic equation: `x^3 + x + 5 = y`.
//! The example is described in detail [here].
//!
//! The R1CS for this problem consists of the following 4 constraints:
//! `Z0 * Z0 - Z1 = 0`
//! `Z1 * Z0 - Z2 = 0`
//! `(Z2 + Z0) * 1 - Z3 = 0`
//! `(Z3 + 5) * 1 - I0 = 0`
//!
//! [here]: https://medium.com/@VitalikButerin/quadratic-arithmetic-programs-from-zero-to-hero-f6d558cea649
#![allow(clippy::assertions_on_result_states)]
use libspartan::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK};
use merlin::Transcript;
use rand_core::OsRng;
use secq256k1::elliptic_curve::Field;
use secq256k1::Scalar;
#[allow(non_snake_case)]
fn produce_r1cs() -> (
usize,
usize,
usize,
usize,
Instance,
VarsAssignment,
InputsAssignment,
) {
// parameters of the R1CS instance
let num_cons = 4;
let num_vars = 4;
let num_inputs = 1;
let num_non_zero_entries = 8;
// We will encode the above constraints into three matrices, where
// the coefficients in the matrix are in the little-endian byte order
let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new();
let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
let one: [u8; 32] = Scalar::ONE.to_bytes().into();
// R1CS is a set of three sparse matrices A B C, where there is a row for every
// constraint and a column for every entry in z = (vars, 1, inputs)
// An R1CS instance is satisfiable iff:
// Az \circ Bz = Cz, where z = (vars, 1, inputs)
// constraint 0 entries in (A,B,C)
// constraint 0 is Z0 * Z0 - Z1 = 0.
A.push((0, 0, one));
B.push((0, 0, one));
C.push((0, 1, one));
// constraint 1 entries in (A,B,C)
// constraint 1 is Z1 * Z0 - Z2 = 0.
A.push((1, 1, one));
B.push((1, 0, one));
C.push((1, 2, one));
// constraint 2 entries in (A,B,C)
// constraint 2 is (Z2 + Z0) * 1 - Z3 = 0.
A.push((2, 2, one));
A.push((2, 0, one));
B.push((2, num_vars, one));
C.push((2, 3, one));
// constraint 3 entries in (A,B,C)
// constraint 3 is (Z3 + 5) * 1 - I0 = 0.
A.push((3, 3, one));
A.push((3, num_vars, Scalar::from(5u32).to_bytes().into()));
B.push((3, num_vars, one));
C.push((3, num_vars + 1, one));
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
// compute a satisfying assignment
let mut csprng: OsRng = OsRng;
let z0 = Scalar::random(&mut csprng);
let z1 = z0 * z0; // constraint 0
let z2 = z1 * z0; // constraint 1
let z3 = z2 + z0; // constraint 2
let i0 = z3 + Scalar::from(5u32); // constraint 3
// create a VarsAssignment
let mut vars: Vec<[u8; 32]> = vec![Scalar::ZERO.to_bytes().into(); num_vars];
vars[0] = z0.to_bytes().into();
vars[1] = z1.to_bytes().into();
vars[2] = z2.to_bytes().into();
vars[3] = z3.to_bytes().into();
let assignment_vars = VarsAssignment::new(&vars).unwrap();
// create an InputsAssignment
let mut inputs: Vec<[u8; 32]> = vec![Scalar::ZERO.to_bytes().into(); num_inputs];
inputs[0] = i0.to_bytes().into();
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
// check if the instance we created is satisfiable
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert!(res.unwrap(), "should be satisfied");
(
num_cons,
num_vars,
num_inputs,
num_non_zero_entries,
inst,
assignment_vars,
assignment_inputs,
)
}
fn main() {
// produce an R1CS instance
let (
num_cons,
num_vars,
num_inputs,
num_non_zero_entries,
inst,
assignment_vars,
assignment_inputs,
) = produce_r1cs();
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);
// create a commitment to the R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof of satisfiability
let mut prover_transcript = Transcript::new(b"snark_example");
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
assignment_vars,
&assignment_inputs,
&gens,
&mut prover_transcript,
);
// verify the proof of satisfiability
let mut verifier_transcript = Transcript::new(b"snark_example");
assert!(proof
.verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens)
.is_ok());
println!("proof verification successful!");
}

packages/Spartan-secq/profiler/nizk.rs

@@ -0,0 +1,52 @@
#![allow(non_snake_case)]
#![allow(clippy::assertions_on_result_states)]
extern crate flate2;
extern crate libspartan;
extern crate merlin;
extern crate rand;
use flate2::{write::ZlibEncoder, Compression};
use libspartan::{Instance, NIZKGens, NIZK};
use merlin::Transcript;
fn print(msg: &str) {
let star = "* ";
println!("{:indent$}{}{}", "", star, msg, indent = 2);
}
pub fn main() {
// the list of instance sizes: each R1CS instance has 2^s variables (and constraints)
let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20];
println!("Profiler:: NIZK");
for &s in inst_sizes.iter() {
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
// produce a synthetic R1CSInstance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public generators
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
// produce a proof of satisfiability
let mut prover_transcript = Transcript::new(b"nizk_example");
let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
bincode::serialize_into(&mut encoder, &proof).unwrap();
let proof_encoded = encoder.finish().unwrap();
let msg_proof_len = format!("NIZK::proof_compressed_len {:?}", proof_encoded.len());
print(&msg_proof_len);
// verify the proof of satisfiability
let mut verifier_transcript = Transcript::new(b"nizk_example");
assert!(proof
.verify(&inst, &inputs, &mut verifier_transcript, &gens)
.is_ok());
println!();
}
}

packages/Spartan-secq/profiler/snark.rs

@@ -0,0 +1,62 @@
#![allow(non_snake_case)]
#![allow(clippy::assertions_on_result_states)]
extern crate flate2;
extern crate libspartan;
extern crate merlin;
use flate2::{write::ZlibEncoder, Compression};
use libspartan::{Instance, SNARKGens, SNARK};
use merlin::Transcript;
fn print(msg: &str) {
let star = "* ";
println!("{:indent$}{}{}", "", star, msg, indent = 2);
}
pub fn main() {
// the list of instance sizes: each R1CS instance has 2^s variables (and constraints)
let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20];
println!("Profiler:: SNARK");
for &s in inst_sizes.iter() {
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
// produce a synthetic R1CSInstance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public generators
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// create a commitment to R1CSInstance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof of satisfiability
let mut prover_transcript = Transcript::new(b"snark_example");
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
vars,
&inputs,
&gens,
&mut prover_transcript,
);
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
bincode::serialize_into(&mut encoder, &proof).unwrap();
let proof_encoded = encoder.finish().unwrap();
let msg_proof_len = format!("SNARK::proof_compressed_len {:?}", proof_encoded.len());
print(&msg_proof_len);
// verify the proof of satisfiability
let mut verifier_transcript = Transcript::new(b"snark_example");
assert!(proof
.verify(&comm, &inputs, &mut verifier_transcript, &gens)
.is_ok());
println!();
}
}

packages/Spartan-secq/rustfmt.toml

@@ -0,0 +1,4 @@
edition = "2018"
tab_spaces = 2
newline_style = "Unix"
use_try_shorthand = true


@@ -0,0 +1,54 @@
use hex_literal::hex;
use num_bigint_dig::{BigInt, BigUint, ModInverse, ToBigInt};
use num_traits::{FromPrimitive, ToPrimitive};
use std::ops::Neg;
fn get_words(n: &BigUint) -> [u64; 4] {
let mut words = [0u64; 4];
for i in 0..4 {
let word = n.clone() >> (64 * i) & BigUint::from(0xffffffffffffffffu64);
words[i] = word.to_u64().unwrap();
}
words
}
fn render_hex(label: String, words: &[u64; 4]) {
println!("// {}", label);
for word in words {
println!("0x{:016x},", word);
}
}
fn main() {
let modulus = BigUint::from_bytes_be(&hex!(
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"
));
let r = BigUint::from_u8(2)
.unwrap()
.modpow(&BigUint::from_u64(256).unwrap(), &modulus);
let r2 = BigUint::from_u8(2)
.unwrap()
.modpow(&BigUint::from_u64(512).unwrap(), &modulus);
let r3 = BigUint::from_u8(2)
.unwrap()
.modpow(&BigUint::from_u64(768).unwrap(), &modulus);
let two_pow_64 = BigUint::from_u128(18446744073709551616u128).unwrap();
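// INV = -p^{-1} mod 2^64, the per-word constant used in Montgomery reduction;
// R, R2, R3 above are 2^256, 2^512, 2^768 mod p, used to move values into and
// out of Montgomery form.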
let one = BigInt::from_u8(1).unwrap();
let inv = modulus
.clone()
.mod_inverse(&two_pow_64)
.unwrap()
.neg()
.modpow(&one, &two_pow_64.to_bigint().unwrap());
render_hex("Modulus".to_string(), &get_words(&modulus));
render_hex("R".to_string(), &get_words(&r));
render_hex("R2".to_string(), &get_words(&r2));
render_hex("R3".to_string(), &get_words(&r3));
render_hex("INV".to_string(), &get_words(&inv.to_biguint().unwrap()));
}

packages/Spartan-secq/src/commitments.rs

@@ -0,0 +1,96 @@
use super::group::{GroupElement, VartimeMultiscalarMul};
use super::scalar::Scalar;
use digest::{ExtendableOutput, Input};
use secq256k1::AffinePoint;
use sha3::Shake256;
use std::io::Read;
#[derive(Debug)]
pub struct MultiCommitGens {
pub n: usize,
pub G: Vec<GroupElement>,
pub h: GroupElement,
}
impl MultiCommitGens {
pub fn new(n: usize, label: &[u8]) -> Self {
let mut shake = Shake256::default();
shake.input(label);
shake.input(AffinePoint::generator().compress().as_bytes());
let mut reader = shake.xof_result();
let mut gens: Vec<GroupElement> = Vec::new();
let mut uniform_bytes = [0u8; 128];
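// Each generator consumes 128 uniform bytes from the Shake256 XOF;
// from_uniform_bytes maps them onto the curve (the hash-to-curve added for
// secp/secq elsewhere in this change set), so G[0..n] and h have unknown
// discrete logs relative to one another.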
for _ in 0..n + 1 {
reader.read_exact(&mut uniform_bytes).unwrap();
gens.push(AffinePoint::from_uniform_bytes(&uniform_bytes));
}
MultiCommitGens {
n,
G: gens[..n].to_vec(),
h: gens[n],
}
}
pub fn clone(&self) -> MultiCommitGens {
MultiCommitGens {
n: self.n,
h: self.h,
G: self.G.clone(),
}
}
pub fn scale(&self, s: &Scalar) -> MultiCommitGens {
MultiCommitGens {
n: self.n,
h: self.h,
G: (0..self.n).map(|i| s * self.G[i]).collect(),
}
}
pub fn split_at(&self, mid: usize) -> (MultiCommitGens, MultiCommitGens) {
let (G1, G2) = self.G.split_at(mid);
(
MultiCommitGens {
n: G1.len(),
G: G1.to_vec(),
h: self.h,
},
MultiCommitGens {
n: G2.len(),
G: G2.to_vec(),
h: self.h,
},
)
}
}
pub trait Commitments {
fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement;
}
impl Commitments for Scalar {
fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
assert_eq!(gens_n.n, 1);
GroupElement::vartime_multiscalar_mul(
[*self, *blind].to_vec(),
[gens_n.G[0], gens_n.h].to_vec(),
)
}
}
impl Commitments for Vec<Scalar> {
fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
assert_eq!(gens_n.n, self.len());
GroupElement::vartime_multiscalar_mul((*self).clone(), gens_n.G.clone()) + blind * gens_n.h
}
}
impl Commitments for [Scalar] {
fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
assert_eq!(gens_n.n, self.len());
GroupElement::vartime_multiscalar_mul(self.to_vec(), gens_n.G.clone()) + blind * gens_n.h
}
}
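A hedged usage sketch of the Pedersen commitment API above (illustrative only, not a file from this diff; it assumes a test placed inside this module and that GroupElement implements equality):

```rust
#[cfg(test)]
mod example_tests {
    use super::*;

    #[test]
    fn pedersen_commit_example() {
        // Two generators G[0], G[1] plus the blinding generator h.
        let gens = MultiCommitGens::new(2, b"example-gens");
        let msg = vec![Scalar::one(), Scalar::one() + Scalar::one()];
        let blind = Scalar::one();

        // C = msg[0]*G[0] + msg[1]*G[1] + blind*h
        let c = msg.commit(&blind, &gens);

        // The same multiscalar product, written out explicitly.
        let expected = GroupElement::vartime_multiscalar_mul(
            vec![msg[0], msg[1], blind],
            vec![gens.G[0], gens.G[1], gens.h],
        );
        assert_eq!(c, expected);
    }
}
```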

packages/Spartan-secq/src/dense_mlpoly.rs

@@ -0,0 +1,602 @@
#![allow(clippy::too_many_arguments)]
use super::commitments::{Commitments, MultiCommitGens};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::math::Math;
use super::nizk::{DotProductProofGens, DotProductProofLog};
use super::random::RandomTape;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use crate::group::DecompressEncodedPoint;
use core::ops::Index;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
#[cfg(feature = "multicore")]
use rayon::prelude::*;
#[derive(Debug)]
pub struct DensePolynomial {
num_vars: usize, // the number of variables in the multilinear polynomial
len: usize,
Z: Vec<Scalar>, // evaluations of the polynomial in all the 2^num_vars Boolean inputs
}
pub struct PolyCommitmentGens {
pub gens: DotProductProofGens,
}
impl PolyCommitmentGens {
// the number of variables in the multilinear polynomial
pub fn new(num_vars: usize, label: &'static [u8]) -> PolyCommitmentGens {
let (_left, right) = EqPolynomial::compute_factored_lens(num_vars);
let gens = DotProductProofGens::new(right.pow2(), label);
PolyCommitmentGens { gens }
}
}
pub struct PolyCommitmentBlinds {
blinds: Vec<Scalar>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct PolyCommitment {
C: Vec<CompressedGroup>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ConstPolyCommitment {
C: CompressedGroup,
}
pub struct EqPolynomial {
r: Vec<Scalar>,
}
impl EqPolynomial {
pub fn new(r: Vec<Scalar>) -> Self {
EqPolynomial { r }
}
pub fn evaluate(&self, rx: &[Scalar]) -> Scalar {
assert_eq!(self.r.len(), rx.len());
(0..rx.len())
.map(|i| self.r[i] * rx[i] + (Scalar::one() - self.r[i]) * (Scalar::one() - rx[i]))
.product()
}
pub fn evals(&self) -> Vec<Scalar> {
let ell = self.r.len();
let mut evals: Vec<Scalar> = vec![Scalar::one(); ell.pow2()];
let mut size = 1;
for j in 0..ell {
// in each iteration, we double the size of chis
size *= 2;
for i in (0..size).rev().step_by(2) {
// copy each element from the prior iteration twice
let scalar = evals[i / 2];
evals[i] = scalar * self.r[j];
evals[i - 1] = scalar - evals[i];
}
}
evals
}
pub fn compute_factored_lens(ell: usize) -> (usize, usize) {
(ell / 2, ell - ell / 2)
}
pub fn compute_factored_evals(&self) -> (Vec<Scalar>, Vec<Scalar>) {
let ell = self.r.len();
let (left_num_vars, _right_num_vars) = EqPolynomial::compute_factored_lens(ell);
let L = EqPolynomial::new(self.r[..left_num_vars].to_vec()).evals();
let R = EqPolynomial::new(self.r[left_num_vars..ell].to_vec()).evals();
(L, R)
}
}
pub struct IdentityPolynomial {
size_point: usize,
}
impl IdentityPolynomial {
pub fn new(size_point: usize) -> Self {
IdentityPolynomial { size_point }
}
pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
let len = r.len();
assert_eq!(len, self.size_point);
(0..len)
.map(|i| Scalar::from((len - i - 1).pow2() as u64) * r[i])
.sum()
}
}
impl DensePolynomial {
pub fn new(Z: Vec<Scalar>) -> Self {
DensePolynomial {
num_vars: Z.len().log_2(),
len: Z.len(),
Z,
}
}
pub fn get_num_vars(&self) -> usize {
self.num_vars
}
pub fn len(&self) -> usize {
self.len
}
pub fn clone(&self) -> DensePolynomial {
DensePolynomial::new(self.Z[0..self.len].to_vec())
}
pub fn split(&self, idx: usize) -> (DensePolynomial, DensePolynomial) {
assert!(idx < self.len());
(
DensePolynomial::new(self.Z[..idx].to_vec()),
DensePolynomial::new(self.Z[idx..2 * idx].to_vec()),
)
}
#[cfg(feature = "multicore")]
fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment {
let L_size = blinds.len();
let R_size = self.Z.len() / L_size;
assert_eq!(L_size * R_size, self.Z.len());
let C = (0..L_size)
.into_par_iter()
.map(|i| {
self.Z[R_size * i..R_size * (i + 1)]
.commit(&blinds[i], gens)
.compress()
})
.collect();
PolyCommitment { C }
}
#[cfg(not(feature = "multicore"))]
fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment {
let L_size = blinds.len();
let R_size = self.Z.len() / L_size;
assert_eq!(L_size * R_size, self.Z.len());
let C = (0..L_size)
.map(|i| {
self.Z[R_size * i..R_size * (i + 1)]
.commit(&blinds[i], gens)
.compress()
})
.collect();
PolyCommitment { C }
}
pub fn commit(
&self,
gens: &PolyCommitmentGens,
random_tape: Option<&mut RandomTape>,
) -> (PolyCommitment, PolyCommitmentBlinds) {
let n = self.Z.len();
let ell = self.get_num_vars();
assert_eq!(n, ell.pow2());
let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(ell);
let L_size = left_num_vars.pow2();
let R_size = right_num_vars.pow2();
assert_eq!(L_size * R_size, n);
let blinds = if let Some(t) = random_tape {
PolyCommitmentBlinds {
blinds: t.random_vector(b"poly_blinds", L_size),
}
} else {
PolyCommitmentBlinds {
blinds: vec![Scalar::zero(); L_size],
}
};
(self.commit_inner(&blinds.blinds, &gens.gens.gens_n), blinds)
}
pub fn bound(&self, L: &[Scalar]) -> Vec<Scalar> {
let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(self.get_num_vars());
let L_size = left_num_vars.pow2();
let R_size = right_num_vars.pow2();
(0..R_size)
.map(|i| (0..L_size).map(|j| L[j] * self.Z[j * R_size + i]).sum())
.collect()
}
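// Bind the top (most significant) variable of the table to r, halving it:
// Z'(x) = (1 - r) * Z(0, x) + r * Z(1, x), computed in place below as
// Z[i] + r * (Z[i + n] - Z[i]).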
pub fn bound_poly_var_top(&mut self, r: &Scalar) {
let n = self.len() / 2;
for i in 0..n {
self.Z[i] = self.Z[i] + r * (self.Z[i + n] - self.Z[i]);
}
self.num_vars -= 1;
self.len = n;
}
pub fn bound_poly_var_bot(&mut self, r: &Scalar) {
let n = self.len() / 2;
for i in 0..n {
self.Z[i] = self.Z[2 * i] + r * (self.Z[2 * i + 1] - self.Z[2 * i]);
}
self.num_vars -= 1;
self.len = n;
}
// returns Z(r) in O(n) time
pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
// r must have a value for each variable
assert_eq!(r.len(), self.get_num_vars());
let chis = EqPolynomial::new(r.to_vec()).evals();
assert_eq!(chis.len(), self.Z.len());
DotProductProofLog::compute_dotproduct(&self.Z, &chis)
}
fn vec(&self) -> &Vec<Scalar> {
&self.Z
}
pub fn extend(&mut self, other: &DensePolynomial) {
// TODO: allow extension even when some vars are bound
assert_eq!(self.Z.len(), self.len);
let other_vec = other.vec();
assert_eq!(other_vec.len(), self.len);
self.Z.extend(other_vec);
self.num_vars += 1;
self.len *= 2;
assert_eq!(self.Z.len(), self.len);
}
pub fn merge<'a, I>(polys: I) -> DensePolynomial
where
I: IntoIterator<Item = &'a DensePolynomial>,
{
let mut Z: Vec<Scalar> = Vec::new();
for poly in polys.into_iter() {
Z.extend(poly.vec());
}
// pad the merged evaluations with zeros up to the next power of two
Z.resize(Z.len().next_power_of_two(), Scalar::zero());
DensePolynomial::new(Z)
}
pub fn from_usize(Z: &[usize]) -> Self {
DensePolynomial::new(
(0..Z.len())
.map(|i| Scalar::from(Z[i] as u64))
.collect::<Vec<Scalar>>(),
)
}
}
impl Index<usize> for DensePolynomial {
type Output = Scalar;
#[inline(always)]
fn index(&self, _index: usize) -> &Scalar {
&(self.Z[_index])
}
}
impl AppendToTranscript for PolyCommitment {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_message(label, b"poly_commitment_begin");
for i in 0..self.C.len() {
transcript.append_point(b"poly_commitment_share", &self.C[i]);
}
transcript.append_message(label, b"poly_commitment_end");
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct PolyEvalProof {
proof: DotProductProofLog,
}
impl PolyEvalProof {
fn protocol_name() -> &'static [u8] {
b"polynomial evaluation proof"
}
pub fn prove(
poly: &DensePolynomial,
blinds_opt: Option<&PolyCommitmentBlinds>,
r: &[Scalar], // point at which the polynomial is evaluated
Zr: &Scalar, // evaluation of \widetilde{Z}(r)
blind_Zr_opt: Option<&Scalar>, // specifies a blind for Zr
gens: &PolyCommitmentGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> (PolyEvalProof, CompressedGroup) {
transcript.append_protocol_name(PolyEvalProof::protocol_name());
// assert vectors are of the right size
assert_eq!(poly.get_num_vars(), r.len());
let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(r.len());
let L_size = left_num_vars.pow2();
let R_size = right_num_vars.pow2();
let default_blinds = PolyCommitmentBlinds {
blinds: vec![Scalar::zero(); L_size],
};
let blinds = blinds_opt.map_or(&default_blinds, |p| p);
assert_eq!(blinds.blinds.len(), L_size);
let zero = Scalar::zero();
let blind_Zr = blind_Zr_opt.map_or(&zero, |p| p);
// compute the L and R vectors
let eq = EqPolynomial::new(r.to_vec());
let (L, R) = eq.compute_factored_evals();
assert_eq!(L.len(), L_size);
assert_eq!(R.len(), R_size);
// compute the vector underneath L*Z and the L*blinds
// compute vector-matrix product between L and Z viewed as a matrix
let LZ = poly.bound(&L);
let LZ_blind: Scalar = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum();
// a dot product proof of size R_size
let (proof, _C_LR, C_Zr_prime) = DotProductProofLog::prove(
&gens.gens,
transcript,
random_tape,
&LZ,
&LZ_blind,
&R,
Zr,
blind_Zr,
);
(PolyEvalProof { proof }, C_Zr_prime)
}
pub fn verify(
&self,
gens: &PolyCommitmentGens,
transcript: &mut Transcript,
r: &[Scalar], // point at which the polynomial is evaluated
C_Zr: &CompressedGroup, // commitment to \widetilde{Z}(r)
comm: &PolyCommitment,
) -> Result<(), ProofVerifyError> {
transcript.append_protocol_name(PolyEvalProof::protocol_name());
// compute L and R
let eq = EqPolynomial::new(r.to_vec());
let (L, R) = eq.compute_factored_evals();
// compute a weighted sum of commitments and L
let C_decompressed = comm.C.iter().map(|pt| pt.decompress().unwrap());
let C_LZ = GroupElement::vartime_multiscalar_mul(L, C_decompressed.collect()).compress();
self
.proof
.verify(R.len(), &gens.gens, transcript, &R, &C_LZ, C_Zr)
}
pub fn verify_plain(
&self,
gens: &PolyCommitmentGens,
transcript: &mut Transcript,
r: &[Scalar], // point at which the polynomial is evaluated
Zr: &Scalar, // evaluation \widetilde{Z}(r)
comm: &PolyCommitment,
) -> Result<(), ProofVerifyError> {
// compute a commitment to Zr with a blind of zero
let C_Zr = Zr.commit(&Scalar::zero(), &gens.gens.gens_1).compress();
self.verify(gens, transcript, r, &C_Zr, comm)
}
}
#[cfg(test)]
mod tests {
use super::super::scalar::ScalarFromPrimitives;
use super::*;
use rand_core::OsRng;
fn evaluate_with_LR(Z: &[Scalar], r: &[Scalar]) -> Scalar {
let eq = EqPolynomial::new(r.to_vec());
let (L, R) = eq.compute_factored_evals();
let ell = r.len();
// ensure ell is even
assert!(ell % 2 == 0);
// compute n = 2^\ell
let n = ell.pow2();
// compute m = sqrt(n) = 2^{\ell/2}
let m = n.square_root();
// compute vector-matrix product between L and Z viewed as a matrix
let LZ = (0..m)
.map(|i| (0..m).map(|j| L[j] * Z[j * m + i]).sum())
.collect::<Vec<Scalar>>();
// compute dot product between LZ and R
DotProductProofLog::compute_dotproduct(&LZ, &R)
}
#[test]
fn check_polynomial_evaluation() {
// Z = [1, 2, 1, 4]
let Z = vec![
Scalar::one(),
(2_usize).to_scalar(),
(1_usize).to_scalar(),
(4_usize).to_scalar(),
];
// r = [4,3]
let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
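// By hand: eval = (1-4)(1-3)*1 + (1-4)*3*2 + 4*(1-3)*1 + 4*3*4
//               = 6 - 18 - 8 + 48 = 28, matching the assertion below.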
let eval_with_LR = evaluate_with_LR(&Z, &r);
let poly = DensePolynomial::new(Z);
let eval = poly.evaluate(&r);
assert_eq!(eval, (28_usize).to_scalar());
assert_eq!(eval_with_LR, eval);
}
pub fn compute_factored_chis_at_r(r: &[Scalar]) -> (Vec<Scalar>, Vec<Scalar>) {
let mut L: Vec<Scalar> = Vec::new();
let mut R: Vec<Scalar> = Vec::new();
let ell = r.len();
assert!(ell % 2 == 0); // ensure ell is even
let n = ell.pow2();
let m = n.square_root();
// compute row vector L
for i in 0..m {
let mut chi_i = Scalar::one();
for j in 0..ell / 2 {
let bit_j = ((m * i) & (1 << (r.len() - j - 1))) > 0;
if bit_j {
chi_i *= r[j];
} else {
chi_i *= Scalar::one() - r[j];
}
}
L.push(chi_i);
}
// compute column vector R
for i in 0..m {
let mut chi_i = Scalar::one();
for j in ell / 2..ell {
let bit_j = (i & (1 << (r.len() - j - 1))) > 0;
if bit_j {
chi_i *= r[j];
} else {
chi_i *= Scalar::one() - r[j];
}
}
R.push(chi_i);
}
(L, R)
}
pub fn compute_chis_at_r(r: &[Scalar]) -> Vec<Scalar> {
let ell = r.len();
let n = ell.pow2();
let mut chis: Vec<Scalar> = Vec::new();
for i in 0..n {
let mut chi_i = Scalar::one();
for j in 0..r.len() {
let bit_j = (i & (1 << (r.len() - j - 1))) > 0;
if bit_j {
chi_i *= r[j];
} else {
chi_i *= Scalar::one() - r[j];
}
}
chis.push(chi_i);
}
chis
}
pub fn compute_outerproduct(L: Vec<Scalar>, R: Vec<Scalar>) -> Vec<Scalar> {
assert_eq!(L.len(), R.len());
(0..L.len())
.map(|i| (0..R.len()).map(|j| L[i] * R[j]).collect::<Vec<Scalar>>())
.collect::<Vec<Vec<Scalar>>>()
.into_iter()
.flatten()
.collect::<Vec<Scalar>>()
}
#[test]
fn check_memoized_chis() {
let mut csprng: OsRng = OsRng;
let s = 10;
let mut r: Vec<Scalar> = Vec::new();
for _i in 0..s {
r.push(Scalar::random(&mut csprng));
}
let chis = tests::compute_chis_at_r(&r);
let chis_m = EqPolynomial::new(r).evals();
assert_eq!(chis, chis_m);
}
#[test]
fn check_factored_chis() {
let mut csprng: OsRng = OsRng;
let s = 10;
let mut r: Vec<Scalar> = Vec::new();
for _i in 0..s {
r.push(Scalar::random(&mut csprng));
}
let chis = EqPolynomial::new(r.clone()).evals();
let (L, R) = EqPolynomial::new(r).compute_factored_evals();
let O = compute_outerproduct(L, R);
assert_eq!(chis, O);
}
#[test]
fn check_memoized_factored_chis() {
let mut csprng: OsRng = OsRng;
let s = 10;
let mut r: Vec<Scalar> = Vec::new();
for _i in 0..s {
r.push(Scalar::random(&mut csprng));
}
let (L, R) = tests::compute_factored_chis_at_r(&r);
let eq = EqPolynomial::new(r);
let (L2, R2) = eq.compute_factored_evals();
assert_eq!(L, L2);
assert_eq!(R, R2);
}
#[test]
fn check_polynomial_commit() {
let Z = vec![
(1_usize).to_scalar(),
(2_usize).to_scalar(),
(1_usize).to_scalar(),
(4_usize).to_scalar(),
];
let poly = DensePolynomial::new(Z);
// r = [4,3]
let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
let eval = poly.evaluate(&r);
assert_eq!(eval, (28_usize).to_scalar());
let gens = PolyCommitmentGens::new(poly.get_num_vars(), b"test-two");
let (poly_commitment, blinds) = poly.commit(&gens, None);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, C_Zr) = PolyEvalProof::prove(
&poly,
Some(&blinds),
&r,
&eval,
None,
&gens,
&mut prover_transcript,
&mut random_tape,
);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(&gens, &mut verifier_transcript, &r, &C_Zr, &poly_commitment)
.is_ok());
}
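// An illustrative sketch added for this listing (not part of the original
// suite): the same flow as check_polynomial_commit above, but checked with
// verify_plain. Passing None for the evaluation blind makes the prover commit
// to Zr with a zero blind, which is exactly the commitment that verify_plain
// recomputes on the verifier's side.
#[test]
fn check_polynomial_commit_plain() {
let Z = vec![
(1_usize).to_scalar(),
(2_usize).to_scalar(),
(1_usize).to_scalar(),
(4_usize).to_scalar(),
];
let poly = DensePolynomial::new(Z);
// r = [4,3]
let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
let eval = poly.evaluate(&r);
let gens = PolyCommitmentGens::new(poly.get_num_vars(), b"test-two");
let (poly_commitment, blinds) = poly.commit(&gens, None);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, _C_Zr) = PolyEvalProof::prove(
&poly,
Some(&blinds),
&r,
&eval,
None,
&gens,
&mut prover_transcript,
&mut random_tape,
);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify_plain(&gens, &mut verifier_transcript, &r, &eval, &poly_commitment)
.is_ok());
}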
}

View File

@@ -0,0 +1,32 @@
use core::fmt::Debug;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum ProofVerifyError {
#[error("Proof verification failed")]
InternalError,
#[error("Compressed group element failed to decompress: {0:?}")]
DecompressionError([u8; 32]),
}
impl Default for ProofVerifyError {
fn default() -> Self {
ProofVerifyError::InternalError
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum R1CSError {
/// returned if the number of constraints is not a power of 2
NonPowerOfTwoCons,
/// returned if the number of variables is not a power of 2
NonPowerOfTwoVars,
/// returned if a wrong number of inputs in an assignment are supplied
InvalidNumberOfInputs,
/// returned if a wrong number of variables in an assignment are supplied
InvalidNumberOfVars,
/// returned if a [u8;32] does not parse into a valid Scalar in the field of secq256k1
InvalidScalar,
/// returned if the supplied row or col in (row,col,val) tuple is out of range
InvalidIndex,
}

View File

@@ -0,0 +1,138 @@
use secq256k1::{AffinePoint, ProjectivePoint};
use super::errors::ProofVerifyError;
use super::scalar::{Scalar, ScalarBytes, ScalarBytesFromScalar};
use core::ops::{Mul, MulAssign};
use multiexp::multiexp;
pub type GroupElement = secq256k1::AffinePoint;
pub type CompressedGroup = secq256k1::EncodedPoint;
pub trait CompressedGroupExt {
type Group;
fn unpack(&self) -> Result<Self::Group, ProofVerifyError>;
}
impl CompressedGroupExt for CompressedGroup {
type Group = secq256k1::AffinePoint;
fn unpack(&self) -> Result<Self::Group, ProofVerifyError> {
let result = AffinePoint::decompress(*self);
if result.is_some().into() {
return Ok(result.unwrap());
} else {
Err(ProofVerifyError::DecompressionError(
(*self.to_bytes()).try_into().unwrap(),
))
}
}
}
pub trait DecompressEncodedPoint {
fn decompress(&self) -> Option<GroupElement>;
}
impl DecompressEncodedPoint for CompressedGroup {
fn decompress(&self) -> Option<GroupElement> {
// return None on an invalid encoding instead of panicking on unwrap
self.unpack().ok()
}
}
impl<'b> MulAssign<&'b Scalar> for GroupElement {
fn mul_assign(&mut self, scalar: &'b Scalar) {
let result = (self as &GroupElement) * Scalar::decompress_scalar(scalar);
*self = result;
}
}
impl<'a, 'b> Mul<&'b Scalar> for &'a GroupElement {
type Output = GroupElement;
fn mul(self, scalar: &'b Scalar) -> GroupElement {
*self * Scalar::decompress_scalar(scalar)
}
}
impl<'a, 'b> Mul<&'b GroupElement> for &'a Scalar {
type Output = GroupElement;
fn mul(self, point: &'b GroupElement) -> GroupElement {
(*point * Scalar::decompress_scalar(self)).into()
}
}
macro_rules! define_mul_variants {
(LHS = $lhs:ty, RHS = $rhs:ty, Output = $out:ty) => {
impl<'b> Mul<&'b $rhs> for $lhs {
type Output = $out;
fn mul(self, rhs: &'b $rhs) -> $out {
&self * rhs
}
}
impl<'a> Mul<$rhs> for &'a $lhs {
type Output = $out;
fn mul(self, rhs: $rhs) -> $out {
self * &rhs
}
}
impl Mul<$rhs> for $lhs {
type Output = $out;
fn mul(self, rhs: $rhs) -> $out {
&self * &rhs
}
}
};
}
macro_rules! define_mul_assign_variants {
(LHS = $lhs:ty, RHS = $rhs:ty) => {
impl MulAssign<$rhs> for $lhs {
fn mul_assign(&mut self, rhs: $rhs) {
*self *= &rhs;
}
}
};
}
define_mul_assign_variants!(LHS = GroupElement, RHS = Scalar);
define_mul_variants!(LHS = GroupElement, RHS = Scalar, Output = GroupElement);
define_mul_variants!(LHS = Scalar, RHS = GroupElement, Output = GroupElement);
pub trait VartimeMultiscalarMul {
type Scalar;
fn vartime_multiscalar_mul(scalars: Vec<Scalar>, points: Vec<GroupElement>) -> Self;
}
impl VartimeMultiscalarMul for GroupElement {
type Scalar = super::scalar::Scalar;
// TODO Borrow the arguments so we don't have to clone them, as it was in the original implementation
fn vartime_multiscalar_mul(scalars: Vec<Scalar>, points: Vec<GroupElement>) -> Self {
let points: Vec<ProjectivePoint> = points.iter().map(|p| ProjectivePoint::from(p.0)).collect();
let pairs: Vec<(ScalarBytes, ProjectivePoint)> = scalars
.into_iter()
.enumerate()
.map(|(i, s)| (Scalar::decompress_scalar(&s), points[i]))
.collect();
let result = multiexp::<ProjectivePoint>(pairs.as_slice());
AffinePoint(result.to_affine())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn msm() {
let scalars = vec![Scalar::from(1), Scalar::from(2), Scalar::from(3)];
let points = vec![
GroupElement::generator(),
GroupElement::generator(),
GroupElement::generator(),
];
let result = GroupElement::vartime_multiscalar_mul(scalars, points);
assert_eq!(result, GroupElement::generator() * Scalar::from(6));
}
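// An added round-trip sketch: compressing a point and unpacking the encoding
// recovers the original group element; unpack reports an invalid encoding as
// a ProofVerifyError instead of panicking.
#[test]
fn compress_unpack_roundtrip() {
let p = GroupElement::generator() * Scalar::from(42);
let c = p.compress();
assert_eq!(c.unpack().unwrap(), p);
}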
}

View File

@@ -0,0 +1,751 @@
#![allow(non_snake_case)]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
#![allow(clippy::assertions_on_result_states)]
extern crate byteorder;
extern crate core;
extern crate digest;
extern crate merlin;
extern crate rand;
extern crate sha3;
#[cfg(feature = "multicore")]
extern crate rayon;
mod commitments;
mod dense_mlpoly;
mod errors;
mod group;
mod math;
mod nizk;
mod product_tree;
mod r1csinstance;
mod r1csproof;
mod random;
mod scalar;
mod sparse_mlpoly;
mod sumcheck;
mod timer;
mod transcript;
mod unipoly;
use core::cmp::max;
use errors::{ProofVerifyError, R1CSError};
use merlin::Transcript;
use r1csinstance::{
R1CSCommitment, R1CSCommitmentGens, R1CSDecommitment, R1CSEvalProof, R1CSInstance,
};
use r1csproof::{R1CSGens, R1CSProof};
use random::RandomTape;
use scalar::Scalar;
use serde::{Deserialize, Serialize};
use timer::Timer;
use transcript::{AppendToTranscript, ProofTranscript};
/// `ComputationCommitment` holds a public preprocessed NP statement (e.g., R1CS)
pub struct ComputationCommitment {
comm: R1CSCommitment,
}
/// `ComputationDecommitment` holds information to decommit `ComputationCommitment`
pub struct ComputationDecommitment {
decomm: R1CSDecommitment,
}
/// `Assignment` holds an assignment of values to either the inputs or variables in an `Instance`
#[derive(Serialize, Deserialize, Clone)]
pub struct Assignment {
assignment: Vec<Scalar>,
}
impl Assignment {
/// Constructs a new `Assignment` from a vector
pub fn new(assignment: &[[u8; 32]]) -> Result<Assignment, R1CSError> {
let bytes_to_scalar = |vec: &[[u8; 32]]| -> Result<Vec<Scalar>, R1CSError> {
let mut vec_scalar: Vec<Scalar> = Vec::new();
for v in vec {
let val = Scalar::from_bytes(v);
if val.is_some().unwrap_u8() == 1 {
vec_scalar.push(val.unwrap());
} else {
return Err(R1CSError::InvalidScalar);
}
}
Ok(vec_scalar)
};
let assignment_scalar = bytes_to_scalar(assignment);
// check for any parsing errors
if assignment_scalar.is_err() {
return Err(R1CSError::InvalidScalar);
}
Ok(Assignment {
assignment: assignment_scalar.unwrap(),
})
}
/// pads Assignment to the specified length
fn pad(&self, len: usize) -> VarsAssignment {
// check that the new length is higher than current length
assert!(len > self.assignment.len());
let padded_assignment = {
let mut padded_assignment = self.assignment.clone();
padded_assignment.extend(vec![Scalar::zero(); len - self.assignment.len()]);
padded_assignment
};
VarsAssignment {
assignment: padded_assignment,
}
}
}
/// `VarsAssignment` holds an assignment of values to variables in an `Instance`
pub type VarsAssignment = Assignment;
/// `InputsAssignment` holds an assignment of values to inputs in an `Instance`
pub type InputsAssignment = Assignment;
/// `Instance` holds the description of R1CS matrices and a hash of the matrices
#[derive(Serialize, Deserialize)]
pub struct Instance {
/// R1CS instance
pub inst: R1CSInstance,
digest: Vec<u8>,
}
impl Instance {
/// Constructs a new `Instance` from the given sparse R1CS matrices
pub fn new(
num_cons: usize,
num_vars: usize,
num_inputs: usize,
A: &[(usize, usize, [u8; 32])],
B: &[(usize, usize, [u8; 32])],
C: &[(usize, usize, [u8; 32])],
) -> Result<Instance, R1CSError> {
let (num_vars_padded, num_cons_padded) = {
let num_vars_padded = {
let mut num_vars_padded = num_vars;
// ensure that num_vars_padded is at least num_inputs + 1
num_vars_padded = max(num_vars_padded, num_inputs + 1);
// ensure that num_vars_padded is a power of two
if num_vars_padded.next_power_of_two() != num_vars_padded {
num_vars_padded = num_vars_padded.next_power_of_two();
}
num_vars_padded
};
let num_cons_padded = {
let mut num_cons_padded = num_cons;
// ensure that num_cons_padded is at least 2
if num_cons_padded == 0 || num_cons_padded == 1 {
num_cons_padded = 2;
}
// ensure that num_cons_padded is a power of two; check the padded value so
// the "at least 2" adjustment above is not undone when num_cons is 0
if num_cons_padded.next_power_of_two() != num_cons_padded {
num_cons_padded = num_cons_padded.next_power_of_two();
}
num_cons_padded
};
(num_vars_padded, num_cons_padded)
};
let bytes_to_scalar =
|tups: &[(usize, usize, [u8; 32])]| -> Result<Vec<(usize, usize, Scalar)>, R1CSError> {
let mut mat: Vec<(usize, usize, Scalar)> = Vec::new();
for &(row, col, val_bytes) in tups {
// row must be smaller than num_cons
if row >= num_cons {
return Err(R1CSError::InvalidIndex);
}
// col must be smaller than num_vars + 1 + num_inputs
if col >= num_vars + 1 + num_inputs {
return Err(R1CSError::InvalidIndex);
}
let val = Scalar::from_bytes(&val_bytes);
if val.is_some().unwrap_u8() == 1 {
// if col >= num_vars, the entry references the constant 1 or an input in the
// satisfying assignment, so shift it past the padded variables
if col >= num_vars {
mat.push((row, col + num_vars_padded - num_vars, val.unwrap()));
} else {
mat.push((row, col, val.unwrap()));
}
} else {
return Err(R1CSError::InvalidScalar);
}
}
// pad with additional constraints up until num_cons_padded if the original constraints were 0 or 1
// we do not need to pad otherwise because the dummy constraints are implicit in the sum-check protocol
if num_cons == 0 || num_cons == 1 {
for i in tups.len()..num_cons_padded {
mat.push((i, num_vars, Scalar::zero()));
}
}
Ok(mat)
};
let A_scalar = bytes_to_scalar(A);
if A_scalar.is_err() {
return Err(A_scalar.err().unwrap());
}
let B_scalar = bytes_to_scalar(B);
if B_scalar.is_err() {
return Err(B_scalar.err().unwrap());
}
let C_scalar = bytes_to_scalar(C);
if C_scalar.is_err() {
return Err(C_scalar.err().unwrap());
}
let inst = R1CSInstance::new(
num_cons_padded,
num_vars_padded,
num_inputs,
&A_scalar.unwrap(),
&B_scalar.unwrap(),
&C_scalar.unwrap(),
);
let digest = inst.get_digest();
Ok(Instance { inst, digest })
}
/// Checks if a given R1CSInstance is satisfiable with the given variable and input assignments
pub fn is_sat(
&self,
vars: &VarsAssignment,
inputs: &InputsAssignment,
) -> Result<bool, R1CSError> {
if vars.assignment.len() > self.inst.get_num_vars() {
return Err(R1CSError::InvalidNumberOfVars);
}
if inputs.assignment.len() != self.inst.get_num_inputs() {
return Err(R1CSError::InvalidNumberOfInputs);
}
// we might need to pad variables
let padded_vars = {
let num_padded_vars = self.inst.get_num_vars();
let num_vars = vars.assignment.len();
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars.clone()
}
};
Ok(
self
.inst
.is_sat(&padded_vars.assignment, &inputs.assignment),
)
}
/// Constructs a new synthetic R1CS `Instance` and an associated satisfying assignment
pub fn produce_synthetic_r1cs(
num_cons: usize,
num_vars: usize,
num_inputs: usize,
) -> (Instance, VarsAssignment, InputsAssignment) {
let (inst, vars, inputs) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let digest = inst.get_digest();
(
Instance { inst, digest },
VarsAssignment { assignment: vars },
InputsAssignment { assignment: inputs },
)
}
}
/// `SNARKGens` holds public parameters for producing and verifying proofs with the Spartan SNARK
pub struct SNARKGens {
gens_r1cs_sat: R1CSGens,
gens_r1cs_eval: R1CSCommitmentGens,
}
impl SNARKGens {
/// Constructs a new `SNARKGens` given the size of the R1CS statement
/// `num_nz_entries` specifies the maximum number of non-zero entries in any of the three R1CS matrices
pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize, num_nz_entries: usize) -> Self {
let num_vars_padded = {
let mut num_vars_padded = max(num_vars, num_inputs + 1);
if num_vars_padded != num_vars_padded.next_power_of_two() {
num_vars_padded = num_vars_padded.next_power_of_two();
}
num_vars_padded
};
let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded);
let gens_r1cs_eval = R1CSCommitmentGens::new(
b"gens_r1cs_eval",
num_cons,
num_vars_padded,
num_inputs,
num_nz_entries,
);
SNARKGens {
gens_r1cs_sat,
gens_r1cs_eval,
}
}
}
/// `SNARK` holds a proof produced by Spartan SNARK
#[derive(Serialize, Deserialize, Debug)]
pub struct SNARK {
r1cs_sat_proof: R1CSProof,
inst_evals: (Scalar, Scalar, Scalar),
r1cs_eval_proof: R1CSEvalProof,
}
impl SNARK {
fn protocol_name() -> &'static [u8] {
b"Spartan SNARK proof"
}
/// A public computation to create a commitment to an R1CS instance
pub fn encode(
inst: &Instance,
gens: &SNARKGens,
) -> (ComputationCommitment, ComputationDecommitment) {
let timer_encode = Timer::new("SNARK::encode");
let (comm, decomm) = inst.inst.commit(&gens.gens_r1cs_eval);
timer_encode.stop();
(
ComputationCommitment { comm },
ComputationDecommitment { decomm },
)
}
/// A method to produce a SNARK proof of the satisfiability of an R1CS instance
pub fn prove(
inst: &Instance,
comm: &ComputationCommitment,
decomm: &ComputationDecommitment,
vars: VarsAssignment,
inputs: &InputsAssignment,
gens: &SNARKGens,
transcript: &mut Transcript,
) -> Self {
let timer_prove = Timer::new("SNARK::prove");
// we create a RandomTape (a transcript seeded with a random Scalar)
// to aid the prover in producing its randomness
let mut random_tape = RandomTape::new(b"proof");
transcript.append_protocol_name(SNARK::protocol_name());
comm.comm.append_to_transcript(b"comm", transcript);
let (r1cs_sat_proof, rx, ry) = {
let (proof, rx, ry) = {
// we might need to pad variables
let padded_vars = {
let num_padded_vars = inst.inst.get_num_vars();
let num_vars = vars.assignment.len();
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars
}
};
R1CSProof::prove(
&inst.inst,
padded_vars.assignment,
&inputs.assignment,
&gens.gens_r1cs_sat,
transcript,
&mut random_tape,
)
};
let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len()));
(proof, rx, ry)
};
// We send evaluations of A, B, C at r = (rx, ry) as claims
// to enable the verifier to complete the first sum-check
let timer_eval = Timer::new("eval_sparse_polys");
let inst_evals = {
let (Ar, Br, Cr) = inst.inst.evaluate(&rx, &ry);
Ar.append_to_transcript(b"Ar_claim", transcript);
Br.append_to_transcript(b"Br_claim", transcript);
Cr.append_to_transcript(b"Cr_claim", transcript);
(Ar, Br, Cr)
};
timer_eval.stop();
let r1cs_eval_proof = {
let proof = R1CSEvalProof::prove(
&decomm.decomm,
&rx,
&ry,
&inst_evals,
&gens.gens_r1cs_eval,
transcript,
&mut random_tape,
);
let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
Timer::print(&format!("len_r1cs_eval_proof {:?}", proof_encoded.len()));
proof
};
timer_prove.stop();
SNARK {
r1cs_sat_proof,
inst_evals,
r1cs_eval_proof,
}
}
/// A method to verify the SNARK proof of the satisfiability of an R1CS instance
pub fn verify(
&self,
comm: &ComputationCommitment,
input: &InputsAssignment,
transcript: &mut Transcript,
gens: &SNARKGens,
) -> Result<(), ProofVerifyError> {
let timer_verify = Timer::new("SNARK::verify");
transcript.append_protocol_name(SNARK::protocol_name());
// append a commitment to the computation to the transcript
comm.comm.append_to_transcript(b"comm", transcript);
let timer_sat_proof = Timer::new("verify_sat_proof");
assert_eq!(input.assignment.len(), comm.comm.get_num_inputs());
let (rx, ry) = self.r1cs_sat_proof.verify(
comm.comm.get_num_vars(),
comm.comm.get_num_cons(),
&input.assignment,
&self.inst_evals,
transcript,
&gens.gens_r1cs_sat,
)?;
timer_sat_proof.stop();
let timer_eval_proof = Timer::new("verify_eval_proof");
let (Ar, Br, Cr) = &self.inst_evals;
Ar.append_to_transcript(b"Ar_claim", transcript);
Br.append_to_transcript(b"Br_claim", transcript);
Cr.append_to_transcript(b"Cr_claim", transcript);
self.r1cs_eval_proof.verify(
&comm.comm,
&rx,
&ry,
&self.inst_evals,
&gens.gens_r1cs_eval,
transcript,
)?;
timer_eval_proof.stop();
timer_verify.stop();
Ok(())
}
}
/// `NIZKGens` holds public parameters for producing and verifying proofs with the Spartan NIZK
pub struct NIZKGens {
gens_r1cs_sat: R1CSGens,
}
impl NIZKGens {
/// Constructs a new `NIZKGens` given the size of the R1CS statement
pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize) -> Self {
let num_vars_padded = {
let mut num_vars_padded = max(num_vars, num_inputs + 1);
if num_vars_padded != num_vars_padded.next_power_of_two() {
num_vars_padded = num_vars_padded.next_power_of_two();
}
num_vars_padded
};
let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded);
NIZKGens { gens_r1cs_sat }
}
}
/// `NIZK` holds a proof produced by Spartan NIZK
#[derive(Serialize, Deserialize, Debug)]
pub struct NIZK {
r1cs_sat_proof: R1CSProof,
r: (Vec<Scalar>, Vec<Scalar>),
}
impl NIZK {
fn protocol_name() -> &'static [u8] {
b"Spartan NIZK proof"
}
/// A method to produce a NIZK proof of the satisfiability of an R1CS instance
pub fn prove(
inst: &Instance,
vars: VarsAssignment,
input: &InputsAssignment,
gens: &NIZKGens,
transcript: &mut Transcript,
) -> Self {
let timer_prove = Timer::new("NIZK::prove");
// we create a RandomTape (a transcript seeded with a random Scalar)
// to aid the prover in producing its randomness
let mut random_tape = RandomTape::new(b"proof");
transcript.append_protocol_name(NIZK::protocol_name());
transcript.append_message(b"R1CSInstanceDigest", &inst.digest);
let (r1cs_sat_proof, rx, ry) = {
// we might need to pad variables
let padded_vars = {
let num_padded_vars = inst.inst.get_num_vars();
let num_vars = vars.assignment.len();
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars
}
};
let (proof, rx, ry) = R1CSProof::prove(
&inst.inst,
padded_vars.assignment,
&input.assignment,
&gens.gens_r1cs_sat,
transcript,
&mut random_tape,
);
let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len()));
(proof, rx, ry)
};
timer_prove.stop();
NIZK {
r1cs_sat_proof,
r: (rx, ry),
}
}
/// A method to verify a NIZK proof of the satisfiability of an R1CS instance
pub fn verify(
&self,
inst: &Instance,
input: &InputsAssignment,
transcript: &mut Transcript,
gens: &NIZKGens,
) -> Result<(), ProofVerifyError> {
let timer_verify = Timer::new("NIZK::verify");
transcript.append_protocol_name(NIZK::protocol_name());
transcript.append_message(b"R1CSInstanceDigest", &inst.digest);
// We send evaluations of A, B, C at r = (rx, ry) as claims
// to enable the verifier to complete the first sum-check
let timer_eval = Timer::new("eval_sparse_polys");
let (claimed_rx, claimed_ry) = &self.r;
let inst_evals = inst.inst.evaluate(claimed_rx, claimed_ry);
timer_eval.stop();
let timer_sat_proof = Timer::new("verify_sat_proof");
assert_eq!(input.assignment.len(), inst.inst.get_num_inputs());
let (rx, ry) = self.r1cs_sat_proof.verify(
inst.inst.get_num_vars(),
inst.inst.get_num_cons(),
&input.assignment,
&inst_evals,
transcript,
&gens.gens_r1cs_sat,
)?;
// check that the claimed rx and ry match the points derived by the sum-check;
// a mismatch is a verification failure, not a panic
if rx != *claimed_rx || ry != *claimed_ry {
return Err(ProofVerifyError::InternalError);
}
timer_sat_proof.stop();
timer_verify.stop();
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
pub fn check_snark() {
let num_vars = 256;
let num_cons = num_vars;
let num_inputs = 10;
// produce public generators
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// produce a synthetic R1CSInstance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// create a commitment to R1CSInstance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof
let mut prover_transcript = Transcript::new(b"example");
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
vars,
&inputs,
&gens,
&mut prover_transcript,
);
// verify the proof
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(&comm, &inputs, &mut verifier_transcript, &gens)
.is_ok());
}
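// An added counterpart sketch to check_snark: the same synthetic instance
// proved and verified with the NIZK, which skips the instance commitment and
// instead has the verifier evaluate the R1CS matrices directly.
#[test]
pub fn check_nizk() {
let num_vars = 256;
let num_cons = num_vars;
let num_inputs = 10;
// produce public generators
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
// produce a synthetic R1CSInstance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce a proof
let mut prover_transcript = Transcript::new(b"nizk_example");
let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);
// verify the proof
let mut verifier_transcript = Transcript::new(b"nizk_example");
assert!(proof
.verify(&inst, &inputs, &mut verifier_transcript, &gens)
.is_ok());
}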
#[test]
pub fn check_r1cs_invalid_index() {
let num_cons = 4;
let num_vars = 8;
let num_inputs = 1;
let zero: [u8; 32] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
];
let A = vec![(0, 0, zero)];
let B = vec![(100, 1, zero)];
let C = vec![(1, 1, zero)];
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
assert!(inst.is_err());
assert_eq!(inst.err(), Some(R1CSError::InvalidIndex));
}
#[test]
pub fn check_r1cs_invalid_scalar() {
let num_cons = 4;
let num_vars = 8;
let num_inputs = 1;
let zero: [u8; 32] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
];
let larger_than_mod = [255; 32];
let A = vec![(0, 0, zero)];
let B = vec![(1, 1, larger_than_mod)];
let C = vec![(1, 1, zero)];
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
assert!(inst.is_err());
assert_eq!(inst.err(), Some(R1CSError::InvalidScalar));
}
#[test]
fn test_padded_constraints() {
// parameters of the R1CS instance
let num_cons = 1;
let num_vars = 0;
let num_inputs = 3;
let num_non_zero_entries = 3;
// We will encode the above constraints into three matrices, where
// the coefficients in the matrix are in the little-endian byte order
let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new();
let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
// Create the single constraint a * a = z - 13 - b, i.e., z = a^2 + b + 13
A.push((0, num_vars + 2, Scalar::one().to_bytes())); // 1*a
B.push((0, num_vars + 2, Scalar::one().to_bytes())); // 1*a
C.push((0, num_vars + 1, Scalar::one().to_bytes())); // 1*z
C.push((0, num_vars, (-Scalar::from(13u64)).to_bytes())); // -13*1
C.push((0, num_vars + 3, (-Scalar::one()).to_bytes())); // -1*b
// variable assignment (empty here, since num_vars = 0; the output z = 16 lives in the inputs)
let vars = vec![Scalar::zero().to_bytes(); num_vars];
// create an InputsAssignment (z = 16, a = 1, b = 2)
let mut inputs = vec![Scalar::zero().to_bytes(); num_inputs];
inputs[0] = Scalar::from(16u64).to_bytes();
inputs[1] = Scalar::from(1u64).to_bytes();
inputs[2] = Scalar::from(2u64).to_bytes();
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
let assignment_vars = VarsAssignment::new(&vars).unwrap();
// Check if instance is satisfiable
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert!(res.unwrap(), "should be satisfied");
// SNARK public params
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);
// create a commitment to the R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a SNARK
let mut prover_transcript = Transcript::new(b"snark_example");
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
assignment_vars.clone(),
&assignment_inputs,
&gens,
&mut prover_transcript,
);
// verify the SNARK
let mut verifier_transcript = Transcript::new(b"snark_example");
assert!(proof
.verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens)
.is_ok());
// NIZK public params
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
// produce a NIZK
let mut prover_transcript = Transcript::new(b"nizk_example");
let proof = NIZK::prove(
&inst,
assignment_vars,
&assignment_inputs,
&gens,
&mut prover_transcript,
);
// verify the NIZK
let mut verifier_transcript = Transcript::new(b"nizk_example");
assert!(proof
.verify(&inst, &assignment_inputs, &mut verifier_transcript, &gens)
.is_ok());
}
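// An added parsing sketch: Assignment::new consumes 32-byte little-endian
// scalar encodings and rejects out-of-field values, mirroring the check
// exercised by check_r1cs_invalid_scalar above.
#[test]
fn check_assignment_parsing() {
let one = Scalar::one().to_bytes();
assert!(Assignment::new(&[one, one]).is_ok());
// [255; 32] encodes a value larger than the field modulus
assert_eq!(
Assignment::new(&[[255u8; 32]]).err(),
Some(R1CSError::InvalidScalar)
);
}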
}

View File

@@ -0,0 +1,36 @@
pub trait Math {
fn square_root(self) -> usize;
fn pow2(self) -> usize;
fn get_bits(self, num_bits: usize) -> Vec<bool>;
fn log_2(self) -> usize;
}
impl Math for usize {
#[inline]
fn square_root(self) -> usize {
(self as f64).sqrt() as usize
}
#[inline]
fn pow2(self) -> usize {
let base: usize = 2;
base.pow(self as u32)
}
/// Returns the lowest `num_bits` bits of `self`, most-significant bit first
fn get_bits(self, num_bits: usize) -> Vec<bool> {
(0..num_bits)
.map(|shift_amount| ((self & (1 << (num_bits - shift_amount - 1))) > 0))
.collect::<Vec<bool>>()
}
fn log_2(self) -> usize {
assert_ne!(self, 0);
if self.is_power_of_two() {
(1usize.leading_zeros() - self.leading_zeros()) as usize
} else {
(0usize.leading_zeros() - self.leading_zeros()) as usize
}
}
}
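// An added sanity-check sketch pinning down the conventions above: pow2 is
// 2^self, log_2 rounds up for values that are not a power of two, and
// get_bits returns the lowest num_bits bits, most-significant bit first.
#[cfg(test)]
mod math_tests {
use super::*;
#[test]
fn check_math_conventions() {
assert_eq!(3usize.pow2(), 8);
assert_eq!(8usize.log_2(), 3);
// for a non-power of two, log_2 returns the ceiling
assert_eq!(9usize.log_2(), 4);
// 5 = 0b0101 over four bits
assert_eq!(5usize.get_bits(4), vec![false, true, false, true]);
}
}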

View File

@@ -0,0 +1,267 @@
//! This module is an adaptation of code from the bulletproofs crate.
//! See NOTICE.md for more details
#![allow(non_snake_case)]
#![allow(clippy::type_complexity)]
#![allow(clippy::too_many_arguments)]
use super::super::errors::ProofVerifyError;
use super::super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::super::math::Math;
use super::super::scalar::Scalar;
use super::super::transcript::ProofTranscript;
use crate::group::DecompressEncodedPoint;
use core::iter;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize)]
pub struct BulletReductionProof {
L_vec: Vec<CompressedGroup>,
R_vec: Vec<CompressedGroup>,
}
impl BulletReductionProof {
/// Create an inner-product proof.
///
/// The proof is created with respect to the bases \\(G\\).
///
/// The `transcript` is passed in as a parameter so that the
/// challenges depend on the *entire* transcript (including parent
/// protocols).
///
/// The lengths of the vectors must all be the same, and must all be
/// either 0 or a power of 2.
pub fn prove(
transcript: &mut Transcript,
Q: &GroupElement,
G_vec: &[GroupElement],
H: &GroupElement,
a_vec: &[Scalar],
b_vec: &[Scalar],
blind: &Scalar,
blinds_vec: &[(Scalar, Scalar)],
) -> (
BulletReductionProof,
GroupElement,
Scalar,
Scalar,
GroupElement,
Scalar,
) {
// Create slices G, H, a, b backed by their respective
// vectors. This lets us reslice as we compress the lengths
// of the vectors in the main loop below.
let mut G = &mut G_vec.to_owned()[..];
let mut a = &mut a_vec.to_owned()[..];
let mut b = &mut b_vec.to_owned()[..];
// All of the input vectors must have a length that is a power of two.
let mut n = G.len();
assert!(n.is_power_of_two());
let lg_n = n.log_2();
// All of the input vectors must have the same length.
assert_eq!(G.len(), n);
assert_eq!(a.len(), n);
assert_eq!(b.len(), n);
assert_eq!(blinds_vec.len(), 2 * lg_n);
let mut L_vec = Vec::with_capacity(lg_n);
let mut R_vec = Vec::with_capacity(lg_n);
let mut blinds_iter = blinds_vec.iter();
let mut blind_fin = *blind;
while n != 1 {
n /= 2;
let (a_L, a_R) = a.split_at_mut(n);
let (b_L, b_R) = b.split_at_mut(n);
let (G_L, G_R) = G.split_at_mut(n);
let c_L = inner_product(a_L, b_R);
let c_R = inner_product(a_R, b_L);
let (blind_L, blind_R) = blinds_iter.next().unwrap();
let L = GroupElement::vartime_multiscalar_mul(
a_L
.iter()
.chain(iter::once(&c_L))
.chain(iter::once(blind_L))
.map(|s| *s)
.collect(),
G_R
.iter()
.chain(iter::once(Q))
.chain(iter::once(H))
.map(|s| *s)
.collect(),
);
let R = GroupElement::vartime_multiscalar_mul(
a_R
.iter()
.chain(iter::once(&c_R))
.chain(iter::once(blind_R))
.map(|s| *s)
.collect(),
G_L
.iter()
.chain(iter::once(Q))
.chain(iter::once(H))
.map(|s| *s)
.collect(),
);
transcript.append_point(b"L", &L.compress());
transcript.append_point(b"R", &R.compress());
let u = transcript.challenge_scalar(b"u");
let u_inv = u.invert().unwrap();
for i in 0..n {
a_L[i] = a_L[i] * u + u_inv * a_R[i];
b_L[i] = b_L[i] * u_inv + u * b_R[i];
G_L[i] =
GroupElement::vartime_multiscalar_mul([u_inv, u].to_vec(), [G_L[i], G_R[i]].to_vec());
}
blind_fin = blind_fin + blind_L * u * u + blind_R * u_inv * u_inv;
L_vec.push(L.compress());
R_vec.push(R.compress());
a = a_L;
b = b_L;
G = G_L;
}
let Gamma_hat = GroupElement::vartime_multiscalar_mul(
[a[0], a[0] * b[0], blind_fin].to_vec(),
[G[0], *Q, *H].to_vec(),
);
(
BulletReductionProof { L_vec, R_vec },
Gamma_hat,
a[0],
b[0],
G[0],
blind_fin,
)
}
/// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication
/// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details.
/// The verifier must provide the input length \\(n\\) explicitly to avoid unbounded allocation within the inner product proof.
fn verification_scalars(
&self,
n: usize,
transcript: &mut Transcript,
) -> Result<(Vec<Scalar>, Vec<Scalar>, Vec<Scalar>), ProofVerifyError> {
let lg_n = self.L_vec.len();
if lg_n >= 32 {
// 4 billion multiplications should be enough for anyone
// and this check prevents overflow in 1<<lg_n below.
return Err(ProofVerifyError::InternalError);
}
if n != (1 << lg_n) {
return Err(ProofVerifyError::InternalError);
}
// 1. Recompute x_k,...,x_1 based on the proof transcript
let mut challenges = Vec::with_capacity(lg_n);
for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) {
transcript.append_point(b"L", L);
transcript.append_point(b"R", R);
challenges.push(transcript.challenge_scalar(b"u"));
}
// 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
let mut challenges_inv = challenges.clone();
let allinv = Scalar::batch_invert(&mut challenges_inv);
// 3. Compute u_i^2 and (1/u_i)^2
for i in 0..lg_n {
challenges[i] = challenges[i].square();
challenges_inv[i] = challenges_inv[i].square();
}
let challenges_sq = challenges;
let challenges_inv_sq = challenges_inv;
// 4. Compute s values inductively.
let mut s = Vec::with_capacity(n);
s.push(allinv);
for i in 1..n {
let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
let k = 1 << lg_i;
// The challenges are stored in "creation order" as [u_k,...,u_1],
// so u_{lg(i)+1} is indexed by (lg_n-1) - lg_i
let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
s.push(s[i - k] * u_lg_i_sq);
}
Ok((challenges_sq, challenges_inv_sq, s))
}
/// This method is for testing that proof generation works,
/// but for efficiency the actual protocols would use `verification_scalars`
/// method to combine inner product verification with other checks
/// in a single multiscalar multiplication.
pub fn verify(
&self,
n: usize,
a: &[Scalar],
transcript: &mut Transcript,
Gamma: &GroupElement,
G: &[GroupElement],
) -> Result<(GroupElement, GroupElement, Scalar), ProofVerifyError> {
let (u_sq, u_inv_sq, s) = self.verification_scalars(n, transcript)?;
let Ls = self
.L_vec
.iter()
.map(|p| p.decompress().ok_or(ProofVerifyError::InternalError))
.collect::<Result<Vec<_>, _>>()?;
let Rs = self
.R_vec
.iter()
.map(|p| p.decompress().ok_or(ProofVerifyError::InternalError))
.collect::<Result<Vec<_>, _>>()?;
let G_hat = GroupElement::vartime_multiscalar_mul(s.clone(), G.to_vec());
let a_hat = inner_product(a, &s);
let Gamma_hat = GroupElement::vartime_multiscalar_mul(
u_sq
.iter()
.chain(u_inv_sq.iter())
.chain(iter::once(&Scalar::one()))
.map(|s| *s)
.collect(),
Ls.iter()
.chain(Rs.iter())
.chain(iter::once(Gamma))
.map(|p| *p)
.collect(),
);
Ok((G_hat, Gamma_hat, a_hat))
}
}
/// Computes an inner product of two vectors
/// \\[
/// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i.
/// \\]
/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal.
pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
assert!(
a.len() == b.len(),
"inner_product(a,b): lengths of vectors do not match"
);
let mut out = Scalar::zero();
for i in 0..a.len() {
out += a[i] * b[i];
}
out
}
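// An added doc-style sketch: inner_product([1,2,3], [4,5,6]) = 4 + 10 + 18 = 32.
#[cfg(test)]
mod inner_product_tests {
use super::*;
#[test]
fn check_inner_product() {
let a = vec![Scalar::from(1), Scalar::from(2), Scalar::from(3)];
let b = vec![Scalar::from(4), Scalar::from(5), Scalar::from(6)];
assert_eq!(inner_product(&a, &b), Scalar::from(32));
}
}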

View File

@@ -0,0 +1,735 @@
#![allow(clippy::too_many_arguments)]
use super::commitments::{Commitments, MultiCommitGens};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, CompressedGroupExt};
use super::math::Math;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use crate::group::DecompressEncodedPoint;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
mod bullet;
use bullet::BulletReductionProof;
#[derive(Serialize, Deserialize, Debug)]
pub struct KnowledgeProof {
alpha: CompressedGroup,
z1: Scalar,
z2: Scalar,
}
impl KnowledgeProof {
fn protocol_name() -> &'static [u8] {
b"knowledge proof"
}
pub fn prove(
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
x: &Scalar,
r: &Scalar,
) -> (KnowledgeProof, CompressedGroup) {
transcript.append_protocol_name(KnowledgeProof::protocol_name());
// produce two random Scalars
let t1 = random_tape.random_scalar(b"t1");
let t2 = random_tape.random_scalar(b"t2");
let C = x.commit(r, gens_n).compress();
C.append_to_transcript(b"C", transcript);
let alpha = t1.commit(&t2, gens_n).compress();
alpha.append_to_transcript(b"alpha", transcript);
let c = transcript.challenge_scalar(b"c");
let z1 = x * c + t1;
let z2 = r * c + t2;
(KnowledgeProof { alpha, z1, z2 }, C)
}
pub fn verify(
&self,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
C: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
transcript.append_protocol_name(KnowledgeProof::protocol_name());
C.append_to_transcript(b"C", transcript);
self.alpha.append_to_transcript(b"alpha", transcript);
let c = transcript.challenge_scalar(b"c");
let lhs = self.z1.commit(&self.z2, gens_n).compress();
let rhs = (c * C.unpack()? + self.alpha.unpack()?).compress();
if lhs == rhs {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct EqualityProof {
alpha: CompressedGroup,
z: Scalar,
}
impl EqualityProof {
fn protocol_name() -> &'static [u8] {
b"equality proof"
}
pub fn prove(
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
v1: &Scalar,
s1: &Scalar,
v2: &Scalar,
s2: &Scalar,
) -> (EqualityProof, CompressedGroup, CompressedGroup) {
transcript.append_protocol_name(EqualityProof::protocol_name());
// produce a random Scalar
let r = random_tape.random_scalar(b"r");
let C1 = v1.commit(s1, gens_n).compress();
C1.append_to_transcript(b"C1", transcript);
let C2 = v2.commit(s2, gens_n).compress();
C2.append_to_transcript(b"C2", transcript);
let alpha = (r * gens_n.h).compress();
alpha.append_to_transcript(b"alpha", transcript);
let c = transcript.challenge_scalar(b"c");
let z = c * (s1 - s2) + r;
(EqualityProof { alpha, z }, C1, C2)
}
pub fn verify(
&self,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
C1: &CompressedGroup,
C2: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
transcript.append_protocol_name(EqualityProof::protocol_name());
C1.append_to_transcript(b"C1", transcript);
C2.append_to_transcript(b"C2", transcript);
self.alpha.append_to_transcript(b"alpha", transcript);
let c = transcript.challenge_scalar(b"c");
let rhs = {
let C = C1.unpack()? - C2.unpack()?;
(c * C + self.alpha.unpack()?).compress()
};
let lhs = (self.z * gens_n.h).compress();
if lhs == rhs {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ProductProof {
alpha: CompressedGroup,
beta: CompressedGroup,
delta: CompressedGroup,
z: [Scalar; 5],
}
impl ProductProof {
fn protocol_name() -> &'static [u8] {
b"product proof"
}
pub fn prove(
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
x: &Scalar,
rX: &Scalar,
y: &Scalar,
rY: &Scalar,
z: &Scalar,
rZ: &Scalar,
) -> (
ProductProof,
CompressedGroup,
CompressedGroup,
CompressedGroup,
) {
transcript.append_protocol_name(ProductProof::protocol_name());
// produce five random Scalar
let b1 = random_tape.random_scalar(b"b1");
let b2 = random_tape.random_scalar(b"b2");
let b3 = random_tape.random_scalar(b"b3");
let b4 = random_tape.random_scalar(b"b4");
let b5 = random_tape.random_scalar(b"b5");
let X = x.commit(rX, gens_n).compress();
X.append_to_transcript(b"X", transcript);
let Y = y.commit(rY, gens_n).compress();
Y.append_to_transcript(b"Y", transcript);
let Z = z.commit(rZ, gens_n).compress();
Z.append_to_transcript(b"Z", transcript);
let alpha = b1.commit(&b2, gens_n).compress();
alpha.append_to_transcript(b"alpha", transcript);
let beta = b3.commit(&b4, gens_n).compress();
beta.append_to_transcript(b"beta", transcript);
let delta = {
let gens_X = &MultiCommitGens {
n: 1,
G: vec![X.decompress().unwrap()],
h: gens_n.h,
};
b3.commit(&b5, gens_X).compress()
};
delta.append_to_transcript(b"delta", transcript);
let c = transcript.challenge_scalar(b"c");
let z1 = b1 + c * x;
let z2 = b2 + c * rX;
let z3 = b3 + c * y;
let z4 = b4 + c * rY;
let z5 = b5 + c * (rZ - rX * y);
let z = [z1, z2, z3, z4, z5];
(
ProductProof {
alpha,
beta,
delta,
z,
},
X,
Y,
Z,
)
}
fn check_equality(
P: &CompressedGroup,
X: &CompressedGroup,
c: &Scalar,
gens_n: &MultiCommitGens,
z1: &Scalar,
z2: &Scalar,
) -> bool {
let lhs = (P.decompress().unwrap() + c * X.decompress().unwrap()).compress();
let rhs = z1.commit(z2, gens_n).compress();
lhs == rhs
}
pub fn verify(
&self,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
X: &CompressedGroup,
Y: &CompressedGroup,
Z: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
transcript.append_protocol_name(ProductProof::protocol_name());
X.append_to_transcript(b"X", transcript);
Y.append_to_transcript(b"Y", transcript);
Z.append_to_transcript(b"Z", transcript);
self.alpha.append_to_transcript(b"alpha", transcript);
self.beta.append_to_transcript(b"beta", transcript);
self.delta.append_to_transcript(b"delta", transcript);
let z1 = self.z[0];
let z2 = self.z[1];
let z3 = self.z[2];
let z4 = self.z[3];
let z5 = self.z[4];
let c = transcript.challenge_scalar(b"c");
if ProductProof::check_equality(&self.alpha, X, &c, gens_n, &z1, &z2)
&& ProductProof::check_equality(&self.beta, Y, &c, gens_n, &z3, &z4)
&& ProductProof::check_equality(
&self.delta,
Z,
&c,
&MultiCommitGens {
n: 1,
G: vec![X.unpack()?],
h: gens_n.h,
},
&z3,
&z5,
)
{
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct DotProductProof {
delta: CompressedGroup,
beta: CompressedGroup,
z: Vec<Scalar>,
z_delta: Scalar,
z_beta: Scalar,
}
impl DotProductProof {
fn protocol_name() -> &'static [u8] {
b"dot product proof"
}
pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
assert_eq!(a.len(), b.len());
(0..a.len()).map(|i| a[i] * b[i]).sum()
}
pub fn prove(
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
x_vec: &[Scalar],
blind_x: &Scalar,
a_vec: &[Scalar],
y: &Scalar,
blind_y: &Scalar,
) -> (DotProductProof, CompressedGroup, CompressedGroup) {
transcript.append_protocol_name(DotProductProof::protocol_name());
let n = x_vec.len();
assert_eq!(x_vec.len(), a_vec.len());
assert_eq!(gens_n.n, a_vec.len());
assert_eq!(gens_1.n, 1);
// produce randomness for the proofs
let d_vec = random_tape.random_vector(b"d_vec", n);
let r_delta = random_tape.random_scalar(b"r_delta");
let r_beta = random_tape.random_scalar(b"r_beta");
let Cx = x_vec.commit(blind_x, gens_n).compress();
Cx.append_to_transcript(b"Cx", transcript);
let Cy = y.commit(blind_y, gens_1).compress();
Cy.append_to_transcript(b"Cy", transcript);
a_vec.append_to_transcript(b"a", transcript);
let delta = d_vec.commit(&r_delta, gens_n).compress();
delta.append_to_transcript(b"delta", transcript);
let dotproduct_a_d = DotProductProof::compute_dotproduct(a_vec, &d_vec);
let beta = dotproduct_a_d.commit(&r_beta, gens_1).compress();
beta.append_to_transcript(b"beta", transcript);
let c = transcript.challenge_scalar(b"c");
let z = (0..d_vec.len())
.map(|i| c * x_vec[i] + d_vec[i])
.collect::<Vec<Scalar>>();
let z_delta = c * blind_x + r_delta;
let z_beta = c * blind_y + r_beta;
(
DotProductProof {
delta,
beta,
z,
z_delta,
z_beta,
},
Cx,
Cy,
)
}
pub fn verify(
&self,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
a: &[Scalar],
Cx: &CompressedGroup,
Cy: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
assert_eq!(gens_n.n, a.len());
assert_eq!(gens_1.n, 1);
transcript.append_protocol_name(DotProductProof::protocol_name());
Cx.append_to_transcript(b"Cx", transcript);
Cy.append_to_transcript(b"Cy", transcript);
a.append_to_transcript(b"a", transcript);
self.delta.append_to_transcript(b"delta", transcript);
self.beta.append_to_transcript(b"beta", transcript);
let c = transcript.challenge_scalar(b"c");
let mut result =
c * Cx.unpack()? + self.delta.unpack()? == self.z.commit(&self.z_delta, gens_n);
let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, a);
result &= c * Cy.unpack()? + self.beta.unpack()? == dotproduct_z_a.commit(&self.z_beta, gens_1);
if result {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
pub struct DotProductProofGens {
n: usize,
pub gens_n: MultiCommitGens,
pub gens_1: MultiCommitGens,
}
impl DotProductProofGens {
pub fn new(n: usize, label: &[u8]) -> Self {
let (gens_n, gens_1) = MultiCommitGens::new(n + 1, label).split_at(n);
DotProductProofGens { n, gens_n, gens_1 }
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct DotProductProofLog {
bullet_reduction_proof: BulletReductionProof,
delta: CompressedGroup,
beta: CompressedGroup,
z1: Scalar,
z2: Scalar,
}
impl DotProductProofLog {
fn protocol_name() -> &'static [u8] {
b"dot product proof (log)"
}
pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
assert_eq!(a.len(), b.len());
(0..a.len()).map(|i| a[i] * b[i]).sum()
}
pub fn prove(
gens: &DotProductProofGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
x_vec: &[Scalar],
blind_x: &Scalar,
a_vec: &[Scalar],
y: &Scalar,
blind_y: &Scalar,
) -> (DotProductProofLog, CompressedGroup, CompressedGroup) {
transcript.append_protocol_name(DotProductProofLog::protocol_name());
let n = x_vec.len();
assert_eq!(x_vec.len(), a_vec.len());
assert_eq!(gens.n, n);
// produce randomness for generating a proof
let d = random_tape.random_scalar(b"d");
let r_delta = random_tape.random_scalar(b"r_delta");
let r_beta = random_tape.random_scalar(b"r_beta");
let blinds_vec = {
let v1 = random_tape.random_vector(b"blinds_vec_1", 2 * n.log_2());
let v2 = random_tape.random_vector(b"blinds_vec_2", 2 * n.log_2());
(0..v1.len())
.map(|i| (v1[i], v2[i]))
.collect::<Vec<(Scalar, Scalar)>>()
};
let Cx = x_vec.commit(blind_x, &gens.gens_n).compress();
Cx.append_to_transcript(b"Cx", transcript);
let Cy = y.commit(blind_y, &gens.gens_1).compress();
Cy.append_to_transcript(b"Cy", transcript);
a_vec.append_to_transcript(b"a", transcript);
// sample a random base and scale the generator used for
// the output of the inner product
let r = transcript.challenge_scalar(b"r");
let gens_1_scaled = gens.gens_1.scale(&r);
let blind_Gamma = blind_x + r * blind_y;
let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) =
BulletReductionProof::prove(
transcript,
&gens_1_scaled.G[0],
&gens.gens_n.G,
&gens.gens_n.h,
x_vec,
a_vec,
&blind_Gamma,
&blinds_vec,
);
let y_hat = x_hat * a_hat;
let delta = {
let gens_hat = MultiCommitGens {
n: 1,
G: vec![g_hat],
h: gens.gens_1.h,
};
d.commit(&r_delta, &gens_hat).compress()
};
delta.append_to_transcript(b"delta", transcript);
let beta = d.commit(&r_beta, &gens_1_scaled).compress();
beta.append_to_transcript(b"beta", transcript);
let c = transcript.challenge_scalar(b"c");
let z1 = d + c * y_hat;
let z2 = a_hat * (c * rhat_Gamma + r_beta) + r_delta;
(
DotProductProofLog {
bullet_reduction_proof,
delta,
beta,
z1,
z2,
},
Cx,
Cy,
)
}
pub fn verify(
&self,
n: usize,
gens: &DotProductProofGens,
transcript: &mut Transcript,
a: &[Scalar],
Cx: &CompressedGroup,
Cy: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
assert_eq!(gens.n, n);
assert_eq!(a.len(), n);
transcript.append_protocol_name(DotProductProofLog::protocol_name());
Cx.append_to_transcript(b"Cx", transcript);
Cy.append_to_transcript(b"Cy", transcript);
a.append_to_transcript(b"a", transcript);
// sample a random base and scale the generator used for
// the output of the inner product
let r = transcript.challenge_scalar(b"r");
let gens_1_scaled = gens.gens_1.scale(&r);
let Gamma = Cx.unpack()? + r * Cy.unpack()?;
let (g_hat, Gamma_hat, a_hat) =
self
.bullet_reduction_proof
.verify(n, a, transcript, &Gamma, &gens.gens_n.G)?;
self.delta.append_to_transcript(b"delta", transcript);
self.beta.append_to_transcript(b"beta", transcript);
let c = transcript.challenge_scalar(b"c");
let c_s = &c;
let beta_s = self.beta.unpack()?;
let a_hat_s = &a_hat;
let delta_s = self.delta.unpack()?;
let z1_s = &self.z1;
let z2_s = &self.z2;
let lhs = ((Gamma_hat * c_s + beta_s) * a_hat_s + delta_s).compress();
let rhs = ((g_hat + gens_1_scaled.G[0] * a_hat_s) * z1_s + gens_1_scaled.h * z2_s).compress();
// a failed check must surface as an Err below rather than panic the verifier
if lhs == rhs {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use rand_core::OsRng;
#[test]
fn check_knowledgeproof() {
let mut csprng: OsRng = OsRng;
let gens_1 = MultiCommitGens::new(1, b"test-knowledgeproof");
let x = Scalar::random(&mut csprng);
let r = Scalar::random(&mut csprng);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, committed_value) =
KnowledgeProof::prove(&gens_1, &mut prover_transcript, &mut random_tape, &x, &r);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(&gens_1, &mut verifier_transcript, &committed_value)
.is_ok());
}
#[test]
fn check_equalityproof() {
let mut csprng: OsRng = OsRng;
let gens_1 = MultiCommitGens::new(1, b"test-equalityproof");
let v1 = Scalar::random(&mut csprng);
let v2 = v1;
let s1 = Scalar::random(&mut csprng);
let s2 = Scalar::random(&mut csprng);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, C1, C2) = EqualityProof::prove(
&gens_1,
&mut prover_transcript,
&mut random_tape,
&v1,
&s1,
&v2,
&s2,
);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(&gens_1, &mut verifier_transcript, &C1, &C2)
.is_ok());
}
#[test]
fn check_productproof() {
let mut csprng: OsRng = OsRng;
let gens_1 = MultiCommitGens::new(1, b"test-productproof");
let x = Scalar::random(&mut csprng);
let rX = Scalar::random(&mut csprng);
let y = Scalar::random(&mut csprng);
let rY = Scalar::random(&mut csprng);
let z = x * y;
let rZ = Scalar::random(&mut csprng);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, X, Y, Z) = ProductProof::prove(
&gens_1,
&mut prover_transcript,
&mut random_tape,
&x,
&rX,
&y,
&rY,
&z,
&rZ,
);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(&gens_1, &mut verifier_transcript, &X, &Y, &Z)
.is_ok());
}
#[test]
fn check_dotproductproof() {
let mut csprng: OsRng = OsRng;
let n = 1024;
let gens_1 = MultiCommitGens::new(1, b"test-two");
let gens_1024 = MultiCommitGens::new(n, b"test-1024");
let mut x: Vec<Scalar> = Vec::new();
let mut a: Vec<Scalar> = Vec::new();
for _ in 0..n {
x.push(Scalar::random(&mut csprng));
a.push(Scalar::random(&mut csprng));
}
let y = DotProductProofLog::compute_dotproduct(&x, &a);
let r_x = Scalar::random(&mut csprng);
let r_y = Scalar::random(&mut csprng);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, Cx, Cy) = DotProductProof::prove(
&gens_1,
&gens_1024,
&mut prover_transcript,
&mut random_tape,
&x,
&r_x,
&a,
&y,
&r_y,
);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(&gens_1, &gens_1024, &mut verifier_transcript, &a, &Cx, &Cy)
.is_ok());
}
#[test]
fn check_dotproductproof_log() {
let mut csprng: OsRng = OsRng;
let n = 1024;
let gens = DotProductProofGens::new(n, b"test-1024");
let x: Vec<Scalar> = (0..n).map(|_i| Scalar::random(&mut csprng)).collect();
let a: Vec<Scalar> = (0..n).map(|_i| Scalar::random(&mut csprng)).collect();
let y = DotProductProof::compute_dotproduct(&x, &a);
let r_x = Scalar::random(&mut csprng);
let r_y = Scalar::random(&mut csprng);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, Cx, Cy) = DotProductProofLog::prove(
&gens,
&mut prover_transcript,
&mut random_tape,
&x,
&r_x,
&a,
&y,
&r_y,
);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(n, &gens, &mut verifier_transcript, &a, &Cx, &Cy)
.is_ok());
}
}

View File

@@ -0,0 +1,486 @@
#![allow(dead_code)]
use super::dense_mlpoly::DensePolynomial;
use super::dense_mlpoly::EqPolynomial;
use super::math::Math;
use super::scalar::Scalar;
use super::sumcheck::SumcheckInstanceProof;
use super::transcript::ProofTranscript;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
#[derive(Debug)]
pub struct ProductCircuit {
left_vec: Vec<DensePolynomial>,
right_vec: Vec<DensePolynomial>,
}
impl ProductCircuit {
fn compute_layer(
inp_left: &DensePolynomial,
inp_right: &DensePolynomial,
) -> (DensePolynomial, DensePolynomial) {
let len = inp_left.len() + inp_right.len();
let outp_left = (0..len / 4)
.map(|i| inp_left[i] * inp_right[i])
.collect::<Vec<Scalar>>();
let outp_right = (len / 4..len / 2)
.map(|i| inp_left[i] * inp_right[i])
.collect::<Vec<Scalar>>();
(
DensePolynomial::new(outp_left),
DensePolynomial::new(outp_right),
)
}
pub fn new(poly: &DensePolynomial) -> Self {
let mut left_vec: Vec<DensePolynomial> = Vec::new();
let mut right_vec: Vec<DensePolynomial> = Vec::new();
let num_layers = poly.len().log_2();
let (outp_left, outp_right) = poly.split(poly.len() / 2);
left_vec.push(outp_left);
right_vec.push(outp_right);
for i in 0..num_layers - 1 {
let (outp_left, outp_right) = ProductCircuit::compute_layer(&left_vec[i], &right_vec[i]);
left_vec.push(outp_left);
right_vec.push(outp_right);
}
ProductCircuit {
left_vec,
right_vec,
}
}
pub fn evaluate(&self) -> Scalar {
let len = self.left_vec.len();
assert_eq!(self.left_vec[len - 1].get_num_vars(), 0);
assert_eq!(self.right_vec[len - 1].get_num_vars(), 0);
self.left_vec[len - 1][0] * self.right_vec[len - 1][0]
}
}
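// An added worked sketch: a ProductCircuit over the leaves [1, 2, 3, 4]
// multiplies pairwise layer by layer ([1*3, 2*4] = [3, 8], then 3 * 8), so it
// evaluates to the product of all leaves, 24. The ScalarFromPrimitives import
// follows the same path used by the other test modules in this crate.
#[cfg(test)]
mod product_circuit_tests {
use super::super::scalar::ScalarFromPrimitives;
use super::*;
#[test]
fn check_product_circuit_eval() {
let leaves = vec![
(1_usize).to_scalar(),
(2_usize).to_scalar(),
(3_usize).to_scalar(),
(4_usize).to_scalar(),
];
let circuit = ProductCircuit::new(&DensePolynomial::new(leaves));
assert_eq!(circuit.evaluate(), (24_usize).to_scalar());
}
}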
pub struct DotProductCircuit {
left: DensePolynomial,
right: DensePolynomial,
weight: DensePolynomial,
}
impl DotProductCircuit {
pub fn new(left: DensePolynomial, right: DensePolynomial, weight: DensePolynomial) -> Self {
assert_eq!(left.len(), right.len());
assert_eq!(left.len(), weight.len());
DotProductCircuit {
left,
right,
weight,
}
}
pub fn evaluate(&self) -> Scalar {
(0..self.left.len())
.map(|i| self.left[i] * self.right[i] * self.weight[i])
.sum()
}
pub fn split(&mut self) -> (DotProductCircuit, DotProductCircuit) {
let idx = self.left.len() / 2;
assert_eq!(idx * 2, self.left.len());
let (l1, l2) = self.left.split(idx);
let (r1, r2) = self.right.split(idx);
let (w1, w2) = self.weight.split(idx);
(
DotProductCircuit {
left: l1,
right: r1,
weight: w1,
},
DotProductCircuit {
left: l2,
right: r2,
weight: w2,
},
)
}
}
#[allow(dead_code)]
#[derive(Debug, Serialize, Deserialize)]
pub struct LayerProof {
pub proof: SumcheckInstanceProof,
pub claims: Vec<Scalar>,
}
#[allow(dead_code)]
impl LayerProof {
pub fn verify(
&self,
claim: Scalar,
num_rounds: usize,
degree_bound: usize,
transcript: &mut Transcript,
) -> (Scalar, Vec<Scalar>) {
self
.proof
.verify(claim, num_rounds, degree_bound, transcript)
.unwrap()
}
}
#[allow(dead_code)]
#[derive(Debug, Serialize, Deserialize)]
pub struct LayerProofBatched {
pub proof: SumcheckInstanceProof,
pub claims_prod_left: Vec<Scalar>,
pub claims_prod_right: Vec<Scalar>,
}
#[allow(dead_code)]
impl LayerProofBatched {
pub fn verify(
&self,
claim: Scalar,
num_rounds: usize,
degree_bound: usize,
transcript: &mut Transcript,
) -> (Scalar, Vec<Scalar>) {
self
.proof
.verify(claim, num_rounds, degree_bound, transcript)
.unwrap()
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ProductCircuitEvalProof {
proof: Vec<LayerProof>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ProductCircuitEvalProofBatched {
proof: Vec<LayerProofBatched>,
claims_dotp: (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>),
}
impl ProductCircuitEvalProof {
#![allow(dead_code)]
pub fn prove(
circuit: &mut ProductCircuit,
transcript: &mut Transcript,
) -> (Self, Scalar, Vec<Scalar>) {
let mut proof: Vec<LayerProof> = Vec::new();
let num_layers = circuit.left_vec.len();
let mut claim = circuit.evaluate();
let mut rand = Vec::new();
for layer_id in (0..num_layers).rev() {
let len = circuit.left_vec[layer_id].len() + circuit.right_vec[layer_id].len();
let mut poly_C = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
assert_eq!(poly_C.len(), len / 2);
let num_rounds_prod = poly_C.len().log_2();
let comb_func_prod = |poly_A_comp: &Scalar,
poly_B_comp: &Scalar,
poly_C_comp: &Scalar|
-> Scalar { poly_A_comp * poly_B_comp * poly_C_comp };
let (proof_prod, rand_prod, claims_prod) = SumcheckInstanceProof::prove_cubic(
&claim,
num_rounds_prod,
&mut circuit.left_vec[layer_id],
&mut circuit.right_vec[layer_id],
&mut poly_C,
comb_func_prod,
transcript,
);
transcript.append_scalar(b"claim_prod_left", &claims_prod[0]);
transcript.append_scalar(b"claim_prod_right", &claims_prod[1]);
// produce a random challenge
let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
claim = claims_prod[0] + r_layer * (claims_prod[1] - claims_prod[0]);
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
proof.push(LayerProof {
proof: proof_prod,
claims: claims_prod[0..claims_prod.len() - 1].to_vec(),
});
}
(ProductCircuitEvalProof { proof }, claim, rand)
}
pub fn verify(
&self,
eval: Scalar,
len: usize,
transcript: &mut Transcript,
) -> (Scalar, Vec<Scalar>) {
let num_layers = len.log_2();
let mut claim = eval;
let mut rand: Vec<Scalar> = Vec::new();
assert_eq!(self.proof.len(), num_layers);
for (num_rounds, i) in (0..num_layers).enumerate() {
let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);
let claims_prod = &self.proof[i].claims;
transcript.append_scalar(b"claim_prod_left", &claims_prod[0]);
transcript.append_scalar(b"claim_prod_right", &claims_prod[1]);
assert_eq!(rand.len(), rand_prod.len());
let eq: Scalar = (0..rand.len())
.map(|i| {
rand[i] * rand_prod[i] + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i])
})
.product();
assert_eq!(claims_prod[0] * claims_prod[1] * eq, claim_last);
// produce a random challenge
let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
claim = (Scalar::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1];
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
}
(claim, rand)
}
}
impl ProductCircuitEvalProofBatched {
pub fn prove(
prod_circuit_vec: &mut Vec<&mut ProductCircuit>,
dotp_circuit_vec: &mut Vec<&mut DotProductCircuit>,
transcript: &mut Transcript,
) -> (Self, Vec<Scalar>) {
assert!(!prod_circuit_vec.is_empty());
let mut claims_dotp_final = (Vec::new(), Vec::new(), Vec::new());
let mut proof_layers: Vec<LayerProofBatched> = Vec::new();
let num_layers = prod_circuit_vec[0].left_vec.len();
let mut claims_to_verify = (0..prod_circuit_vec.len())
.map(|i| prod_circuit_vec[i].evaluate())
.collect::<Vec<Scalar>>();
let mut rand = Vec::new();
for layer_id in (0..num_layers).rev() {
// prepare parallel instances that share poly_C first
let len = prod_circuit_vec[0].left_vec[layer_id].len()
+ prod_circuit_vec[0].right_vec[layer_id].len();
let mut poly_C_par = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
assert_eq!(poly_C_par.len(), len / 2);
let num_rounds_prod = poly_C_par.len().log_2();
let comb_func_prod = |poly_A_comp: &Scalar,
poly_B_comp: &Scalar,
poly_C_comp: &Scalar|
-> Scalar { poly_A_comp * poly_B_comp * poly_C_comp };
let mut poly_A_batched_par: Vec<&mut DensePolynomial> = Vec::new();
let mut poly_B_batched_par: Vec<&mut DensePolynomial> = Vec::new();
for prod_circuit in prod_circuit_vec.iter_mut() {
poly_A_batched_par.push(&mut prod_circuit.left_vec[layer_id]);
poly_B_batched_par.push(&mut prod_circuit.right_vec[layer_id])
}
let poly_vec_par = (
&mut poly_A_batched_par,
&mut poly_B_batched_par,
&mut poly_C_par,
);
// prepare sequential instances that don't share poly_C
let mut poly_A_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
let mut poly_B_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
let mut poly_C_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
if layer_id == 0 && !dotp_circuit_vec.is_empty() {
// add additional claims
for item in dotp_circuit_vec.iter() {
claims_to_verify.push(item.evaluate());
assert_eq!(len / 2, item.left.len());
assert_eq!(len / 2, item.right.len());
assert_eq!(len / 2, item.weight.len());
}
for dotp_circuit in dotp_circuit_vec.iter_mut() {
poly_A_batched_seq.push(&mut dotp_circuit.left);
poly_B_batched_seq.push(&mut dotp_circuit.right);
poly_C_batched_seq.push(&mut dotp_circuit.weight);
}
}
let poly_vec_seq = (
&mut poly_A_batched_seq,
&mut poly_B_batched_seq,
&mut poly_C_batched_seq,
);
// produce a fresh set of coeffs and a joint claim
let coeff_vec =
transcript.challenge_vector(b"rand_coeffs_next_layer", claims_to_verify.len());
let claim = (0..claims_to_verify.len())
.map(|i| claims_to_verify[i] * coeff_vec[i])
.sum();
let (proof, rand_prod, claims_prod, claims_dotp) = SumcheckInstanceProof::prove_cubic_batched(
&claim,
num_rounds_prod,
poly_vec_par,
poly_vec_seq,
&coeff_vec,
comb_func_prod,
transcript,
);
let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod;
for i in 0..prod_circuit_vec.len() {
transcript.append_scalar(b"claim_prod_left", &claims_prod_left[i]);
transcript.append_scalar(b"claim_prod_right", &claims_prod_right[i]);
}
if layer_id == 0 && !dotp_circuit_vec.is_empty() {
let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = claims_dotp;
for i in 0..dotp_circuit_vec.len() {
transcript.append_scalar(b"claim_dotp_left", &claims_dotp_left[i]);
transcript.append_scalar(b"claim_dotp_right", &claims_dotp_right[i]);
transcript.append_scalar(b"claim_dotp_weight", &claims_dotp_weight[i]);
}
claims_dotp_final = (claims_dotp_left, claims_dotp_right, claims_dotp_weight);
}
// produce a random challenge to condense two claims into a single claim
let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
claims_to_verify = (0..prod_circuit_vec.len())
.map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
.collect::<Vec<Scalar>>();
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
proof_layers.push(LayerProofBatched {
proof,
claims_prod_left,
claims_prod_right,
});
}
(
ProductCircuitEvalProofBatched {
proof: proof_layers,
claims_dotp: claims_dotp_final,
},
rand,
)
}
pub fn verify(
&self,
claims_prod_vec: &[Scalar],
claims_dotp_vec: &[Scalar],
len: usize,
transcript: &mut Transcript,
) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
let num_layers = len.log_2();
let mut rand: Vec<Scalar> = Vec::new();
assert_eq!(self.proof.len(), num_layers);
let mut claims_to_verify = claims_prod_vec.to_owned();
let mut claims_to_verify_dotp: Vec<Scalar> = Vec::new();
for (num_rounds, i) in (0..num_layers).enumerate() {
if i == num_layers - 1 {
claims_to_verify.extend(claims_dotp_vec);
}
// produce random coefficients, one for each instance
let coeff_vec =
transcript.challenge_vector(b"rand_coeffs_next_layer", claims_to_verify.len());
// produce a joint claim
let claim = (0..claims_to_verify.len())
.map(|i| claims_to_verify[i] * coeff_vec[i])
.sum();
let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);
let claims_prod_left = &self.proof[i].claims_prod_left;
let claims_prod_right = &self.proof[i].claims_prod_right;
assert_eq!(claims_prod_left.len(), claims_prod_vec.len());
assert_eq!(claims_prod_right.len(), claims_prod_vec.len());
for i in 0..claims_prod_vec.len() {
transcript.append_scalar(b"claim_prod_left", &claims_prod_left[i]);
transcript.append_scalar(b"claim_prod_right", &claims_prod_right[i]);
}
assert_eq!(rand.len(), rand_prod.len());
let eq: Scalar = (0..rand.len())
.map(|i| {
rand[i] * rand_prod[i] + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i])
})
.product();
let mut claim_expected: Scalar = (0..claims_prod_vec.len())
.map(|i| coeff_vec[i] * (claims_prod_left[i] * claims_prod_right[i] * eq))
.sum();
// add claims from the dotp instances
if i == num_layers - 1 {
let num_prod_instances = claims_prod_vec.len();
let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp;
for i in 0..claims_dotp_left.len() {
transcript.append_scalar(b"claim_dotp_left", &claims_dotp_left[i]);
transcript.append_scalar(b"claim_dotp_right", &claims_dotp_right[i]);
transcript.append_scalar(b"claim_dotp_weight", &claims_dotp_weight[i]);
claim_expected += coeff_vec[i + num_prod_instances]
* claims_dotp_left[i]
* claims_dotp_right[i]
* claims_dotp_weight[i];
}
}
assert_eq!(claim_expected, claim_last);
// produce a random challenge
let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
claims_to_verify = (0..claims_prod_left.len())
.map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
.collect::<Vec<Scalar>>();
// add claims to verify for dotp circuit
if i == num_layers - 1 {
let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp;
for i in 0..claims_dotp_vec.len() / 2 {
// combine left claims
let claim_left = claims_dotp_left[2 * i]
+ r_layer * (claims_dotp_left[2 * i + 1] - claims_dotp_left[2 * i]);
let claim_right = claims_dotp_right[2 * i]
+ r_layer * (claims_dotp_right[2 * i + 1] - claims_dotp_right[2 * i]);
let claim_weight = claims_dotp_weight[2 * i]
+ r_layer * (claims_dotp_weight[2 * i + 1] - claims_dotp_weight[2 * i]);
claims_to_verify_dotp.push(claim_left);
claims_to_verify_dotp.push(claim_right);
claims_to_verify_dotp.push(claim_weight);
}
}
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
}
(claims_to_verify, claims_to_verify_dotp, rand)
}
}

View File

@@ -0,0 +1,367 @@
use crate::transcript::AppendToTranscript;
use super::dense_mlpoly::DensePolynomial;
use super::errors::ProofVerifyError;
use super::math::Math;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::sparse_mlpoly::{
MultiSparseMatPolynomialAsDense, SparseMatEntry, SparseMatPolyCommitment,
SparseMatPolyCommitmentGens, SparseMatPolyEvalProof, SparseMatPolynomial,
};
use super::timer::Timer;
use flate2::{write::ZlibEncoder, Compression};
use merlin::Transcript;
use rand_core::OsRng;
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize)]
pub struct R1CSInstance {
num_cons: usize,
num_vars: usize,
num_inputs: usize,
A: SparseMatPolynomial,
B: SparseMatPolynomial,
C: SparseMatPolynomial,
}
pub struct R1CSCommitmentGens {
gens: SparseMatPolyCommitmentGens,
}
impl R1CSCommitmentGens {
pub fn new(
label: &'static [u8],
num_cons: usize,
num_vars: usize,
num_inputs: usize,
num_nz_entries: usize,
) -> R1CSCommitmentGens {
assert!(num_inputs < num_vars);
let num_poly_vars_x = num_cons.log_2();
let num_poly_vars_y = (2 * num_vars).log_2();
let gens =
SparseMatPolyCommitmentGens::new(label, num_poly_vars_x, num_poly_vars_y, num_nz_entries, 3);
R1CSCommitmentGens { gens }
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct R1CSCommitment {
num_cons: usize,
num_vars: usize,
num_inputs: usize,
comm: SparseMatPolyCommitment,
}
impl AppendToTranscript for R1CSCommitment {
fn append_to_transcript(&self, _label: &'static [u8], transcript: &mut Transcript) {
transcript.append_u64(b"num_cons", self.num_cons as u64);
transcript.append_u64(b"num_vars", self.num_vars as u64);
transcript.append_u64(b"num_inputs", self.num_inputs as u64);
self.comm.append_to_transcript(b"comm", transcript);
}
}
pub struct R1CSDecommitment {
dense: MultiSparseMatPolynomialAsDense,
}
impl R1CSCommitment {
pub fn get_num_cons(&self) -> usize {
self.num_cons
}
pub fn get_num_vars(&self) -> usize {
self.num_vars
}
pub fn get_num_inputs(&self) -> usize {
self.num_inputs
}
}
impl R1CSInstance {
pub fn new(
num_cons: usize,
num_vars: usize,
num_inputs: usize,
A: &[(usize, usize, Scalar)],
B: &[(usize, usize, Scalar)],
C: &[(usize, usize, Scalar)],
) -> R1CSInstance {
Timer::print(&format!("number_of_constraints {}", num_cons));
Timer::print(&format!("number_of_variables {}", num_vars));
Timer::print(&format!("number_of_inputs {}", num_inputs));
Timer::print(&format!("number_non-zero_entries_A {}", A.len()));
Timer::print(&format!("number_non-zero_entries_B {}", B.len()));
Timer::print(&format!("number_non-zero_entries_C {}", C.len()));
// check that num_cons is a power of 2
assert_eq!(num_cons.next_power_of_two(), num_cons);
// check that num_vars is a power of 2
assert_eq!(num_vars.next_power_of_two(), num_vars);
// check that number_inputs + 1 <= num_vars
assert!(num_inputs < num_vars);
// no errors, so create polynomials
let num_poly_vars_x = num_cons.log_2();
let num_poly_vars_y = (2 * num_vars).log_2();
let mat_A = (0..A.len())
.map(|i| SparseMatEntry::new(A[i].0, A[i].1, A[i].2))
.collect::<Vec<SparseMatEntry>>();
let mat_B = (0..B.len())
.map(|i| SparseMatEntry::new(B[i].0, B[i].1, B[i].2))
.collect::<Vec<SparseMatEntry>>();
let mat_C = (0..C.len())
.map(|i| SparseMatEntry::new(C[i].0, C[i].1, C[i].2))
.collect::<Vec<SparseMatEntry>>();
let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_A);
let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_B);
let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_C);
R1CSInstance {
num_cons,
num_vars,
num_inputs,
A: poly_A,
B: poly_B,
C: poly_C,
}
}
pub fn get_num_vars(&self) -> usize {
self.num_vars
}
pub fn get_num_cons(&self) -> usize {
self.num_cons
}
pub fn get_num_inputs(&self) -> usize {
self.num_inputs
}
pub fn get_digest(&self) -> Vec<u8> {
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
bincode::serialize_into(&mut encoder, &self).unwrap();
encoder.finish().unwrap()
}
pub fn produce_synthetic_r1cs(
num_cons: usize,
num_vars: usize,
num_inputs: usize,
) -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
Timer::print(&format!("number_of_constraints {}", num_cons));
Timer::print(&format!("number_of_variables {}", num_vars));
Timer::print(&format!("number_of_inputs {}", num_inputs));
let mut csprng: OsRng = OsRng;
// assert that num_cons and num_vars are powers of 2
assert_eq!((num_cons.log_2()).pow2(), num_cons);
assert_eq!((num_vars.log_2()).pow2(), num_vars);
// num_inputs + 1 <= num_vars
assert!(num_inputs < num_vars);
// z is organized as [vars,1,io]
let size_z = num_vars + num_inputs + 1;
// produce a random satisfying assignment
let Z = {
let mut Z: Vec<Scalar> = (0..size_z)
.map(|_i| Scalar::random(&mut csprng))
.collect::<Vec<Scalar>>();
Z[num_vars] = Scalar::one(); // set the constant term to 1
Z
};
// three sparse matrices
let mut A: Vec<SparseMatEntry> = Vec::new();
let mut B: Vec<SparseMatEntry> = Vec::new();
let mut C: Vec<SparseMatEntry> = Vec::new();
let one = Scalar::one();
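// each constraint i enforces z[A_idx] * z[B_idx] = coeff * z[C_idx], with the
// C-side coefficient chosen so that the random assignment Z satisfies it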
for i in 0..num_cons {
let A_idx = i % size_z;
let B_idx = (i + 2) % size_z;
A.push(SparseMatEntry::new(i, A_idx, one));
B.push(SparseMatEntry::new(i, B_idx, one));
let AB_val = Z[A_idx] * Z[B_idx];
let C_idx = (i + 3) % size_z;
let C_val = Z[C_idx];
if C_val == Scalar::zero() {
C.push(SparseMatEntry::new(i, num_vars, AB_val));
} else {
C.push(SparseMatEntry::new(
i,
C_idx,
AB_val * C_val.invert().unwrap(),
));
}
}
Timer::print(&format!("number_non-zero_entries_A {}", A.len()));
Timer::print(&format!("number_non-zero_entries_B {}", B.len()));
Timer::print(&format!("number_non-zero_entries_C {}", C.len()));
let num_poly_vars_x = num_cons.log_2();
let num_poly_vars_y = (2 * num_vars).log_2();
let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, A);
let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, B);
let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, C);
let inst = R1CSInstance {
num_cons,
num_vars,
num_inputs,
A: poly_A,
B: poly_B,
C: poly_C,
};
assert!(inst.is_sat(&Z[..num_vars], &Z[num_vars + 1..]));
(inst, Z[..num_vars].to_vec(), Z[num_vars + 1..].to_vec())
}
pub fn is_sat(&self, vars: &[Scalar], input: &[Scalar]) -> bool {
assert_eq!(vars.len(), self.num_vars);
assert_eq!(input.len(), self.num_inputs);
let z = {
let mut z = vars.to_vec();
z.push(Scalar::one());
z.extend(input);
z
};
// verify if Az * Bz - Cz = [0...]
let Az = self
.A
.multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
let Bz = self
.B
.multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
let Cz = self
.C
.multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
assert_eq!(Az.len(), self.num_cons);
assert_eq!(Bz.len(), self.num_cons);
assert_eq!(Cz.len(), self.num_cons);
let res: usize = (0..self.num_cons)
.map(|i| usize::from(Az[i] * Bz[i] != Cz[i]))
.sum();
res == 0
}
pub fn multiply_vec(
&self,
num_rows: usize,
num_cols: usize,
z: &[Scalar],
) -> (DensePolynomial, DensePolynomial, DensePolynomial) {
assert_eq!(num_rows, self.num_cons);
assert_eq!(z.len(), num_cols);
assert!(num_cols > self.num_vars);
(
DensePolynomial::new(self.A.multiply_vec(num_rows, num_cols, z)),
DensePolynomial::new(self.B.multiply_vec(num_rows, num_cols, z)),
DensePolynomial::new(self.C.multiply_vec(num_rows, num_cols, z)),
)
}
pub fn compute_eval_table_sparse(
&self,
num_rows: usize,
num_cols: usize,
evals: &[Scalar],
) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
assert_eq!(num_rows, self.num_cons);
assert!(num_cols > self.num_vars);
let evals_A = self.A.compute_eval_table_sparse(evals, num_rows, num_cols);
let evals_B = self.B.compute_eval_table_sparse(evals, num_rows, num_cols);
let evals_C = self.C.compute_eval_table_sparse(evals, num_rows, num_cols);
(evals_A, evals_B, evals_C)
}
pub fn evaluate(&self, rx: &[Scalar], ry: &[Scalar]) -> (Scalar, Scalar, Scalar) {
let evals = SparseMatPolynomial::multi_evaluate(&[&self.A, &self.B, &self.C], rx, ry);
(evals[0], evals[1], evals[2])
}
pub fn commit(&self, gens: &R1CSCommitmentGens) -> (R1CSCommitment, R1CSDecommitment) {
let (comm, dense) = SparseMatPolynomial::multi_commit(&[&self.A, &self.B, &self.C], &gens.gens);
let r1cs_comm = R1CSCommitment {
num_cons: self.num_cons,
num_vars: self.num_vars,
num_inputs: self.num_inputs,
comm,
};
let r1cs_decomm = R1CSDecommitment { dense };
(r1cs_comm, r1cs_decomm)
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct R1CSEvalProof {
proof: SparseMatPolyEvalProof,
}
impl R1CSEvalProof {
pub fn prove(
decomm: &R1CSDecommitment,
rx: &[Scalar], // point at which the polynomial is evaluated
ry: &[Scalar],
evals: &(Scalar, Scalar, Scalar),
gens: &R1CSCommitmentGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> R1CSEvalProof {
let timer = Timer::new("R1CSEvalProof::prove");
let proof = SparseMatPolyEvalProof::prove(
&decomm.dense,
rx,
ry,
&[evals.0, evals.1, evals.2],
&gens.gens,
transcript,
random_tape,
);
timer.stop();
R1CSEvalProof { proof }
}
pub fn verify(
&self,
comm: &R1CSCommitment,
rx: &[Scalar], // point at which the R1CS matrix polynomials are evaluated
ry: &[Scalar],
evals: &(Scalar, Scalar, Scalar),
gens: &R1CSCommitmentGens,
transcript: &mut Transcript,
) -> Result<(), ProofVerifyError> {
self.proof.verify(
&comm.comm,
rx,
ry,
&[evals.0, evals.1, evals.2],
&gens.gens,
transcript,
)
}
}

View File

@@ -0,0 +1,608 @@
#![allow(clippy::too_many_arguments)]
use super::commitments::{Commitments, MultiCommitGens};
use super::dense_mlpoly::{
DensePolynomial, EqPolynomial, PolyCommitment, PolyCommitmentGens, PolyEvalProof,
};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::math::Math;
use super::nizk::{EqualityProof, KnowledgeProof, ProductProof};
use super::r1csinstance::R1CSInstance;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::sparse_mlpoly::{SparsePolyEntry, SparsePolynomial};
use super::sumcheck::ZKSumcheckInstanceProof;
use super::timer::Timer;
use super::transcript::{AppendToTranscript, ProofTranscript};
use crate::group::DecompressEncodedPoint;
use core::iter;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug)]
pub struct R1CSProof {
comm_vars: PolyCommitment,
sc_proof_phase1: ZKSumcheckInstanceProof,
claims_phase2: (
CompressedGroup,
CompressedGroup,
CompressedGroup,
CompressedGroup,
),
pok_claims_phase2: (KnowledgeProof, ProductProof),
proof_eq_sc_phase1: EqualityProof,
sc_proof_phase2: ZKSumcheckInstanceProof,
comm_vars_at_ry: CompressedGroup,
proof_eval_vars_at_ry: PolyEvalProof,
proof_eq_sc_phase2: EqualityProof,
}
pub struct R1CSSumcheckGens {
gens_1: MultiCommitGens,
gens_3: MultiCommitGens,
gens_4: MultiCommitGens,
}
// TODO: fix passing gens_1_ref
impl R1CSSumcheckGens {
pub fn new(label: &'static [u8], gens_1_ref: &MultiCommitGens) -> Self {
let gens_1 = gens_1_ref.clone();
let gens_3 = MultiCommitGens::new(3, label);
let gens_4 = MultiCommitGens::new(4, label);
R1CSSumcheckGens {
gens_1,
gens_3,
gens_4,
}
}
}
pub struct R1CSGens {
gens_sc: R1CSSumcheckGens,
gens_pc: PolyCommitmentGens,
}
impl R1CSGens {
pub fn new(label: &'static [u8], _num_cons: usize, num_vars: usize) -> Self {
let num_poly_vars = num_vars.log_2();
let gens_pc = PolyCommitmentGens::new(num_poly_vars, label);
let gens_sc = R1CSSumcheckGens::new(label, &gens_pc.gens.gens_1);
R1CSGens { gens_sc, gens_pc }
}
}
impl R1CSProof {
fn prove_phase_one(
num_rounds: usize,
evals_tau: &mut DensePolynomial,
evals_Az: &mut DensePolynomial,
evals_Bz: &mut DensePolynomial,
evals_Cz: &mut DensePolynomial,
gens: &R1CSSumcheckGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> (ZKSumcheckInstanceProof, Vec<Scalar>, Vec<Scalar>, Scalar) {
let comb_func = |poly_A_comp: &Scalar,
poly_B_comp: &Scalar,
poly_C_comp: &Scalar,
poly_D_comp: &Scalar|
-> Scalar { poly_A_comp * (poly_B_comp * poly_C_comp - poly_D_comp) };
let (sc_proof_phase_one, r, claims, blind_claim_postsc) =
ZKSumcheckInstanceProof::prove_cubic_with_additive_term(
&Scalar::zero(), // claim is zero
&Scalar::zero(), // blind for claim is also zero
num_rounds,
evals_tau,
evals_Az,
evals_Bz,
evals_Cz,
comb_func,
&gens.gens_1,
&gens.gens_4,
transcript,
random_tape,
);
(sc_proof_phase_one, r, claims, blind_claim_postsc)
}
fn prove_phase_two(
num_rounds: usize,
claim: &Scalar,
blind_claim: &Scalar,
evals_z: &mut DensePolynomial,
evals_ABC: &mut DensePolynomial,
gens: &R1CSSumcheckGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> (ZKSumcheckInstanceProof, Vec<Scalar>, Vec<Scalar>, Scalar) {
let comb_func =
|poly_A_comp: &Scalar, poly_B_comp: &Scalar| -> Scalar { poly_A_comp * poly_B_comp };
let (sc_proof_phase_two, r, claims, blind_claim_postsc) = ZKSumcheckInstanceProof::prove_quad(
claim,
blind_claim,
num_rounds,
evals_z,
evals_ABC,
comb_func,
&gens.gens_1,
&gens.gens_3,
transcript,
random_tape,
);
(sc_proof_phase_two, r, claims, blind_claim_postsc)
}
fn protocol_name() -> &'static [u8] {
b"R1CS proof"
}
pub fn prove(
inst: &R1CSInstance,
vars: Vec<Scalar>,
input: &[Scalar],
gens: &R1CSGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> (R1CSProof, Vec<Scalar>, Vec<Scalar>) {
let timer_prove = Timer::new("R1CSProof::prove");
transcript.append_protocol_name(R1CSProof::protocol_name());
// we currently require |inputs| + 1 to be at most the number of vars
assert!(input.len() < vars.len());
input.append_to_transcript(b"input", transcript);
let timer_commit = Timer::new("polycommit");
let (poly_vars, comm_vars, blinds_vars) = {
// create a multilinear polynomial using the supplied assignment for variables
let poly_vars = DensePolynomial::new(vars.clone());
// produce a commitment to the satisfying assignment
let (comm_vars, blinds_vars) = poly_vars.commit(&gens.gens_pc, Some(random_tape));
// add the commitment to the prover's transcript
comm_vars.append_to_transcript(b"poly_commitment", transcript);
(poly_vars, comm_vars, blinds_vars)
};
timer_commit.stop();
let timer_sc_proof_phase1 = Timer::new("prove_sc_phase_one");
// append input to variables to create a single vector z
let z = {
let num_inputs = input.len();
let num_vars = vars.len();
let mut z = vars;
z.push(Scalar::one()); // add the constant term to z
z.extend(input);
z.extend(&vec![Scalar::zero(); num_vars - num_inputs - 1]); // we will pad with zeros
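// z now has length 2 * num_vars, a power of two, matching the number of sum-check variables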
z
};
// derive the verifier's challenge tau
let (num_rounds_x, num_rounds_y) = (inst.get_num_cons().log_2(), z.len().log_2());
let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
// compute the initial evaluation table for R(\tau, x)
let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau).evals());
let (mut poly_Az, mut poly_Bz, mut poly_Cz) =
inst.multiply_vec(inst.get_num_cons(), z.len(), &z);
let (sc_proof_phase1, rx, _claims_phase1, blind_claim_postsc1) = R1CSProof::prove_phase_one(
num_rounds_x,
&mut poly_tau,
&mut poly_Az,
&mut poly_Bz,
&mut poly_Cz,
&gens.gens_sc,
transcript,
random_tape,
);
assert_eq!(poly_tau.len(), 1);
assert_eq!(poly_Az.len(), 1);
assert_eq!(poly_Bz.len(), 1);
assert_eq!(poly_Cz.len(), 1);
timer_sc_proof_phase1.stop();
let (tau_claim, Az_claim, Bz_claim, Cz_claim) =
(&poly_tau[0], &poly_Az[0], &poly_Bz[0], &poly_Cz[0]);
let (Az_blind, Bz_blind, Cz_blind, prod_Az_Bz_blind) = (
random_tape.random_scalar(b"Az_blind"),
random_tape.random_scalar(b"Bz_blind"),
random_tape.random_scalar(b"Cz_blind"),
random_tape.random_scalar(b"prod_Az_Bz_blind"),
);
let (pok_Cz_claim, comm_Cz_claim) = {
KnowledgeProof::prove(
&gens.gens_sc.gens_1,
transcript,
random_tape,
Cz_claim,
&Cz_blind,
)
};
let (proof_prod, comm_Az_claim, comm_Bz_claim, comm_prod_Az_Bz_claims) = {
let prod = Az_claim * Bz_claim;
ProductProof::prove(
&gens.gens_sc.gens_1,
transcript,
random_tape,
Az_claim,
&Az_blind,
Bz_claim,
&Bz_blind,
&prod,
&prod_Az_Bz_blind,
)
};
comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript);
comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript);
comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript);
comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript);
// prove the final step of sum-check #1
let taus_bound_rx = tau_claim;
let blind_expected_claim_postsc1 = taus_bound_rx * (prod_Az_Bz_blind - Cz_blind);
let claim_post_phase1 = (Az_claim * Bz_claim - Cz_claim) * taus_bound_rx;
let (proof_eq_sc_phase1, _C1, _C2) = EqualityProof::prove(
&gens.gens_sc.gens_1,
transcript,
random_tape,
&claim_post_phase1,
&blind_expected_claim_postsc1,
&claim_post_phase1,
&blind_claim_postsc1,
);
let timer_sc_proof_phase2 = Timer::new("prove_sc_phase_two");
// combine the three claims into a single claim
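// note: the misspelled labels below are Fiat-Shamir domain separators; the
// verifier must hash byte-identical strings, so they are left untouched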
let r_A = transcript.challenge_scalar(b"challenege_Az");
let r_B = transcript.challenge_scalar(b"challenege_Bz");
let r_C = transcript.challenge_scalar(b"challenege_Cz");
let claim_phase2 = r_A * Az_claim + r_B * Bz_claim + r_C * Cz_claim;
let blind_claim_phase2 = r_A * Az_blind + r_B * Bz_blind + r_C * Cz_blind;
let evals_ABC = {
// evaluate eq(rx, ·) and use it to build the tables of A(rx, y), B(rx, y),
// and C(rx, y), combined with the weights r_A, r_B, r_C
let evals_rx = EqPolynomial::new(rx.clone()).evals();
let (evals_A, evals_B, evals_C) =
inst.compute_eval_table_sparse(inst.get_num_cons(), z.len(), &evals_rx);
assert_eq!(evals_A.len(), evals_B.len());
assert_eq!(evals_A.len(), evals_C.len());
(0..evals_A.len())
.map(|i| r_A * evals_A[i] + r_B * evals_B[i] + r_C * evals_C[i])
.collect::<Vec<Scalar>>()
};
// another instance of the sum-check protocol
let (sc_proof_phase2, ry, claims_phase2, blind_claim_postsc2) = R1CSProof::prove_phase_two(
num_rounds_y,
&claim_phase2,
&blind_claim_phase2,
&mut DensePolynomial::new(z),
&mut DensePolynomial::new(evals_ABC),
&gens.gens_sc,
transcript,
random_tape,
);
timer_sc_proof_phase2.stop();
let timer_polyeval = Timer::new("polyeval");
let eval_vars_at_ry = poly_vars.evaluate(&ry[1..]);
let blind_eval = random_tape.random_scalar(b"blind_eval");
let (proof_eval_vars_at_ry, comm_vars_at_ry) = PolyEvalProof::prove(
&poly_vars,
Some(&blinds_vars),
&ry[1..],
&eval_vars_at_ry,
Some(&blind_eval),
&gens.gens_pc,
transcript,
random_tape,
);
timer_polyeval.stop();
// prove the final step of sum-check #2
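// Z(ry) = (1 - ry[0]) * vars(ry[1..]) + ry[0] * inputs(ry[1..]); only the
// variables half is blinded, so the blind of Z(ry) is (1 - ry[0]) * blind_eval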
let blind_eval_Z_at_ry = (Scalar::one() - ry[0]) * blind_eval;
let blind_expected_claim_postsc2 = claims_phase2[1] * blind_eval_Z_at_ry;
let claim_post_phase2 = claims_phase2[0] * claims_phase2[1];
let (proof_eq_sc_phase2, _C1, _C2) = EqualityProof::prove(
&gens.gens_pc.gens.gens_1,
transcript,
random_tape,
&claim_post_phase2,
&blind_expected_claim_postsc2,
&claim_post_phase2,
&blind_claim_postsc2,
);
timer_prove.stop();
(
R1CSProof {
comm_vars,
sc_proof_phase1,
claims_phase2: (
comm_Az_claim,
comm_Bz_claim,
comm_Cz_claim,
comm_prod_Az_Bz_claims,
),
pok_claims_phase2: (pok_Cz_claim, proof_prod),
proof_eq_sc_phase1,
sc_proof_phase2,
comm_vars_at_ry,
proof_eval_vars_at_ry,
proof_eq_sc_phase2,
},
rx,
ry,
)
}
pub fn verify(
&self,
num_vars: usize,
num_cons: usize,
input: &[Scalar],
evals: &(Scalar, Scalar, Scalar),
transcript: &mut Transcript,
gens: &R1CSGens,
) -> Result<(Vec<Scalar>, Vec<Scalar>), ProofVerifyError> {
transcript.append_protocol_name(R1CSProof::protocol_name());
input.append_to_transcript(b"input", transcript);
let n = num_vars;
// add the commitment to the verifier's transcript
self
.comm_vars
.append_to_transcript(b"poly_commitment", transcript);
let (num_rounds_x, num_rounds_y) = (num_cons.log_2(), (2 * num_vars).log_2());
// derive the verifier's challenge tau
let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
// verify the first sum-check instance
let claim_phase1 = Scalar::zero()
.commit(&Scalar::zero(), &gens.gens_sc.gens_1)
.compress();
let (comm_claim_post_phase1, rx) = self.sc_proof_phase1.verify(
&claim_phase1,
num_rounds_x,
3,
&gens.gens_sc.gens_1,
&gens.gens_sc.gens_4,
transcript,
)?;
// perform the intermediate sum-check test with claimed Az, Bz, and Cz
let (comm_Az_claim, comm_Bz_claim, comm_Cz_claim, comm_prod_Az_Bz_claims) = &self.claims_phase2;
let (pok_Cz_claim, proof_prod) = &self.pok_claims_phase2;
pok_Cz_claim.verify(&gens.gens_sc.gens_1, transcript, comm_Cz_claim)?;
proof_prod.verify(
&gens.gens_sc.gens_1,
transcript,
comm_Az_claim,
comm_Bz_claim,
comm_prod_Az_Bz_claims,
)?;
comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript);
comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript);
comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript);
comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript);
let taus_bound_rx: Scalar = (0..rx.len())
.map(|i| rx[i] * tau[i] + (Scalar::one() - rx[i]) * (Scalar::one() - tau[i]))
.product();
let expected_claim_post_phase1 = (taus_bound_rx
* (comm_prod_Az_Bz_claims.decompress().unwrap() - comm_Cz_claim.decompress().unwrap()))
.compress();
// verify proof that expected_claim_post_phase1 == claim_post_phase1
self.proof_eq_sc_phase1.verify(
&gens.gens_sc.gens_1,
transcript,
&expected_claim_post_phase1,
&comm_claim_post_phase1,
)?;
// derive three public challenges and then derive a joint claim
let r_A = transcript.challenge_scalar(b"challenege_Az");
let r_B = transcript.challenge_scalar(b"challenege_Bz");
let r_C = transcript.challenge_scalar(b"challenege_Cz");
// r_A * comm_Az_claim + r_B * comm_Bz_claim + r_C * comm_Cz_claim;
let comm_claim_phase2 = GroupElement::vartime_multiscalar_mul(
iter::once(r_A)
.chain(iter::once(r_B))
.chain(iter::once(r_C))
.collect(),
iter::once(&comm_Az_claim)
.chain(iter::once(&comm_Bz_claim))
.chain(iter::once(&comm_Cz_claim))
.map(|pt| pt.decompress().unwrap())
.collect(),
)
.compress();
// verify the joint claim with a sum-check protocol
let (comm_claim_post_phase2, ry) = self.sc_proof_phase2.verify(
&comm_claim_phase2,
num_rounds_y,
2,
&gens.gens_sc.gens_1,
&gens.gens_sc.gens_3,
transcript,
)?;
// verify Z(ry) proof against the initial commitment
self.proof_eval_vars_at_ry.verify(
&gens.gens_pc,
transcript,
&ry[1..],
&self.comm_vars_at_ry,
&self.comm_vars,
)?;
let poly_input_eval = {
// constant term
let mut input_as_sparse_poly_entries = vec![SparsePolyEntry::new(0, Scalar::one())];
// remaining inputs
input_as_sparse_poly_entries.extend(
(0..input.len())
.map(|i| SparsePolyEntry::new(i + 1, input[i]))
.collect::<Vec<SparsePolyEntry>>(),
);
SparsePolynomial::new(n.log_2(), input_as_sparse_poly_entries).evaluate(&ry[1..])
};
// compute commitment to eval_Z_at_ry = (Scalar::one() - ry[0]) * self.eval_vars_at_ry + ry[0] * poly_input_eval
let comm_eval_Z_at_ry = GroupElement::vartime_multiscalar_mul(
iter::once(Scalar::one() - ry[0])
.chain(iter::once(ry[0]))
.collect(),
iter::once(self.comm_vars_at_ry.decompress().unwrap())
.chain(iter::once(
poly_input_eval.commit(&Scalar::zero(), &gens.gens_pc.gens.gens_1),
))
.collect(),
);
// perform the final check in the second sum-check protocol
let (eval_A_r, eval_B_r, eval_C_r) = evals;
let expected_claim_post_phase2 =
((r_A * eval_A_r + r_B * eval_B_r + r_C * eval_C_r) * comm_eval_Z_at_ry).compress();
// verify proof that expected_claim_post_phase2 == claim_post_phase2
self.proof_eq_sc_phase2.verify(
&gens.gens_sc.gens_1,
transcript,
&expected_claim_post_phase2,
&comm_claim_post_phase2,
)?;
Ok((rx, ry))
}
}
#[cfg(test)]
mod tests {
use super::*;
use rand_core::OsRng;
fn produce_tiny_r1cs() -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
// three constraints over five variables Z1, Z2, Z3, Z4, and Z5
// with num_cons and num_vars padded up to powers of two
let num_cons = 128;
let num_vars = 256;
let num_inputs = 2;
// encode the above constraints into three matrices
let mut A: Vec<(usize, usize, Scalar)> = Vec::new();
let mut B: Vec<(usize, usize, Scalar)> = Vec::new();
let mut C: Vec<(usize, usize, Scalar)> = Vec::new();
let one = Scalar::one();
// constraint 0 entries
// (Z1 + Z2) * I0 - Z3 = 0;
A.push((0, 0, one));
A.push((0, 1, one));
B.push((0, num_vars + 1, one));
C.push((0, 2, one));
// constraint 1 entries
// (Z1 + I1) * (Z3) - Z4 = 0
A.push((1, 0, one));
A.push((1, num_vars + 2, one));
B.push((1, 2, one));
C.push((1, 3, one));
// constraint 2 entries
// Z5 * 1 - 0 = 0
A.push((2, 4, one));
B.push((2, num_vars, one));
let inst = R1CSInstance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
// compute a satisfying assignment
let mut csprng: OsRng = OsRng;
let i0 = Scalar::random(&mut csprng);
let i1 = Scalar::random(&mut csprng);
let z1 = Scalar::random(&mut csprng);
let z2 = Scalar::random(&mut csprng);
let z3 = (z1 + z2) * i0; // constraint 0: (Z1 + Z2) * I0 - Z3 = 0
let z4 = (z1 + i1) * z3; // constraint 1: (Z1 + I1) * (Z3) - Z4 = 0
let z5 = Scalar::zero(); // constraint 2: Z5 * 1 - 0 = 0
let mut vars = vec![Scalar::zero(); num_vars];
vars[0] = z1;
vars[1] = z2;
vars[2] = z3;
vars[3] = z4;
vars[4] = z5;
let mut input = vec![Scalar::zero(); num_inputs];
input[0] = i0;
input[1] = i1;
(inst, vars, input)
}
#[test]
fn test_tiny_r1cs() {
let (inst, vars, input) = tests::produce_tiny_r1cs();
let is_sat = inst.is_sat(&vars, &input);
assert!(is_sat);
}
#[test]
fn test_synthetic_r1cs() {
let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(1024, 1024, 10);
let is_sat = inst.is_sat(&vars, &input);
assert!(is_sat);
}
#[test]
pub fn check_r1cs_proof() {
let num_vars = 1024;
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let gens = R1CSGens::new(b"test-m", num_cons, num_vars);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, rx, ry) = R1CSProof::prove(
&inst,
vars,
&input,
&gens,
&mut prover_transcript,
&mut random_tape,
);
let inst_evals = inst.evaluate(&rx, &ry);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(
inst.get_num_vars(),
inst.get_num_cons(),
&input,
&inst_evals,
&mut verifier_transcript,
&gens,
)
.is_ok());
}
}

View File

@@ -0,0 +1,27 @@
use super::scalar::Scalar;
use super::transcript::ProofTranscript;
use merlin::Transcript;
use rand_core::OsRng;
pub struct RandomTape {
tape: Transcript,
}
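// a tape of prover randomness: seeded once with fresh OS entropy, after which
// every blind is a deterministic transcript challenge derived from that seed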
impl RandomTape {
pub fn new(name: &'static [u8]) -> Self {
let tape = {
let mut rng = OsRng::default();
let mut tape = Transcript::new(name);
tape.append_scalar(b"init_randomness", &Scalar::random(&mut rng));
tape
};
Self { tape }
}
pub fn random_scalar(&mut self, label: &'static [u8]) -> Scalar {
self.tape.challenge_scalar(label)
}
pub fn random_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar> {
self.tape.challenge_vector(label, len)
}
}

View File

@@ -0,0 +1,46 @@
use secq256k1::elliptic_curve::ops::Reduce;
use secq256k1::U256;
mod scalar;
pub type Scalar = scalar::Scalar;
pub type ScalarBytes = secq256k1::Scalar;
pub trait ScalarFromPrimitives {
fn to_scalar(self) -> Scalar;
}
impl ScalarFromPrimitives for usize {
#[inline]
fn to_scalar(self) -> Scalar {
(0..self).map(|_i| Scalar::one()).sum()
}
}
impl ScalarFromPrimitives for bool {
#[inline]
fn to_scalar(self) -> Scalar {
if self {
Scalar::one()
} else {
Scalar::zero()
}
}
}
pub trait ScalarBytesFromScalar {
fn decompress_scalar(s: &Scalar) -> ScalarBytes;
fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes>;
}
impl ScalarBytesFromScalar for Scalar {
fn decompress_scalar(s: &Scalar) -> ScalarBytes {
ScalarBytes::from_uint_reduced(U256::from_le_slice(&s.to_bytes()))
}
fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes> {
(0..s.len())
.map(|i| Scalar::decompress_scalar(&s[i]))
.collect::<Vec<ScalarBytes>>()
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,778 @@
#![allow(clippy::too_many_arguments)]
#![allow(clippy::type_complexity)]
use super::commitments::{Commitments, MultiCommitGens};
use super::dense_mlpoly::DensePolynomial;
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::nizk::DotProductProof;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use super::unipoly::{CompressedUniPoly, UniPoly};
use crate::group::DecompressEncodedPoint;
use core::iter;
use itertools::izip;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug)]
pub struct SumcheckInstanceProof {
compressed_polys: Vec<CompressedUniPoly>,
}
impl SumcheckInstanceProof {
pub fn new(compressed_polys: Vec<CompressedUniPoly>) -> SumcheckInstanceProof {
SumcheckInstanceProof { compressed_polys }
}
pub fn verify(
&self,
claim: Scalar,
num_rounds: usize,
degree_bound: usize,
transcript: &mut Transcript,
) -> Result<(Scalar, Vec<Scalar>), ProofVerifyError> {
let mut e = claim;
let mut r: Vec<Scalar> = Vec::new();
// verify that there is a univariate polynomial for each round
assert_eq!(self.compressed_polys.len(), num_rounds);
for i in 0..self.compressed_polys.len() {
let poly = self.compressed_polys[i].decompress(&e);
// verify degree bound
assert_eq!(poly.degree(), degree_bound);
// check if G_k(0) + G_k(1) = e
assert_eq!(poly.eval_at_zero() + poly.eval_at_one(), e);
// append the prover's message to the transcript
poly.append_to_transcript(b"poly", transcript);
//derive the verifier's challenge for the next round
let r_i = transcript.challenge_scalar(b"challenge_nextround");
r.push(r_i);
// evaluate the claimed degree-ell polynomial at r_i
e = poly.evaluate(&r_i);
}
Ok((e, r))
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ZKSumcheckInstanceProof {
comm_polys: Vec<CompressedGroup>,
comm_evals: Vec<CompressedGroup>,
proofs: Vec<DotProductProof>,
}
impl ZKSumcheckInstanceProof {
pub fn new(
comm_polys: Vec<CompressedGroup>,
comm_evals: Vec<CompressedGroup>,
proofs: Vec<DotProductProof>,
) -> Self {
ZKSumcheckInstanceProof {
comm_polys,
comm_evals,
proofs,
}
}
pub fn verify(
&self,
comm_claim: &CompressedGroup,
num_rounds: usize,
degree_bound: usize,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) -> Result<(CompressedGroup, Vec<Scalar>), ProofVerifyError> {
// verify degree bound
assert_eq!(gens_n.n, degree_bound + 1);
// verify that there is a univariate polynomial for each round
assert_eq!(self.comm_polys.len(), num_rounds);
assert_eq!(self.comm_evals.len(), num_rounds);
let mut r: Vec<Scalar> = Vec::new();
for i in 0..self.comm_polys.len() {
let comm_poly = &self.comm_polys[i];
// append the prover's polynomial to the transcript
comm_poly.append_to_transcript(b"comm_poly", transcript);
//derive the verifier's challenge for the next round
let r_i = transcript.challenge_scalar(b"challenge_nextround");
// verify the proof of sum-check and evals
let res = {
let comm_claim_per_round = if i == 0 {
comm_claim
} else {
&self.comm_evals[i - 1]
};
let comm_eval = &self.comm_evals[i];
// add two claims to transcript
comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
comm_eval.append_to_transcript(b"comm_eval", transcript);
// produce two weights
let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);
// compute a weighted sum of the RHS
let comm_target = GroupElement::vartime_multiscalar_mul(
w.clone(),
iter::once(&comm_claim_per_round)
.chain(iter::once(&comm_eval))
.map(|pt| pt.decompress().unwrap())
.collect(),
)
.compress();
let a = {
// the vector to use to decommit for sum-check test
let a_sc = {
let mut a = vec![Scalar::one(); degree_bound + 1];
a[0] += Scalar::one();
a
};
// the vector to use to decommit for evaluation
let a_eval = {
let mut a = vec![Scalar::one(); degree_bound + 1];
for j in 1..a.len() {
a[j] = a[j - 1] * r_i;
}
a
};
// take weighted sum of the two vectors using w
assert_eq!(a_sc.len(), a_eval.len());
(0..a_sc.len())
.map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
.collect::<Vec<Scalar>>()
};
self.proofs[i]
.verify(
gens_1,
gens_n,
transcript,
&a,
&self.comm_polys[i],
&comm_target,
)
.is_ok()
};
if !res {
return Err(ProofVerifyError::InternalError);
}
r.push(r_i);
}
Ok((self.comm_evals[self.comm_evals.len() - 1], r))
}
}
impl SumcheckInstanceProof {
pub fn prove_cubic<F>(
claim: &Scalar,
num_rounds: usize,
poly_A: &mut DensePolynomial,
poly_B: &mut DensePolynomial,
poly_C: &mut DensePolynomial,
comb_func: F,
transcript: &mut Transcript,
) -> (Self, Vec<Scalar>, Vec<Scalar>)
where
F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar,
{
let mut e = *claim;
let mut r: Vec<Scalar> = Vec::new();
let mut cubic_polys: Vec<CompressedUniPoly> = Vec::new();
for _j in 0..num_rounds {
let mut eval_point_0 = Scalar::zero();
let mut eval_point_2 = Scalar::zero();
let mut eval_point_3 = Scalar::zero();
let len = poly_A.len() / 2;
for i in 0..len {
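// restricted to the current variable, each table is the line
// P(x) = (1 - x) * P[i] + x * P[len + i], so P(2) and P(3) are obtained by
// repeatedly adding the difference P[len + i] - P[i]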
// eval 0: bound_func is A(low)
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);
// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
}
let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3];
let poly = UniPoly::from_evals(&evals);
// append the prover's message to the transcript
poly.append_to_transcript(b"poly", transcript);
//derive the verifier's challenge for the next round
let r_j = transcript.challenge_scalar(b"challenge_nextround");
r.push(r_j);
// bound all tables to the verifier's challenge
poly_A.bound_poly_var_top(&r_j);
poly_B.bound_poly_var_top(&r_j);
poly_C.bound_poly_var_top(&r_j);
e = poly.evaluate(&r_j);
cubic_polys.push(poly.compress());
}
(
SumcheckInstanceProof::new(cubic_polys),
r,
vec![poly_A[0], poly_B[0], poly_C[0]],
)
}
pub fn prove_cubic_batched<F>(
claim: &Scalar,
num_rounds: usize,
poly_vec_par: (
&mut Vec<&mut DensePolynomial>,
&mut Vec<&mut DensePolynomial>,
&mut DensePolynomial,
),
poly_vec_seq: (
&mut Vec<&mut DensePolynomial>,
&mut Vec<&mut DensePolynomial>,
&mut Vec<&mut DensePolynomial>,
),
coeffs: &[Scalar],
comb_func: F,
transcript: &mut Transcript,
) -> (
Self,
Vec<Scalar>,
(Vec<Scalar>, Vec<Scalar>, Scalar),
(Vec<Scalar>, Vec<Scalar>, Vec<Scalar>),
)
where
F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar,
{
let (poly_A_vec_par, poly_B_vec_par, poly_C_par) = poly_vec_par;
let (poly_A_vec_seq, poly_B_vec_seq, poly_C_vec_seq) = poly_vec_seq;
let mut e = *claim;
let mut r: Vec<Scalar> = Vec::new();
let mut cubic_polys: Vec<CompressedUniPoly> = Vec::new();
for _j in 0..num_rounds {
let mut evals: Vec<(Scalar, Scalar, Scalar)> = Vec::new();
for (poly_A, poly_B) in poly_A_vec_par.iter().zip(poly_B_vec_par.iter()) {
let mut eval_point_0 = Scalar::zero();
let mut eval_point_2 = Scalar::zero();
let mut eval_point_3 = Scalar::zero();
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C_par[i]);
// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_par[len + i] + poly_C_par[len + i] - poly_C_par[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C_par[len + i] - poly_C_par[i];
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
}
evals.push((eval_point_0, eval_point_2, eval_point_3));
}
for (poly_A, poly_B, poly_C) in izip!(
poly_A_vec_seq.iter(),
poly_B_vec_seq.iter(),
poly_C_vec_seq.iter()
) {
let mut eval_point_0 = Scalar::zero();
let mut eval_point_2 = Scalar::zero();
let mut eval_point_3 = Scalar::zero();
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);
// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
}
evals.push((eval_point_0, eval_point_2, eval_point_3));
}
let evals_combined_0 = (0..evals.len()).map(|i| evals[i].0 * coeffs[i]).sum();
let evals_combined_2 = (0..evals.len()).map(|i| evals[i].1 * coeffs[i]).sum();
let evals_combined_3 = (0..evals.len()).map(|i| evals[i].2 * coeffs[i]).sum();
let evals = vec![
evals_combined_0,
e - evals_combined_0,
evals_combined_2,
evals_combined_3,
];
let poly = UniPoly::from_evals(&evals);
// append the prover's message to the transcript
poly.append_to_transcript(b"poly", transcript);
//derive the verifier's challenge for the next round
let r_j = transcript.challenge_scalar(b"challenge_nextround");
r.push(r_j);
// bound all tables to the verifier's challenge
for (poly_A, poly_B) in poly_A_vec_par.iter_mut().zip(poly_B_vec_par.iter_mut()) {
poly_A.bound_poly_var_top(&r_j);
poly_B.bound_poly_var_top(&r_j);
}
poly_C_par.bound_poly_var_top(&r_j);
for (poly_A, poly_B, poly_C) in izip!(
poly_A_vec_seq.iter_mut(),
poly_B_vec_seq.iter_mut(),
poly_C_vec_seq.iter_mut()
) {
poly_A.bound_poly_var_top(&r_j);
poly_B.bound_poly_var_top(&r_j);
poly_C.bound_poly_var_top(&r_j);
}
e = poly.evaluate(&r_j);
cubic_polys.push(poly.compress());
}
let poly_A_par_final = (0..poly_A_vec_par.len())
.map(|i| poly_A_vec_par[i][0])
.collect();
let poly_B_par_final = (0..poly_B_vec_par.len())
.map(|i| poly_B_vec_par[i][0])
.collect();
let claims_prod = (poly_A_par_final, poly_B_par_final, poly_C_par[0]);
let poly_A_seq_final = (0..poly_A_vec_seq.len())
.map(|i| poly_A_vec_seq[i][0])
.collect();
let poly_B_seq_final = (0..poly_B_vec_seq.len())
.map(|i| poly_B_vec_seq[i][0])
.collect();
let poly_C_seq_final = (0..poly_C_vec_seq.len())
.map(|i| poly_C_vec_seq[i][0])
.collect();
let claims_dotp = (poly_A_seq_final, poly_B_seq_final, poly_C_seq_final);
(
SumcheckInstanceProof::new(cubic_polys),
r,
claims_prod,
claims_dotp,
)
}
}
impl ZKSumcheckInstanceProof {
pub fn prove_quad<F>(
claim: &Scalar,
blind_claim: &Scalar,
num_rounds: usize,
poly_A: &mut DensePolynomial,
poly_B: &mut DensePolynomial,
comb_func: F,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> (Self, Vec<Scalar>, Vec<Scalar>, Scalar)
where
F: Fn(&Scalar, &Scalar) -> Scalar,
{
let (blinds_poly, blinds_evals) = (
random_tape.random_vector(b"blinds_poly", num_rounds),
random_tape.random_vector(b"blinds_evals", num_rounds),
);
let mut claim_per_round = *claim;
let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress();
let mut r: Vec<Scalar> = Vec::new();
let mut comm_polys: Vec<CompressedGroup> = Vec::new();
let mut comm_evals: Vec<CompressedGroup> = Vec::new();
let mut proofs: Vec<DotProductProof> = Vec::new();
for j in 0..num_rounds {
let (poly, comm_poly) = {
let mut eval_point_0 = Scalar::zero();
let mut eval_point_2 = Scalar::zero();
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 += comb_func(&poly_A[i], &poly_B[i]);
// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
eval_point_2 += comb_func(&poly_A_bound_point, &poly_B_bound_point);
}
let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2];
let poly = UniPoly::from_evals(&evals);
let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress();
(poly, comm_poly)
};
// append the prover's message to the transcript
comm_poly.append_to_transcript(b"comm_poly", transcript);
comm_polys.push(comm_poly);
//derive the verifier's challenge for the next round
let r_j = transcript.challenge_scalar(b"challenge_nextround");
// bound all tables to the verifier's challenge
poly_A.bound_poly_var_top(&r_j);
poly_B.bound_poly_var_top(&r_j);
// produce a proof of sum-check and of evaluation
let (proof, claim_next_round, comm_claim_next_round) = {
let eval = poly.evaluate(&r_j);
let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress();
// we need to prove the following under homomorphic commitments:
// (1) poly(0) + poly(1) = claim_per_round
// (2) poly(r_j) = eval
// Our technique is to leverage dot product proofs:
// (1) we can prove: <poly_in_coeffs_form, (2, 1, 1, 1)> = claim_per_round
// (2) we can prove: <poly_in_coeffs_form, (1, r_j, r_j^2, ...)> = eval
// for efficiency we batch them using random weights
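// e.g. for a cubic p(x) = d + c*x + b*x^2 + a*x^3 stored as [d, c, b, a]:
// p(0) + p(1) = 2d + c + b + a = <[d, c, b, a], (2, 1, 1, 1)>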
// add two claims to transcript
comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
comm_eval.append_to_transcript(b"comm_eval", transcript);
// produce two weights
let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);
// compute a weighted sum of the RHS
let target = w[0] * claim_per_round + w[1] * eval;
let comm_target = GroupElement::vartime_multiscalar_mul(
w.clone(),
iter::once(&comm_claim_per_round)
.chain(iter::once(&comm_eval))
.map(|pt| pt.decompress().unwrap())
.collect(),
)
.compress();
let blind = {
let blind_sc = if j == 0 {
blind_claim
} else {
&blinds_evals[j - 1]
};
let blind_eval = &blinds_evals[j];
w[0] * blind_sc + w[1] * blind_eval
};
assert_eq!(target.commit(&blind, gens_1).compress(), comm_target);
let a = {
// the vector to use to decommit for sum-check test
let a_sc = {
let mut a = vec![Scalar::one(); poly.degree() + 1];
a[0] += Scalar::one();
a
};
// the vector to use to decommit for evaluation
let a_eval = {
let mut a = vec![Scalar::one(); poly.degree() + 1];
for j in 1..a.len() {
a[j] = a[j - 1] * r_j;
}
a
};
// take weighted sum of the two vectors using w
assert_eq!(a_sc.len(), a_eval.len());
(0..a_sc.len())
.map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
.collect::<Vec<Scalar>>()
};
let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove(
gens_1,
gens_n,
transcript,
random_tape,
&poly.as_vec(),
&blinds_poly[j],
&a,
&target,
&blind,
);
(proof, eval, comm_eval)
};
claim_per_round = claim_next_round;
comm_claim_per_round = comm_claim_next_round;
proofs.push(proof);
r.push(r_j);
comm_evals.push(comm_claim_per_round);
}
(
ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs),
r,
vec![poly_A[0], poly_B[0]],
blinds_evals[num_rounds - 1],
)
}
pub fn prove_cubic_with_additive_term<F>(
claim: &Scalar,
blind_claim: &Scalar,
num_rounds: usize,
poly_A: &mut DensePolynomial,
poly_B: &mut DensePolynomial,
poly_C: &mut DensePolynomial,
poly_D: &mut DensePolynomial,
comb_func: F,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> (Self, Vec<Scalar>, Vec<Scalar>, Scalar)
where
F: Fn(&Scalar, &Scalar, &Scalar, &Scalar) -> Scalar,
{
let (blinds_poly, blinds_evals) = (
random_tape.random_vector(b"blinds_poly", num_rounds),
random_tape.random_vector(b"blinds_evals", num_rounds),
);
let mut claim_per_round = *claim;
let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress();
let mut r: Vec<Scalar> = Vec::new();
let mut comm_polys: Vec<CompressedGroup> = Vec::new();
let mut comm_evals: Vec<CompressedGroup> = Vec::new();
let mut proofs: Vec<DotProductProof> = Vec::new();
for j in 0..num_rounds {
let (poly, comm_poly) = {
let mut eval_point_0 = Scalar::zero();
let mut eval_point_2 = Scalar::zero();
let mut eval_point_3 = Scalar::zero();
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]);
// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
&poly_D_bound_point,
);
// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i];
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
&poly_D_bound_point,
);
}
let evals = vec![
eval_point_0,
claim_per_round - eval_point_0,
eval_point_2,
eval_point_3,
];
let poly = UniPoly::from_evals(&evals);
let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress();
(poly, comm_poly)
};
// append the prover's message to the transcript
comm_poly.append_to_transcript(b"comm_poly", transcript);
comm_polys.push(comm_poly);
//derive the verifier's challenge for the next round
let r_j = transcript.challenge_scalar(b"challenge_nextround");
// bound all tables to the verifier's challenge
poly_A.bound_poly_var_top(&r_j);
poly_B.bound_poly_var_top(&r_j);
poly_C.bound_poly_var_top(&r_j);
poly_D.bound_poly_var_top(&r_j);
// produce a proof of sum-check and of evaluation
let (proof, claim_next_round, comm_claim_next_round) = {
let eval = poly.evaluate(&r_j);
let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress();
// we need to prove the following under homomorphic commitments:
// (1) poly(0) + poly(1) = claim_per_round
// (2) poly(r_j) = eval
// Our technique is to leverage dot product proofs:
// (1) we can prove: <poly_in_coeffs_form, (2, 1, 1, 1)> = claim_per_round
// (2) we can prove: <poly_in_coeffs_form, (1, r_j, r_j^2, ...)> = eval
// for efficiency we batch them using random weights
// add two claims to transcript
comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
comm_eval.append_to_transcript(b"comm_eval", transcript);
// produce two weights
let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);
// compute a weighted sum of the RHS
let target = w[0] * claim_per_round + w[1] * eval;
let comm_target = GroupElement::vartime_multiscalar_mul(
w.clone(),
iter::once(&comm_claim_per_round)
.chain(iter::once(&comm_eval))
.map(|pt| pt.decompress().unwrap())
.collect::<Vec<GroupElement>>(),
)
.compress();
let blind = {
let blind_sc = if j == 0 {
blind_claim
} else {
&blinds_evals[j - 1]
};
let blind_eval = &blinds_evals[j];
w[0] * blind_sc + w[1] * blind_eval
};
assert_eq!(target.commit(&blind, gens_1).compress(), comm_target);
let a = {
// the vector to use to decommit for sum-check test
let a_sc = {
let mut a = vec![Scalar::one(); poly.degree() + 1];
a[0] += Scalar::one();
a
};
// the vector to use to decommit for evaluation
let a_eval = {
let mut a = vec![Scalar::one(); poly.degree() + 1];
for j in 1..a.len() {
a[j] = a[j - 1] * r_j;
}
a
};
// take weighted sum of the two vectors using w
assert_eq!(a_sc.len(), a_eval.len());
(0..a_sc.len())
.map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
.collect::<Vec<Scalar>>()
};
let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove(
gens_1,
gens_n,
transcript,
random_tape,
&poly.as_vec(),
&blinds_poly[j],
&a,
&target,
&blind,
);
(proof, eval, comm_eval)
};
proofs.push(proof);
claim_per_round = claim_next_round;
comm_claim_per_round = comm_claim_next_round;
r.push(r_j);
comm_evals.push(comm_claim_per_round);
}
(
ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs),
r,
vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]],
blinds_evals[num_rounds - 1],
)
}
}

View File

@@ -0,0 +1,88 @@
#[cfg(feature = "profile")]
use colored::Colorize;
#[cfg(feature = "profile")]
use core::sync::atomic::AtomicUsize;
#[cfg(feature = "profile")]
use core::sync::atomic::Ordering;
#[cfg(feature = "profile")]
use std::time::Instant;
#[cfg(feature = "profile")]
pub static CALL_DEPTH: AtomicUsize = AtomicUsize::new(0);
#[cfg(feature = "profile")]
pub struct Timer {
label: String,
timer: Instant,
}
#[cfg(feature = "profile")]
impl Timer {
#[inline(always)]
pub fn new(label: &str) -> Self {
let timer = Instant::now();
CALL_DEPTH.fetch_add(1, Ordering::Relaxed);
let star = "* ";
println!(
"{:indent$}{}{}",
"",
star,
label.yellow().bold(),
indent = 2 * CALL_DEPTH.load(Ordering::Relaxed)
);
Self {
label: label.to_string(),
timer,
}
}
#[inline(always)]
pub fn stop(&self) {
let duration = self.timer.elapsed();
let star = "* ";
println!(
"{:indent$}{}{} {:?}",
"",
star,
self.label.blue().bold(),
duration,
indent = 2 * CALL_DEPTH.load(Ordering::Relaxed)
);
CALL_DEPTH.fetch_sub(1, Ordering::Relaxed);
}
#[inline(always)]
pub fn print(msg: &str) {
CALL_DEPTH.fetch_add(1, Ordering::Relaxed);
let star = "* ";
println!(
"{:indent$}{}{}",
"",
star,
msg.to_string().green().bold(),
indent = 2 * CALL_DEPTH.load(Ordering::Relaxed)
);
CALL_DEPTH.fetch_sub(1, Ordering::Relaxed);
}
}
#[cfg(not(feature = "profile"))]
pub struct Timer {
_label: String,
}
#[cfg(not(feature = "profile"))]
impl Timer {
#[inline(always)]
pub fn new(label: &str) -> Self {
Self {
_label: label.to_string(),
}
}
#[inline(always)]
pub fn stop(&self) {}
#[inline(always)]
pub fn print(_msg: &str) {}
}
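A minimal usage sketch of the Timer above (hypothetical; `expensive_step` is a stand-in, and output is only printed when the crate is built with the `profile` feature):
```
fn expensive_step() { /* work being measured */ }

fn main() {
    let t = Timer::new("prove"); // prints "* prove", indented by call depth
    expensive_step();
    Timer::print("checkpoint"); // indented one-off message
    t.stop(); // prints "* prove <elapsed>" and decrements the depth counter
}
```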

View File

@@ -0,0 +1,63 @@
use super::group::CompressedGroup;
use super::scalar::Scalar;
use merlin::Transcript;
pub trait ProofTranscript {
fn append_protocol_name(&mut self, protocol_name: &'static [u8]);
fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar);
fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup);
fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar;
fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar>;
}
impl ProofTranscript for Transcript {
fn append_protocol_name(&mut self, protocol_name: &'static [u8]) {
self.append_message(b"protocol-name", protocol_name);
}
fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar) {
self.append_message(label, &scalar.to_bytes());
}
fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup) {
self.append_message(label, point.as_bytes());
}
fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar {
let mut buf = [0u8; 64];
self.challenge_bytes(label, &mut buf);
Scalar::from_bytes_wide(&buf)
}
fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar> {
(0..len)
.map(|_i| self.challenge_scalar(label))
.collect::<Vec<Scalar>>()
}
}
pub trait AppendToTranscript {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript);
}
impl AppendToTranscript for Scalar {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_scalar(label, self);
}
}
impl AppendToTranscript for [Scalar] {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_message(label, b"begin_append_vector");
for item in self {
transcript.append_scalar(label, item);
}
transcript.append_message(label, b"end_append_vector");
}
}
impl AppendToTranscript for CompressedGroup {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_point(label, self);
}
}
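A hedged sketch of how this trait is meant to be driven (the function, labels, and value below are illustrative, not from the source):
```
use merlin::Transcript;

fn fiat_shamir_round(x: &Scalar) -> Scalar {
    let mut t = Transcript::new(b"example");
    t.append_protocol_name(b"demo-protocol");
    // bind the prover's message to the transcript...
    x.append_to_transcript(b"x", &mut t);
    // ...then derive a challenge from everything appended so far
    t.challenge_scalar(b"challenge")
}
```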

View File

@@ -0,0 +1,182 @@
use super::commitments::{Commitments, MultiCommitGens};
use super::group::GroupElement;
use super::scalar::{Scalar, ScalarFromPrimitives};
use super::transcript::{AppendToTranscript, ProofTranscript};
use merlin::Transcript;
use serde::{Deserialize, Serialize};
// ax^2 + bx + c stored as vec![c,b,a]
// ax^3 + bx^2 + cx + d stored as vec![d,c,b,a]
#[derive(Debug)]
pub struct UniPoly {
coeffs: Vec<Scalar>,
}
// ax^2 + bx + c stored as vec![c,a]
// ax^3 + bx^2 + cx + d stored as vec![d,b,a]
#[derive(Serialize, Deserialize, Debug)]
pub struct CompressedUniPoly {
coeffs_except_linear_term: Vec<Scalar>,
}
impl UniPoly {
pub fn from_evals(evals: &[Scalar]) -> Self {
// we only support degree-2 or degree-3 univariate polynomials
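// evals[i] is assumed to be the polynomial's value at x = i; the branches
// below recover the coefficients by finite-difference interpolation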
assert!(evals.len() == 3 || evals.len() == 4);
let coeffs = if evals.len() == 3 {
// ax^2 + bx + c
let two_inv = (2_usize).to_scalar().invert().unwrap();
let c = evals[0];
let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
let b = evals[1] - c - a;
vec![c, b, a]
} else {
// ax^3 + bx^2 + cx + d
let two_inv = (2_usize).to_scalar().invert().unwrap();
let six_inv = (6_usize).to_scalar().invert().unwrap();
let d = evals[0];
let a = six_inv
* (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1] - evals[0]);
let b = two_inv
* (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1]
+ evals[2]
+ evals[2]
+ evals[2]
+ evals[2]
- evals[3]);
let c = evals[1] - d - a - b;
vec![d, c, b, a]
};
UniPoly { coeffs }
}
pub fn degree(&self) -> usize {
self.coeffs.len() - 1
}
pub fn as_vec(&self) -> Vec<Scalar> {
self.coeffs.clone()
}
pub fn eval_at_zero(&self) -> Scalar {
self.coeffs[0]
}
pub fn eval_at_one(&self) -> Scalar {
(0..self.coeffs.len()).map(|i| self.coeffs[i]).sum()
}
pub fn evaluate(&self, r: &Scalar) -> Scalar {
let mut eval = self.coeffs[0];
let mut power = *r;
for i in 1..self.coeffs.len() {
eval += power * self.coeffs[i];
power *= r;
}
eval
}
pub fn compress(&self) -> CompressedUniPoly {
let coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat();
assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len());
CompressedUniPoly {
coeffs_except_linear_term,
}
}
pub fn commit(&self, gens: &MultiCommitGens, blind: &Scalar) -> GroupElement {
self.coeffs.commit(blind, gens)
}
}
impl CompressedUniPoly {
// we require eval(0) + eval(1) = hint, so we can solve for the linear term as:
// linear_term = hint - 2 * constant_term - deg2 term - deg3 term
pub fn decompress(&self, hint: &Scalar) -> UniPoly {
let mut linear_term =
hint - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
for i in 1..self.coeffs_except_linear_term.len() {
linear_term -= self.coeffs_except_linear_term[i];
}
let mut coeffs = vec![self.coeffs_except_linear_term[0], linear_term];
coeffs.extend(&self.coeffs_except_linear_term[1..]);
assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len());
UniPoly { coeffs }
}
}
impl AppendToTranscript for UniPoly {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_message(label, b"UniPoly_begin");
for i in 0..self.coeffs.len() {
transcript.append_scalar(b"coeff", &self.coeffs[i]);
}
transcript.append_message(label, b"UniPoly_end");
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_from_evals_quad() {
// polynomial is 2x^2 + 3x + 1
let e0 = Scalar::one();
let e1 = (6_usize).to_scalar();
let e2 = (15_usize).to_scalar();
let evals = vec![e0, e1, e2];
let poly = UniPoly::from_evals(&evals);
assert_eq!(poly.eval_at_zero(), e0);
assert_eq!(poly.eval_at_one(), e1);
assert_eq!(poly.coeffs.len(), 3);
assert_eq!(poly.coeffs[0], Scalar::one());
assert_eq!(poly.coeffs[1], (3_usize).to_scalar());
assert_eq!(poly.coeffs[2], (2_usize).to_scalar());
let hint = e0 + e1;
let compressed_poly = poly.compress();
let decompressed_poly = compressed_poly.decompress(&hint);
for i in 0..decompressed_poly.coeffs.len() {
assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
}
let e3 = (28_usize).to_scalar();
assert_eq!(poly.evaluate(&(3_usize).to_scalar()), e3);
}
#[test]
fn test_from_evals_cubic() {
// polynomial is x^3 + 2x^2 + 3x + 1
let e0 = Scalar::one();
let e1 = (7_usize).to_scalar();
let e2 = (23_usize).to_scalar();
let e3 = (55_usize).to_scalar();
let evals = vec![e0, e1, e2, e3];
let poly = UniPoly::from_evals(&evals);
assert_eq!(poly.eval_at_zero(), e0);
assert_eq!(poly.eval_at_one(), e1);
assert_eq!(poly.coeffs.len(), 4);
assert_eq!(poly.coeffs[0], Scalar::one());
assert_eq!(poly.coeffs[1], (3_usize).to_scalar());
assert_eq!(poly.coeffs[2], (2_usize).to_scalar());
assert_eq!(poly.coeffs[3], (1_usize).to_scalar());
let hint = e0 + e1;
let compressed_poly = poly.compress();
let decompressed_poly = compressed_poly.decompress(&hint);
for i in 0..decompressed_poly.coeffs.len() {
assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
}
let e4 = (109_usize).to_scalar();
assert_eq!(poly.evaluate(&(4_usize).to_scalar()), e4);
}
}
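As a worked check of `from_evals` in the quadratic case (with e_i = p(i)): c = e_0, a = (e_2 - 2*e_1 + e_0) / 2, and b = e_1 - c - a, which is exactly what the degree-2 branch above computes; the test values 1, 6, 15 indeed give c = 1, a = 2, b = 3.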

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2022 Ethereum Foundation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,15 @@
## Node.js
Recommended: v18 or later
## Install dependencies
```
yarn
```
## Run benchmark
```
yarn bench
```

View File

@@ -8,7 +8,7 @@
},
"dependencies": {
"@ethereumjs/util": "^8.0.3",
"spartan-ecdsa": "*"
"@personaelabs/spartan-ecdsa": "file:./../../lib"
},
"devDependencies": {
"ts-node": "^10.9.1",

View File

@@ -4,13 +4,12 @@ import {
ecsign
} from "@ethereumjs/util";
import {
SpartanWasm,
Tree,
Poseidon,
MembershipProver,
defaultAddressMembershipConfig,
defaultWasmConfig
} from "spartan-ecdsa";
MembershipVerifier
} from "@personaelabs/spartan-ecdsa";
import * as path from "path";
const benchAddrMembership = async () => {
const privKey = Buffer.from("".padStart(16, "🧙"), "utf16le");
@@ -20,11 +19,9 @@ const benchAddrMembership = async () => {
const { v, r, s } = ecsign(msgHash, privKey);
const sig = `0x${r.toString("hex")}${s.toString("hex")}${v.toString(16)}`;
let wasm = new SpartanWasm(defaultWasmConfig);
// Init the Poseidon hash
const poseidon = new Poseidon();
await poseidon.initWasm(wasm);
await poseidon.initWasm();
const treeDepth = 20;
const tree = new Tree(treeDepth, poseidon);
@@ -50,17 +47,38 @@ const benchAddrMembership = async () => {
// Compute the merkle proof
const index = tree.indexOf(proverAddress);
const proverConfig = {
circuit: path.join(
__dirname,
"../../../circuits/build/addr_membership/addr_membership.circuit"
),
witnessGenWasm: path.join(
__dirname,
"../../../circuits/build/addr_membership/addr_membership_js/addr_membership.wasm"
),
enableProfiler: true
};
const merkleProof = tree.createProof(index);
// Init the prover
const prover = new MembershipProver({
...defaultAddressMembershipConfig,
enableProfiler: true
});
await prover.initWasm(wasm);
const prover = new MembershipProver(proverConfig);
await prover.initWasm();
// Prove membership
await prover.prove(sig, msgHash, merkleProof);
const { proof, publicInput } = await prover.prove(sig, msgHash, merkleProof);
const verifierConfig = {
circuit: proverConfig.circuit,
enableProfiler: true
};
// Init verifier
const verifier = new MembershipVerifier(verifierConfig);
await verifier.initWasm();
// Verify proof
await verifier.verify(proof, publicInput.serialize());
};
export default benchAddrMembership;

View File

@@ -2,16 +2,15 @@ import {
MembershipProver,
Poseidon,
Tree,
SpartanWasm,
defaultWasmConfig,
defaultPubkeyMembershipConfig
} from "spartan-ecdsa";
MembershipVerifier
} from "@personaelabs/spartan-ecdsa";
import {
hashPersonalMessage,
ecsign,
ecrecover,
privateToPublic
} from "@ethereumjs/util";
import * as path from "path";
const benchPubKeyMembership = async () => {
const privKey = Buffer.from("".padStart(16, "🧙"), "utf16le");
@@ -22,11 +21,9 @@ const benchPubKeyMembership = async () => {
const pubKey = ecrecover(msgHash, v, r, s);
const sig = `0x${r.toString("hex")}${s.toString("hex")}${v.toString(16)}`;
let wasm = new SpartanWasm(defaultWasmConfig);
// Init the Poseidon hash
const poseidon = new Poseidon();
await poseidon.initWasm(wasm);
await poseidon.initWasm();
const treeDepth = 20;
const tree = new Tree(treeDepth, poseidon);
@@ -49,17 +46,36 @@ const benchPubKeyMembership = async () => {
const index = tree.indexOf(proverPubkeyHash);
const merkleProof = tree.createProof(index);
// Init the prover
const prover = new MembershipProver({
...defaultPubkeyMembershipConfig,
const proverConfig = {
circuit: path.join(
__dirname,
"../../../circuits/build/pubkey_membership/pubkey_membership.circuit"
),
witnessGenWasm: path.join(
__dirname,
"../../../circuits/build/pubkey_membership/pubkey_membership_js/pubkey_membership.wasm"
),
enableProfiler: true
});
await prover.initWasm(wasm);
};
// Init the prover
const prover = new MembershipProver(proverConfig);
await prover.initWasm();
// Prove membership
await prover.prove(sig, msgHash, merkleProof);
const { proof, publicInput } = await prover.prove(sig, msgHash, merkleProof);
// TODO: Verify the proof
const verifierConfig = {
circuit: proverConfig.circuit,
enableProfiler: true
};
// Init verifier
const verifier = new MembershipVerifier(verifierConfig);
await verifier.initWasm();
// Verify proof
await verifier.verify(proof, publicInput.serialize());
};
export default benchPubKeyMembership;

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2022 Ethereum Foundation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,3 +1,7 @@
## Node.js
Recommended: v18 or later
### Install dependencies
```

View File

@@ -9,7 +9,7 @@
"lint": "next lint"
},
"dependencies": {
"spartan-ecdsa": "*",
"@personaelabs/spartan-ecdsa": "file:./../../lib",
"@ethereumjs/util": "^8.0.3",
"comlink": "^4.3.1",
"elliptic": "^6.5.4",
@@ -17,8 +17,7 @@
"next": "13.0.0",
"react": "18.2.0",
"react-dom": "18.2.0",
"readline": "^1.3.0",
"snarkjs": "^0.5.0"
"readline": "^1.3.0"
},
"devDependencies": {
"@types/node": "18.11.7",

View File

@@ -1,13 +1,14 @@
import { useState } from "react";
import {
MembershipProver,
MembershipVerifier,
Tree,
Poseidon,
SpartanWasm,
defaultAddressMembershipConfig,
defaultWasmConfig,
defaultPubkeyMembershipConfig
} from "spartan-ecdsa";
defaultAddressMembershipPConfig,
defaultPubkeyMembershipPConfig,
defaultPubkeyMembershipVConfig,
defaultAddressMembershipVConfig
} from "@personaelabs/spartan-ecdsa";
import {
ecrecover,
ecsign,
@@ -18,8 +19,6 @@ import {
} from "@ethereumjs/util";
export default function Home() {
const [proof, setProof] = useState<any | undefined>();
const provePubKeyMembership = async () => {
const privKey = Buffer.from("".padStart(16, "🧙"), "utf16le");
const msg = Buffer.from("harry potter");
@@ -29,9 +28,8 @@ export default function Home() {
const pubKey = ecrecover(msgHash, v, r, s);
const sig = `0x${r.toString("hex")}${s.toString("hex")}${v.toString(16)}`;
const wasm = new SpartanWasm(defaultWasmConfig);
const poseidon = new Poseidon();
await poseidon.initWasm(wasm);
await poseidon.initWasm();
const treeDepth = 20;
const pubKeyTree = new Tree(treeDepth, poseidon);
@@ -54,9 +52,12 @@ export default function Home() {
console.log("Proving...");
console.time("Full proving time");
const prover = new MembershipProver(defaultPubkeyMembershipConfig);
const prover = new MembershipProver({
...defaultPubkeyMembershipPConfig,
enableProfiler: true
});
prover.initWasm(wasm);
await prover.initWasm();
const { proof, publicInput } = await prover.prove(
sig,
@@ -70,7 +71,23 @@ export default function Home() {
proof.length,
"bytes"
);
setProof({ proof, publicInput });
console.log("Verifying...");
const verifier = new MembershipVerifier({
...defaultPubkeyMembershipVConfig,
enableProfiler: true
});
await verifier.initWasm();
console.time("Verification time");
const result = await verifier.verify(proof, publicInput.serialize());
console.timeEnd("Verification time");
if (result) {
console.log("Successfully verified proof!");
} else {
console.log("Failed to verify proof :(");
}
};
const proverAddressMembership = async () => {
@@ -81,9 +98,8 @@ export default function Home() {
const { v, r, s } = ecsign(msgHash, privKey);
const sig = `0x${r.toString("hex")}${s.toString("hex")}${v.toString(16)}`;
const wasm = new SpartanWasm(defaultWasmConfig);
const poseidon = new Poseidon();
await poseidon.initWasm(wasm);
await poseidon.initWasm();
const treeDepth = 20;
const addressTree = new Tree(treeDepth, poseidon);
@@ -109,11 +125,11 @@ export default function Home() {
console.time("Full proving time");
const prover = new MembershipProver({
...defaultAddressMembershipConfig,
...defaultAddressMembershipPConfig,
enableProfiler: true
});
prover.initWasm(wasm);
await prover.initWasm();
const { proof, publicInput } = await prover.prove(
sig,
@@ -127,23 +143,24 @@ export default function Home() {
proof.length,
"bytes"
);
setProof({ proof, publicInput });
};
/*
const verify = async () => {
if (!proof) {
console.log("No proof yet!");
console.log("Verifying...");
const verifier = new MembershipVerifier({
...defaultAddressMembershipVConfig,
enableProfiler: true
});
await verifier.initWasm();
console.time("Verification time");
const result = await verifier.verify(proof, publicInput.serialize());
console.timeEnd("Verification time");
if (result) {
console.log("Successfully verified proof!");
} else {
const verifier = new EffECDSAVerifier({
enableProfiler: true
});
const verified = await verifier.verify(proof.proof, proof.publicInput);
console.log("Verified?", verified);
console.log("Failed to verify proof :(");
}
};
*/
return (
<div>

View File

@@ -0,0 +1,22 @@
[package]
name = "circuit_reader"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bincode = "1.3.3"
secq256k1 = { path = "../secq256k1" }
spartan = { path = "../Spartan-secq" }
ff = "0.12.0"
byteorder = "1.4.3"
group = "0.12.0"
itertools = "0.9.0"
[[bin]]
name = "gen_spartan_inst"
path = "src/bin/gen_spartan_inst.rs"

View File

@@ -0,0 +1,24 @@
#![allow(non_snake_case)]
use bincode;
use circuit_reader::load_as_spartan_inst;
use std::env::{args, current_dir};
use std::fs::File;
use std::io::Write;
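// usage: gen_spartan_inst <circom_r1cs_path> <output_path> <num_pub_inputs>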
fn main() {
let circom_r1cs_path = args().nth(1).unwrap();
let output_path = args().nth(2).unwrap();
let num_pub_inputs = args().nth(3).unwrap().parse::<usize>().unwrap();
let root = current_dir().unwrap();
let circom_r1cs_path = root.join(circom_r1cs_path);
let spartan_inst = load_as_spartan_inst(circom_r1cs_path, num_pub_inputs);
let spartan_inst_bytes = bincode::serialize(&spartan_inst).unwrap();
File::create(root.join(output_path.clone()))
.unwrap()
.write_all(spartan_inst_bytes.as_slice())
.unwrap();
println!("Written Spartan circuit to {}", output_path);
}

View File

@@ -0,0 +1,215 @@
// Code borrowed from Nova-Scotia https://github.com/nalinbhardwaj/Nova-Scotia
use byteorder::{LittleEndian, ReadBytesExt};
use ff::PrimeField;
use group::Group;
use itertools::Itertools;
use std::{
collections::HashMap,
io::{BufReader, Error, ErrorKind, Read, Result, Seek, SeekFrom},
};
pub type Constraint<Fr> = (Vec<(usize, Fr)>, Vec<(usize, Fr)>, Vec<(usize, Fr)>);
#[derive(Clone)]
pub struct R1CS<Fr: PrimeField> {
pub num_inputs: usize,
pub num_aux: usize,
pub num_variables: usize,
pub constraints: Vec<Constraint<Fr>>,
}
// R1CSFile's header
#[derive(Debug, Default)]
pub struct Header {
pub field_size: u32,
pub prime_size: Vec<u8>,
pub n_wires: u32,
pub n_pub_out: u32,
pub n_pub_in: u32,
pub n_prv_in: u32,
pub n_labels: u64,
pub n_constraints: u32,
}
// R1CSFile parse result
#[derive(Debug, Default)]
pub struct R1CSFile<Fr: PrimeField> {
pub version: u32,
pub header: Header,
pub constraints: Vec<Constraint<Fr>>,
pub wire_mapping: Vec<u64>,
}
use std::fs::OpenOptions;
use std::path::Path;
pub fn load_r1cs_from_bin_file<G1: Group>(filename: &Path) -> (R1CS<G1::Scalar>, Vec<usize>) {
let reader = OpenOptions::new()
.read(true)
.open(filename)
.expect("unable to open.");
load_r1cs_from_bin::<G1, _>(BufReader::new(reader))
}
pub fn load_r1cs_from_bin<G1: Group, R: Read + Seek>(reader: R) -> (R1CS<G1::Scalar>, Vec<usize>) {
let file = from_reader::<G1, R>(reader).expect("unable to read.");
let num_inputs = (1 + file.header.n_pub_in + file.header.n_pub_out) as usize;
let num_variables = file.header.n_wires as usize;
let num_aux = num_variables - num_inputs;
(
R1CS {
num_aux,
num_inputs,
num_variables,
constraints: file.constraints,
},
file.wire_mapping.iter().map(|e| *e as usize).collect_vec(),
)
}
pub(crate) fn read_field<R: Read, Fr: PrimeField>(mut reader: R) -> Result<Fr> {
let mut repr = Fr::zero().to_repr();
for digit in repr.as_mut().iter_mut() {
// TODO: may need to reverse order?
*digit = reader.read_u8()?;
}
let fr = Fr::from_repr(repr).unwrap();
Ok(fr)
}
fn read_header<R: Read>(mut reader: R, size: u64) -> Result<Header> {
let field_size = reader.read_u32::<LittleEndian>()?;
let mut prime_size = vec![0u8; field_size as usize];
reader.read_exact(&mut prime_size)?;
if size != 32 + field_size as u64 {
return Err(Error::new(
ErrorKind::InvalidData,
"Invalid header section size",
));
}
Ok(Header {
field_size,
prime_size,
n_wires: reader.read_u32::<LittleEndian>()?,
n_pub_out: reader.read_u32::<LittleEndian>()?,
n_pub_in: reader.read_u32::<LittleEndian>()?,
n_prv_in: reader.read_u32::<LittleEndian>()?,
n_labels: reader.read_u64::<LittleEndian>()?,
n_constraints: reader.read_u32::<LittleEndian>()?,
})
}
fn read_constraint_vec<R: Read, Fr: PrimeField>(mut reader: R) -> Result<Vec<(usize, Fr)>> {
let n_vec = reader.read_u32::<LittleEndian>()? as usize;
let mut vec = Vec::with_capacity(n_vec);
for _ in 0..n_vec {
vec.push((
reader.read_u32::<LittleEndian>()? as usize,
read_field::<&mut R, Fr>(&mut reader)?,
));
}
Ok(vec)
}
fn read_constraints<R: Read, Fr: PrimeField>(
mut reader: R,
header: &Header,
) -> Result<Vec<Constraint<Fr>>> {
// todo check section size
let mut vec = Vec::with_capacity(header.n_constraints as usize);
for _ in 0..header.n_constraints {
vec.push((
read_constraint_vec::<&mut R, Fr>(&mut reader)?,
read_constraint_vec::<&mut R, Fr>(&mut reader)?,
read_constraint_vec::<&mut R, Fr>(&mut reader)?,
));
}
Ok(vec)
}
fn read_map<R: Read>(mut reader: R, size: u64, header: &Header) -> Result<Vec<u64>> {
if size != header.n_wires as u64 * 8 {
return Err(Error::new(
ErrorKind::InvalidData,
"Invalid map section size",
));
}
let mut vec = Vec::with_capacity(header.n_wires as usize);
for _ in 0..header.n_wires {
vec.push(reader.read_u64::<LittleEndian>()?);
}
if vec[0] != 0 {
return Err(Error::new(
ErrorKind::InvalidData,
"Wire 0 should always be mapped to 0",
));
}
Ok(vec)
}
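// circom .r1cs binary layout: 4-byte magic "r1cs", u32 version, u32 section
// count, then per section a u32 type tag, a u64 size, and the payload.
// Section types read below: 1 = header, 2 = constraints, 3 = wire-to-label map.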
pub fn from_reader<G1: Group, R: Read + Seek>(mut reader: R) -> Result<R1CSFile<G1::Scalar>> {
let mut magic = [0u8; 4];
reader.read_exact(&mut magic)?;
if magic != [0x72, 0x31, 0x63, 0x73] {
// magic = "r1cs"
return Err(Error::new(ErrorKind::InvalidData, "Invalid magic number"));
}
let version = reader.read_u32::<LittleEndian>()?;
if version != 1 {
return Err(Error::new(ErrorKind::InvalidData, "Unsupported version"));
}
let num_sections = reader.read_u32::<LittleEndian>()?;
// section type -> file offset
let mut section_offsets = HashMap::<u32, u64>::new();
let mut section_sizes = HashMap::<u32, u64>::new();
// get file offset of each section
for _ in 0..num_sections {
let section_type = reader.read_u32::<LittleEndian>()?;
let section_size = reader.read_u64::<LittleEndian>()?;
let offset = reader.seek(SeekFrom::Current(0))?;
section_offsets.insert(section_type, offset);
section_sizes.insert(section_type, section_size);
reader.seek(SeekFrom::Current(section_size as i64))?;
}
let header_type = 1;
let constraint_type = 2;
let wire2label_type = 3;
reader.seek(SeekFrom::Start(*section_offsets.get(&header_type).unwrap()))?;
let header = read_header(&mut reader, *section_sizes.get(&header_type).unwrap())?;
if header.field_size != 32 {
return Err(Error::new(
ErrorKind::InvalidData,
"This parser only supports 32-byte fields",
));
}
// if header.prime_size != hex!("010000f093f5e1439170b97948e833285d588181b64550b829a031e1724e6430") {
// return Err(Error::new(ErrorKind::InvalidData, "This parser only supports bn256"));
// }
reader.seek(SeekFrom::Start(
*section_offsets.get(&constraint_type).unwrap(),
))?;
let constraints = read_constraints::<&mut R, <G1 as Group>::Scalar>(&mut reader, &header)?;
reader.seek(SeekFrom::Start(
*section_offsets.get(&wire2label_type).unwrap(),
))?;
let wire_mapping = read_map(
&mut reader,
*section_sizes.get(&wire2label_type).unwrap(),
&header,
)?;
Ok(R1CSFile {
version,
header,
constraints,
wire_mapping,
})
}

View File

@@ -0,0 +1,59 @@
mod circom_reader;
use circom_reader::{load_r1cs_from_bin_file, R1CS};
use ff::PrimeField;
use libspartan::Instance;
use secq256k1::AffinePoint;
use secq256k1::FieldBytes;
use std::path::PathBuf;
pub fn load_as_spartan_inst(circuit_file: PathBuf, num_pub_inputs: usize) -> Instance {
let (r1cs, _) = load_r1cs_from_bin_file::<AffinePoint>(&circuit_file);
let spartan_inst = convert_to_spartan_r1cs(&r1cs, num_pub_inputs);
spartan_inst
}
fn convert_to_spartan_r1cs<F: PrimeField<Repr = FieldBytes>>(
r1cs: &R1CS<F>,
num_pub_inputs: usize,
) -> Instance {
let num_cons = r1cs.constraints.len();
let num_vars = r1cs.num_variables;
let num_inputs = num_pub_inputs;
let mut A = vec![];
let mut B = vec![];
let mut C = vec![];
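// Spartan's Instance::new expects each matrix as sparse (row, col, coeff_bytes) triplets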
for (i, constraint) in r1cs.constraints.iter().enumerate() {
let (a, b, c) = constraint;
for (j, coeff) in a.iter() {
let bytes: [u8; 32] = coeff.to_repr().into();
A.push((i, *j, bytes));
}
for (j, coeff) in b.iter() {
let bytes: [u8; 32] = coeff.to_repr().into();
B.push((i, *j, bytes));
}
for (j, coeff) in c.iter() {
let bytes: [u8; 32] = coeff.to_repr().into();
C.push((i, *j, bytes));
}
}
let inst = Instance::new(
num_cons,
num_vars,
num_inputs,
A.as_slice(),
B.as_slice(),
C.as_slice(),
)
.unwrap();
inst
}

packages/circuits/LICENSE
View File

@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.


@@ -1,3 +1,13 @@
## Node.js
Recommended: v18 or later
## Install dependencies
```
yarn
```
## Run tests
Install [this](https://github.com/DanTehrani/circom-secq) fork of Circom that supports compiling to the secp256k1 base field.
@@ -7,7 +17,7 @@ git clone https://github.com/DanTehrani/circom-secq
```
```
cd circom && cargo build --release && cargo install --path circom
cd circom-secq && cargo build --release && cargo install --path circom
```
(In this directory) Install dependencies


@@ -4,6 +4,17 @@ include "./eff_ecdsa.circom";
include "./tree.circom";
include "./to_address/zk-identity/eth.circom";
/**
* AddrMembership
* ==============
*
* Checks that an inputted efficient ECDSA signature (definition and discussion
* can be found at https://personaelabs.org/posts/efficient-ecdsa-1/)
* is signed by a public key that, when converted to an address, is a member of
* a Merkle tree of addresses. The public key is extracted from the efficient
* ECDSA signature in EfficientECDSA(), and converted to an address by Keccak
* hashing the public key in PubkeyToAddress().
*/
template AddrMembership(nLevels) {
signal input s;
signal input root;
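
The comment above traces a pipeline of signature → public key → address → Merkle leaf. As a point of reference, here is a minimal off-circuit sketch of the address-derivation step, using @ethereumjs/util (which the tests in this diff already import); the key material is purely illustrative:

```typescript
// Hedged off-circuit sketch of PubkeyToAddress(): an Ethereum address is the
// last 20 bytes of keccak256(pubKeyX || pubKeyY).
import { privateToPublic, publicToAddress } from "@ethereumjs/util";

const privKey = Buffer.from("".padStart(16, "🧙"), "utf16le");
const pubKey = privateToPublic(privKey); // 64 bytes: x || y, no 0x04 prefix
const address = publicToAddress(pubKey); // keccak256(pubKey).slice(12)
console.log("0x" + address.toString("hex"));
```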


@@ -3,7 +3,13 @@ pragma circom 2.1.2;
include "./secp256k1/mul.circom";
include "../../../node_modules/circomlib/circuits/bitify.circom";
// ECDSA public key recovery without public key validation.
/**
* EfficientECDSA
* ====================
*
* Converts an inputted efficient ECDSA signature to a public key. There is no
* public key validation included.
*/
template EfficientECDSA() {
var bits = 256;
signal input s;
@@ -15,17 +21,13 @@ template EfficientECDSA() {
signal output pubKeyX;
signal output pubKeyY;
var gX = 55066263022277343669578718895168534326250603453777594175500187360389116729240;
var gY = 32670510020758816978083085130507043184471273380659243275938904335757337482424;
var a = 7;
// t * R = s * r^-1 * R
// sMultT = s * T
component sMultT = Secp256k1Mul();
sMultT.scalar <== s;
sMultT.xP <== Tx;
sMultT.yP <== Ty;
// sMultT + U
// pubKey = sMultT + U
component pubKey = Secp256k1AddComplete();
pubKey.xP <== sMultT.outX;
pubKey.yP <== sMultT.outY;
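
For intuition about the equation this template constrains, a hedged sketch (not the library's exact implementation) of deriving the public inputs T and U from an ordinary signature, so that pubKey = s * T + U recovers the signer's key:

```typescript
// Hedged sketch of computing the efficient-ECDSA public inputs. With
// T = r^-1 * R and U = -(r^-1 * m) * G, the signer's key satisfies
// pubKey = s * T + U, which is the relation EfficientECDSA() checks.
import BN from "bn.js"; // elliptic's own bignum dependency
import { ec as EC } from "elliptic";

const ec = new EC("secp256k1");

const effEcdsaPubInput = (r: BN, isYOdd: boolean, msgHash: Buffer) => {
  const n = ec.curve.n; // secp256k1 group order
  const R = ec.curve.pointFromX(r, isYOdd); // rebuild R from r and its parity bit
  const rInv = r.invm(n);
  const T = R.mul(rInv);
  const U = ec.curve.g.mul(rInv.mul(new BN(msgHash)).neg().umod(n));
  return { T, U };
};
```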


@@ -3,6 +3,12 @@ pragma circom 2.1.2;
include "./eff_ecdsa.circom";
include "./to_address/zk-identity/eth.circom";
/**
* EfficientECDSAToAddr
* ====================
*
* Converts an inputted efficient ECDSA signature to an address.
*/
template EfficientECDSAToAddr() {
var bits = 256;
signal input s;


@@ -4,6 +4,16 @@ include "./eff_ecdsa.circom";
include "./tree.circom";
include "../poseidon/poseidon.circom";
/**
* PubkeyMembership
* ================
*
* Checks that an inputted efficient ECDSA signature (definition and discussion
* can be found at https://personaelabs.org/posts/efficient-ecdsa-1/)
* is signed by a public key that is in a Merkle tree of public keys. Avoids the
* SNARK-unfriendly Keccak hash that must be performed when validating if the
* public key is in a Merkle tree of addresses.
*/
template PubKeyMembership(nLevels) {
signal input s;
signal input root;
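
The only difference from AddrMembership is the leaf derivation. A hedged sketch of building a SNARK-friendly leaf with the lib API that appears elsewhere in this diff (the tree depth and key material are illustrative):

```typescript
import { Poseidon, Tree } from "@personaelabs/spartan-ecdsa";
import { privateToPublic } from "@ethereumjs/util";

const poseidon = new Poseidon();
await poseidon.initWasm(); // assumes an ES module context

const tree = new Tree(10, poseidon); // depth 10, matching the tests below

// SNARK-friendly leaf: Poseidon(pubKeyX, pubKeyY), no Keccak involved
const pubKey = privateToPublic(Buffer.from("".padStart(16, "🧙"), "utf16le"));
tree.insert(poseidon.hashPubKey(pubKey));
```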


@@ -3,6 +3,16 @@ pragma circom 2.1.2;
include "../../../../node_modules/circomlib/circuits/comparators.circom";
include "../../../../node_modules/circomlib/circuits/gates.circom";
/**
* Secp256k1AddIncomplete
* ======================
*
* Adds two points (xP, yP) and (xQ, yQ) on the secp256k1 curve. This template
* only works for points where xP != xQ and neither point is at infinity. We can
* implement the raw affine formulae for this operation because we are working
* in the right field (we do secp256k1 base field arithmetic in the secq256k1
* scalar field, whose moduli are equal). Should work for any short Weierstrass
* curve (Pasta, P-256).
*/
template Secp256k1AddIncomplete() {
signal input xP;
signal input yP;
@@ -25,7 +35,14 @@ template Secp256k1AddIncomplete() {
outY <== lambda * (xP - outX) - yP;
}
// Assuming that (0, 0) is not a valid point.
/**
* Secp256k1AddComplete
* ====================
*
* Implements https://zcash.github.io/halo2/design/gadgets/ecc/addition.html#complete-addition
* so we can add any pair of points. Assumes (0, 0) is not a valid point (which
* is true for secp256k1) and is used as the point at infinity.
*/
template Secp256k1AddComplete() {
signal input xP;
signal input yP;
@@ -49,7 +66,6 @@ template Secp256k1AddComplete() {
component isXEitherZero = IsZero();
isXEitherZero.in <== (1 - isXpZero.out) * (1 - isXqZero.out);
// dx = xQ - xP
// dy = xP != xQ ? yQ - yP : 0
@@ -94,6 +110,7 @@ template Secp256k1AddComplete() {
zeroizeB.b <== isXqZero.out;
// zeroize = (xP = xQ and yP = -yQ) or (xP = 0 and xQ = 0) ? 1 : 0
// for this case we want to output the point at infinity (0, 0)
component zeroize = OR();
zeroize.a <== zeroizeA.out;
zeroize.b <== zeroizeB.out;
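
For reference, a hedged sketch of the addition semantics the two templates above implement, in plain bigint arithmetic; it mirrors the halo2 complete-addition case analysis, with (0, 0) standing in for the point at infinity:

```typescript
// Reference semantics for Secp256k1AddIncomplete/Secp256k1AddComplete,
// sketched over the secp256k1 base field p.
const p = 2n ** 256n - 2n ** 32n - 977n; // secp256k1 base field modulus

const mod = (a: bigint): bigint => ((a % p) + p) % p;

// Fermat inverse a^(p-2) mod p; valid since p is prime
function inv(a: bigint): bigint {
  let r = 1n;
  let b = mod(a);
  let e = p - 2n;
  while (e > 0n) {
    if (e & 1n) r = mod(r * b);
    b = mod(b * b);
    e >>= 1n;
  }
  return r;
}

type Pt = [bigint, bigint];

function addComplete([xP, yP]: Pt, [xQ, yQ]: Pt): Pt {
  if (xP === 0n && yP === 0n) return [xQ, yQ]; // P is the identity
  if (xQ === 0n && yQ === 0n) return [xP, yP]; // Q is the identity
  if (xP === xQ && mod(yP + yQ) === 0n) return [0n, 0n]; // P = -Q
  const lambda =
    xP === xQ
      ? mod(3n * xP * xP * inv(2n * yP)) // tangent slope (doubling, a = 0)
      : mod((yQ - yP) * inv(mod(xQ - xP))); // chord slope (incomplete case)
  const outX = mod(lambda * lambda - xP - xQ);
  const outY = mod(lambda * (xP - outX) - yP);
  return [outX, outY];
}
```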


@@ -1,5 +1,12 @@
pragma circom 2.1.2;
/**
* Secp256k1Double
* ===============
*
* Double a specific point (xP, yP) on the secp256k1 curve. Should work for any
* short Weierstrass curve (Pasta, P-256).
*/
template Secp256k1Double() {
signal input xP;
signal input yP;
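
Doubling is just the xP == xQ tangent case of complete addition. Reusing the hedged addComplete sketch above:

```typescript
// Doubling = adding a point to itself; addComplete's xP == xQ branch applies
// the tangent slope lambda = 3 * xP^2 / (2 * yP), since a = 0 for secp256k1.
const double = (P: Pt): Pt => addComplete(P, P);
```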


@@ -6,7 +6,16 @@ include "../../../../node_modules/circomlib/circuits/bitify.circom";
include "../../../../node_modules/circomlib/circuits/comparators.circom";
include "../../../../node_modules/circomlib/circuits/gates.circom";
// Implements https://zcash.github.io/halo2/design/gadgets/ecc/var-base-scalar-mul.html
//
/**
* Secp256k1Mul
* ============
*
* Implements https://zcash.github.io/halo2/design/gadgets/ecc/var-base-scalar-mul.html
* which allows us to use incomplete addition for the majority of the addition steps
* and only use complete addition for the final 3 steps.
*/
template Secp256k1Mul() {
var bits = 256;
signal input scalar;
@@ -93,7 +102,7 @@ template Secp256k1Mul() {
outY <== out.outY;
}
// Calculate k = (s + tQ) % q as follwos:
// Calculate k = (s + tQ) % q as follows:
// Define notation: (s + tQ) / q = (quotient, remainder)
// We can calculate the quotient and remainder as:
// (s + tQ) < q ? (0, s + tQ) : (1, (s + tQ) - q)
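
A hedged sketch of the quotient/remainder decomposition this comment describes; since s and tQ are both reduced modulo q, the quotient can only be 0 or 1:

```typescript
// Hedged sketch of K(): k = (s + tQ) mod q, witnessed as a (quotient,
// remainder) pair. Both inputs are < q, so the sum is < 2q.
const q =
  115792089237316195423570985008687907852837564279074904382605163141518161494337n; // secp256k1 scalar field order

function computeK(s: bigint, tQ: bigint): { quotient: bigint; k: bigint } {
  const sum = s + tQ;
  return sum < q ? { quotient: 0n, k: sum } : { quotient: 1n, k: sum - q };
}
```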


@@ -2,8 +2,15 @@ pragma circom 2.1.2;
include "../poseidon/poseidon.circom";
include "../../../node_modules/circomlib/circuits/mux1.circom";
// Copy of this implementation: https://github.com/semaphore-protocol/semaphore/blob/main/packages/circuits/tree.circom
// We use our own Poseidon hash function instead of the one from circomlib.
/**
* MerkleTreeInclusionProof
* ========================
*
* Copy of the Merkle Tree implementation in Semaphore:
* https://github.com/semaphore-protocol/semaphore/blob/main/packages/circuits/tree.circom
* Instead of using the circomlib Poseidon, we use our own implementation which
* uses constants specific to the secp256k1 curve.
*/
template MerkleTreeInclusionProof(nLevels) {
signal input leaf;
signal input pathIndices[nLevels];
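
For reference, a hedged sketch of the computation this template constrains: rebuild the root from the leaf, the sibling values, and the path indices, then compare against the public root. The generic hash2 stands in for the secp256k1-specific Poseidon:

```typescript
// Hedged sketch of MerkleTreeInclusionProof's semantics.
type Hash2 = (left: bigint, right: bigint) => bigint;

function computeRoot(
  leaf: bigint,
  pathIndices: number[], // 0 = running node is the left child at that level
  siblings: bigint[],
  hash2: Hash2
): bigint {
  let node = leaf;
  for (let i = 0; i < pathIndices.length; i++) {
    node =
      pathIndices[i] === 0 ? hash2(node, siblings[i]) : hash2(siblings[i], node);
  }
  return node; // the circuit constrains this to equal the public root
}
```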


@@ -1,6 +1,6 @@
{
"name": "circuits",
"version": "1.0.0",
"name": "@personaelabs/spartan-ecdsa-circuits",
"version": "0.1.0",
"main": "index.js",
"license": "MIT",
"dependencies": {
@@ -12,7 +12,7 @@
"elliptic": "^6.5.4"
},
"devDependencies": {
"spartan-ecdsa": "*",
"@personaelabs/spartan-ecdsa": "*",
"@zk-kit/incremental-merkle-tree": "^1.0.0",
"jest": "^29.3.1",
"ts-jest": "^29.0.3",


@@ -1,3 +1,5 @@
pragma circom 2.1.2;
function ROUND_KEYS() {
return [
15180568604901803243989155929934437997245952775071395385994322939386074967328,
@@ -198,19 +200,19 @@ function ROUND_KEYS() {
function MDS_MATRIX() {
return [
[
23803367313109184982541472396887217823407491505476035214597249950862491174978,
99598264738478514965466329604335101878578484066948401815632841927360380418550,
115153730744008470922201668532093804785974940215174148228638117153769917815388
92469348809186613947252340883344274339611751744959319352506666082431267346705,
100938028378191533449096235266991198229563815869344032449592738345766724371160,
77486311749148948616988559783475694076613010381924638436641318334458515006661
],
[
113091279114074197935538706369506905304335018116466406918080279636714229809202,
52839207077696265966109252614080731042198774602627516394420475307554904992516,
31714026868000093936404190818455331704037953726328786865383369149182617311729
110352262556914082363749654180080464794716701228558638957603951672835474954408,
27607004873684391669404739690441550149894883072418944161048725383958774443141,
29671705769502357195586268679831947082918094959101307962374709600277676341325
],
[
70589234453346098611775931063930029749141121635522432940893113186813327872540,
89267224805530783265612822042944818966269909370588027009288423799106976950833,
13120047990786604980839974606890144531075498455983768009875235935875059189507
77762103796341032609398578911486222569419103128091016773380377798879650228751,
1753012011204964731088925227042671869111026487299375073665493007998674391999,
70274477372358662369456035572054501601454406272695978931839980644925236550307
]
];
}
}


@@ -2,7 +2,7 @@ const wasm_tester = require("circom_tester").wasm;
var EC = require("elliptic").ec;
import * as path from "path";
const ec = new EC("secp256k1");
import { Poseidon, Tree, SpartanWasm, defaultWasmConfig } from "spartan-ecdsa";
import { Poseidon, Tree } from "@personaelabs/spartan-ecdsa";
import { getEffEcdsaCircuitInput } from "./test_utils";
import { privateToAddress } from "@ethereumjs/util";
@@ -16,11 +16,9 @@ describe("membership", () => {
}
);
const wasm = new SpartanWasm(defaultWasmConfig);
// Construct the tree
const poseidon = new Poseidon();
await poseidon.initWasm(wasm);
await poseidon.initWasm();
const nLevels = 10;
const tree = new Tree(nLevels, poseidon);


@@ -20,7 +20,7 @@ describe("poseidon", () => {
const w = await circuit.calculateWitness(input, true);
await circuit.assertOut(w, {
out: "55864140790032987462805271262840606862500777900572169165625301625084550490622"
out: "46702443887670435486723478191273607819169644657419964658749776213559127696053"
});
await circuit.checkConstraints(w);


@@ -1,7 +1,7 @@
const wasm_tester = require("circom_tester").wasm;
var EC = require("elliptic").ec;
import * as path from "path";
import { Poseidon, Tree, SpartanWasm, defaultWasmConfig } from "spartan-ecdsa";
import { Poseidon, Tree } from "@personaelabs/spartan-ecdsa";
import { privateToPublic } from "@ethereumjs/util";
import { getEffEcdsaCircuitInput } from "./test_utils";
@@ -15,11 +15,9 @@ describe("pubkey_membership", () => {
}
);
const wasm = new SpartanWasm(defaultWasmConfig);
// Construct the tree
const poseidon = new Poseidon();
await poseidon.initWasm(wasm);
await poseidon.initWasm();
const nLevels = 10;
const tree = new Tree(nLevels, poseidon);


@@ -4,6 +4,11 @@ import * as path from "path";
const ec = new EC("secp256k1");
describe("secp256k1", () => {
/**
* Test adding two points that have different x coordinates; doubling a point or
* adding a point to its negative will not work, since either would cause a
* division by zero in the circuit.
*/
it("Secp256k1AddIncomplete", async () => {
const circuit = await wasm_tester(
path.join(__dirname, "./circuits/add_incomplete_test.circom"),
@@ -33,6 +38,10 @@ describe("secp256k1", () => {
await circuit.checkConstraints(w);
});
/**
* Go through all 6 cases included in the analysis of complete addition in
* https://zcash.github.io/halo2/design/gadgets/ecc/addition.html
*/
describe("Secp256k1AddComplete", () => {
let circuit;
const p1 = ec.keyFromPrivate(Buffer.from("🪄", "utf16le")).getPublic();
@@ -165,6 +174,9 @@ describe("secp256k1", () => {
});
});
/**
* Test doubling circuit on the generator point.
*/
it("Secp256k1Double", async () => {
const circuit = await wasm_tester(
path.join(__dirname, "./circuits/double_test.circom"),
@@ -192,6 +204,10 @@ describe("secp256k1", () => {
});
describe("mul", () => {
/**
* Test the K() function for correctness with the two cases: (s + tQ) > q and
* (s + tQ) < q.
*/
describe("K", () => {
let circuit;
const q = BigInt(
@@ -258,6 +274,11 @@ describe("secp256k1", () => {
});
});
/**
* Test the mul circuit for correctness with:
* 1. scalar = q-1 (highest possible scalar)
* 2. scalar < q-1 (a few random cases)
*/
describe("Secp256k1Mul", () => {
let circuit;
beforeAll(async () => {


@@ -1,12 +1,12 @@
import { hashPersonalMessage, ecsign } from "@ethereumjs/util";
import { EffEcdsaCircuitPubInput } from "spartan-ecdsa";
import { computeEffEcdsaPubInput } from "@personaelabs/spartan-ecdsa";
export const getEffEcdsaCircuitInput = (privKey: Buffer, msg: Buffer) => {
const msgHash = hashPersonalMessage(msg);
const { v, r: _r, s } = ecsign(msgHash, privKey);
const r = BigInt("0x" + _r.toString("hex"));
const circuitPubInput = EffEcdsaCircuitPubInput.computeFromSig(r, v, msgHash);
const circuitPubInput = computeEffEcdsaPubInput(r, v, msgHash);
const input = {
s: BigInt("0x" + s.toString("hex")),
Tx: circuitPubInput.Tx,

packages/lib/.npmignore (new file, 7 lines)

@@ -0,0 +1,7 @@
/node_modules
/src
/tests
tsconfig.json
jest.config.js
copy_artifacts.sh
load_wasm.ts

packages/lib/LICENSE (new file, 674 lines)

@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
[The remainder of this file is the standard, verbatim text of the GNU General Public License, version 3, through the closing "How to Apply These Terms to Your New Programs" section; it duplicates the license text reproduced earlier in this document and is omitted here.]


@@ -1,40 +1,46 @@
# Spartan-ecdsa
Spartan-ecdsa is, to our knowledge, the fastest open-source method to verify ECDSA (secp256k1) signatures in zero-knowledge.
## Disclaimers
- Spartan-ecdsa is unaudited. Please use it at your own risk.
- Usage on mobile browsers isn't currently supported.
## Usage example
### Proving membership to a group of public keys
```typescript
// Setup
const privKey = Buffer.from("".padStart(16, "🧙"), "utf16le");
const msg = Buffer.from("harry potter");
const msgHash = hashPersonalMessage(msg);
const { v, r, s } = ecsign(msgHash, privKey);
const pubKey = ecrecover(msgHash, v, r, s);
const sig = `0x${r.toString("hex")}${s.toString("hex")}${v.toString(16)}`;
let wasm = new SpartanWasm(defaultWasmConfig);
import {
MembershipProver,
MembershipVerifier,
Poseidon,
Tree,
defaultPubkeyMembershipPConfig,
defaultPubkeyMembershipVConfig
} from "@personaelabs/spartan-ecdsa";
import { hashPersonalMessage } from "@ethereumjs/util";
// Init the Poseidon hash
const poseidon = new Poseidon();
await poseidon.initWasm(wasm);
await poseidon.initWasm();
const treeDepth = 20;
const treeDepth = 20; // Provided circuits have tree depth = 20
const tree = new Tree(treeDepth, poseidon);
const proverPubKey = Buffer.from("...");
// Get the prover public key hash
const proverPubkeyHash = poseidon.hashPubKey(pubKey);
const proverPubkeyHash = poseidon.hashPubKey(proverPubKey);
// Insert prover public key hash into the tree
tree.insert(proverPubkeyHash);
// Insert other members into the tree
for (const member of ["🕵️", "🥷", "👩‍🔬"]) {
const pubKey = privateToPublic(
Buffer.from("".padStart(16, member), "utf16le")
tree.insert(
poseidon.hashPubKey(Buffer.from("".padStart(16, member), "utf16le"))
);
tree.insert(poseidon.hashPubKey(pubKey));
}
// Compute the merkle proof
@@ -42,61 +48,55 @@ const index = tree.indexOf(proverPubkeyHash);
const merkleProof = tree.createProof(index);
// Init the prover
const prover = new MembershipProver(defaultPubkeyMembershipConfig);
await prover.initWasm(wasm);
const prover = new MembershipProver(defaultPubkeyMembershipPConfig);
await prover.initWasm();
const sig = "0x...";
const msgHash = hashPersonalMessage(Buffer.from("harry potter"));
// Prove membership
await prover.prove(sig, msgHash, merkleProof);
const { proof, publicInput } = await prover.prove(sig, msgHash, merkleProof);
// Init verifier
const verifier = new MembershipVerifier(defaultPubkeyMembershipVConfig);
await verifier.initWasm();
// Verify proof
await verifier.verify(proof, publicInput.serialize());
```
### Proving membership to a group of addresses
```typescript
import {
hashPersonalMessage,
privateToAddress,
ecsign
} from "@ethereumjs/util";
import {
SpartanWasm,
Tree,
Poseidon,
MembershipProver,
defaultAddressMembershipConfig,
defaultWasmConfig
} from "spartan-ecdsa";
const privKey = Buffer.from("".padStart(16, "🧙"), "utf16le");
const msg = Buffer.from("harry potter");
const msgHash = hashPersonalMessage(msg);
const { v, r, s } = ecsign(msgHash, privKey);
const sig = `0x${r.toString("hex")}${s.toString("hex")}${v.toString(16)}`;
let wasm = new SpartanWasm(defaultWasmConfig);
MembershipVerifier,
Poseidon,
Tree,
defaultAddressMembershipPConfig,
defaultAddressMembershipVConfig
} from "@personaelabs/spartan-ecdsa";
import { hashPersonalMessage } from "@ethereumjs/util";
// Init the Poseidon hash
const poseidon = new Poseidon();
await poseidon.initWasm(wasm);
await poseidon.initWasm();
const treeDepth = 20;
const treeDepth = 20; // Provided circuits have tree depth = 20
const tree = new Tree(treeDepth, poseidon);
// Get the prover address
const proverAddress = BigInt("0x" + privateToAddress(privKey).toString("hex"));
// Get the prover public key hash
const proverAddress = BigInt("0x...");
// Insert prover address into the tree
// Insert prover public key hash into the tree
tree.insert(proverAddress);
// Insert other members into the tree
for (const member of ["🕵️", "🥷", "👩‍🔬"]) {
const address = BigInt(
"0x" +
privateToAddress(
Buffer.from("".padStart(16, member), "utf16le")
).toString("hex")
tree.insert(
BigInt(
"0x" + Buffer.from("".padStart(16, member), "utf16le").toString("hex")
)
);
tree.insert(address);
}
// Compute the merkle proof
@@ -104,29 +104,61 @@ const index = tree.indexOf(proverAddress);
const merkleProof = tree.createProof(index);
// Init the prover
const prover = new MembershipProver(defaultAddressMembershipConfig);
await prover.initWasm(wasm);
const prover = new MembershipProver(defaultAddressMembershipPConfig);
await prover.initWasm();
const sig = "0x...";
const msgHash = hashPersonalMessage(Buffer.from("harry potter"));
// Prove membership
await prover.prove(sig, msgHash, merkleProof);
const { proof, publicInput } = await prover.prove(sig, msgHash, merkleProof);
// Init verifier
const verifier = new MembershipVerifier(defaultAddressMembershipVConfig);
await verifier.initWasm();
// Verify proof
await verifier.verify(proof, publicInput.serialize());
```
## Circuit downloads
_The provided circuits have Merkle tree depth = 20.
Changing the tree depth doesn't significantly affect the proving time, so we only provide a single depth, which is adequate (2^20 ~= 1 million leaves) for most situations._
**Public key membership**
| | |
| --- | --- |
| circuit | https://storage.googleapis.com/personae-proving-keys/membership/pubkey_membership.circuit |
| witnessGenWasm | https://storage.googleapis.com/personae-proving-keys/membership/pubkey_membership.wasm |
**Ethereum address membership**
| | |
| --- | --- |
| circuit | https://storage.googleapis.com/personae-proving-keys/membership/addr_membership.circuit |
| witnessGenWasm | https://storage.googleapis.com/personae-proving-keys/membership/addr_membership.wasm |
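
A hedged sketch of pointing the prover at these hosted artifacts; the `circuit` and `witnessGenWasm` keys follow the config shape shown elsewhere in this diff, though the exact ProverConfig may require additional fields:

```typescript
import { MembershipProver } from "@personaelabs/spartan-ecdsa";

// Hypothetical config wiring the hosted artifacts above into the prover;
// earlier versions of ProverConfig also required fields such as leafType.
const prover = new MembershipProver({
  circuit:
    "https://storage.googleapis.com/personae-proving-keys/membership/pubkey_membership.circuit",
  witnessGenWasm:
    "https://storage.googleapis.com/personae-proving-keys/membership/pubkey_membership.wasm"
});
await prover.initWasm();
```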
## Development
### Install dependencies
```
yarn
```
### Run tests
```
yarn jest
```
### Build
```
yarn build
```


@@ -1,7 +0,0 @@
# Copy circuit artifacts into the lib build dir
mkdir -p ./build/circuits/ &&
cp ./src/circuits/* ./build/circuits/ &&
# Copy wasm into the lib build dir
cp ./src/wasm/build/*.wasm ./build/wasm/build/


@@ -0,0 +1,18 @@
import * as fs from "fs";
/**
* Load the wasm file and output a typescript file with the wasm bytes embedded
*/
const embedWasmBytes = async () => {
let wasm = fs.readFileSync("../spartan_wasm/build/spartan_wasm_bg.wasm");
let bytes = new Uint8Array(wasm.buffer);
const file = `
export const wasmBytes = new Uint8Array([${bytes.toString()}]);
`;
fs.writeFileSync("./src/wasm/wasmBytes.ts", file);
};
embedWasmBytes();


@@ -3,6 +3,10 @@ module.exports = {
 preset: 'ts-jest',
 testEnvironment: 'node',
 transform: {
-  "^.+\\.js?$": "ts-jest"
-}
+  "^.+\\.(ts|js)?$": "ts-jest"
+},
+moduleNameMapper: {
+  "@src/(.*)$": "<rootDir>/src/$1",
+},
 testTimeout: 600000,
};


@@ -1,10 +1,33 @@
 {
-  "name": "spartan-ecdsa",
-  "version": "1.0.0",
-  "main": "./build/lib.js",
+  "name": "@personaelabs/spartan-ecdsa",
+  "version": "2.3.1",
+  "description": "Spartan-ecdsa is, to our knowledge, the fastest open-source method to verify ECDSA (secp256k1) signatures in zero-knowledge.",
+  "keywords": [
+    "spartan",
+    "spartan-ecdsa",
+    "zk",
+    "efficient-ecdsa"
+  ],
+  "author": "Personae Labs",
+  "main": "./dist/index.js",
+  "types": "./dist/index.d.ts",
   "license": "MIT",
+  "bugs": {
+    "url": "https://github.com/personaelabs/spartan-ecdsa/issues/new"
+  },
+  "homepage": "https://github.com/personaelabs/spartan-ecdsa",
+  "publishConfig": {
+    "access": "public"
+  },
+  "files": [
+    "dist/**/*"
+  ],
   "scripts": {
-    "build": "tsc && sh ./copy_artifacts.sh"
+    "build": "rm -rf ./dist && yarn embedWasmBytes && tsc --project tsconfig.build.json",
+    "prepublishOnly": "yarn build",
+    "prepare": "yarn embedWasmBytes",
+    "embedWasmBytes": "ts-node ./embedWasmBytes.ts",
     "test": "jest"
   },
   "devDependencies": {
     "@types/jest": "^29.2.5",
@@ -17,6 +40,6 @@
     "@ethereumjs/util": "^8.0.3",
     "@zk-kit/incremental-merkle-tree": "^1.0.0",
     "elliptic": "^6.5.4",
-    "snarkjs": "^0.5.0"
+    "snarkjs": "^0.7.1"
   }
 }


@@ -1,45 +0,0 @@
import * as path from "path";
const isWeb = typeof window !== "undefined";
import { LeafType, ProverConfig, WasmConfig } from "./types";
export const defaultWasmConfig: WasmConfig = {
pathOrUrl: isWeb
? "https://storage.googleapis.com/personae-proving-keys/spartan_wasm_bg.wasm"
: path.join(__dirname, "wasm/build/spartan_wasm_bg.wasm")
};
// Default configs for MembershipProver
// Default configs for pubkey membership proving
export const defaultPubkeyMembershipConfig: ProverConfig = {
spartanWasm: isWeb
? "https://storage.googleapis.com/personae-proving-keys/spartan_wasm_bg.wasm"
: path.join(__dirname, "wasm/build/spartan_wasm_bg.wasm"),
witnessGenWasm: isWeb
? "https://storage.googleapis.com/personae-proving-keys/membership/pubkey_membership.wasm"
: path.join(__dirname, "circuits/pubkey_membership.wasm"),
circuit: isWeb
? "https://storage.googleapis.com/personae-proving-keys/membership/pubkey_membership.circuit"
: path.join(__dirname, "circuits/pubkey_membership.circuit"),
leafType: LeafType.PubKeyHash
};
// Default configs for address membership proving
export const defaultAddressMembershipConfig: ProverConfig = {
spartanWasm: isWeb
? "https://storage.googleapis.com/personae-proving-keys/spartan_wasm_bg.wasm"
: path.join(__dirname, "wasm/build/spartan_wasm_bg.wasm"),
witnessGenWasm: isWeb
? "https://storage.googleapis.com/personae-proving-keys/membership/addr_membership.wasm"
: path.join(__dirname, "circuits/addr_membership.wasm"),
circuit: isWeb
? "https://storage.googleapis.com/personae-proving-keys/membership/addr_membership.circuit"
: path.join(__dirname, "circuits/addr_membership.circuit"),
leafType: LeafType.Address
};


@@ -0,0 +1,25 @@
import { ProverConfig, VerifyConfig } from "@src/types";
// Default configs for pubkey membership proving/verifying
export const defaultPubkeyProverConfig: ProverConfig = {
witnessGenWasm:
"https://storage.googleapis.com/personae-proving-keys/membership/pubkey_membership.wasm",
circuit:
"https://storage.googleapis.com/personae-proving-keys/membership/pubkey_membership.circuit"
};
export const defaultPubkeyVerifierConfig: VerifyConfig = {
circuit: defaultPubkeyProverConfig.circuit
};
// Default configs for address membership proving/verifying
export const defaultAddressProverConfig: ProverConfig = {
witnessGenWasm:
"https://storage.googleapis.com/personae-proving-keys/membership/addr_membership.wasm",
circuit:
"https://storage.googleapis.com/personae-proving-keys/membership/addr_membership.circuit"
};
export const defaultAddressVerifierConfig: VerifyConfig = {
circuit: defaultAddressProverConfig.circuit
};


@@ -1,94 +0,0 @@
import { Profiler } from "../helpers/profiler";
import { IProver, MerkleProof, NIZK, ProverConfig, LeafType } from "../types";
import { SpartanWasm } from "../wasm";
import {
bigIntToBytes,
loadCircuit,
fromSig,
snarkJsWitnessGen
} from "../helpers/utils";
import {
EffEcdsaPubInput,
EffEcdsaCircuitPubInput
} from "../helpers/efficient_ecdsa";
/**
* ECDSA Membership Prover
*/
export class MembershipProver extends Profiler implements IProver {
spartanWasm!: SpartanWasm;
circuit: string;
witnessGenWasm: string;
leafType: LeafType;
constructor(options: ProverConfig) {
super({ enabled: options?.enableProfiler });
this.leafType = options.leafType;
this.circuit = options.circuit;
this.witnessGenWasm = options.witnessGenWasm;
}
async initWasm(wasm: SpartanWasm) {
this.spartanWasm = wasm;
this.spartanWasm.init();
}
// @ts-ignore
async prove(
sig: string,
msgHash: Buffer,
merkleProof: MerkleProof
): Promise<NIZK> {
const { r, s, v } = fromSig(sig);
const circuitPubInput = EffEcdsaCircuitPubInput.computeFromSig(
r,
v,
msgHash
);
const effEcdsaPubInput = new EffEcdsaPubInput(
r,
v,
msgHash,
circuitPubInput
);
const merkleRootSer: Uint8Array = bigIntToBytes(merkleProof.root, 32);
const circuitPubInputSer = circuitPubInput.serialize();
// Concatenate circuitPubInputSer and merkleRootSer to construct the full public input
const pubInput = new Uint8Array(
merkleRootSer.length + circuitPubInputSer.length
);
pubInput.set(merkleRootSer);
pubInput.set(circuitPubInputSer, merkleRootSer.length);
const witnessGenInput = {
s,
...merkleProof,
...effEcdsaPubInput.circuitPubInput
};
this.time("Generate witness");
const witness = await snarkJsWitnessGen(
witnessGenInput,
this.witnessGenWasm
);
this.timeEnd("Generate witness");
this.time("Load circuit");
const circuitBin = await loadCircuit(this.circuit);
this.timeEnd("Load circuit");
this.time("Prove");
let proof = await this.spartanWasm.prove(
circuitBin,
witness.data,
pubInput
);
this.timeEnd("Prove");
return { proof, publicInput: effEcdsaPubInput.serialize() };
}
}


@@ -0,0 +1,100 @@
import { Profiler } from "@src/helpers/profiler";
import { IProver, MerkleProof, NIZK, ProveArgs, ProverConfig } from "@src/types";
import { loadCircuit, fromSig, snarkJsWitnessGen } from "@src/helpers/utils";
import {
PublicInput,
computeEffEcdsaPubInput,
CircuitPubInput
} from "@src/helpers/publicInputs";
import { init, wasm } from "@src/wasm";
import {
defaultPubkeyProverConfig,
defaultAddressProverConfig
} from "@src/config";
/**
* ECDSA Membership Prover
*/
export class MembershipProver extends Profiler implements IProver {
circuit: string;
witnessGenWasm: string;
useRemoteCircuit: boolean;
constructor({
enableProfiler,
circuit,
witnessGenWasm,
useRemoteCircuit
}: ProverConfig) {
super({ enabled: enableProfiler });
if (
circuit === defaultPubkeyProverConfig.circuit ||
witnessGenWasm ===
defaultPubkeyProverConfig.witnessGenWasm ||
circuit === defaultAddressProverConfig.circuit ||
witnessGenWasm === defaultAddressProverConfig.witnessGenWasm
) {
console.warn(`
Spartan-ecdsa default config warning:
We recommend using the default configs (defaultPubkeyMembershipPConfig/defaultAddressMembershipPConfig) only for testing purposes.
For sovereign control, host the circuit and witnessGenWasm files on your own server and specify them in your config.
Download files: https://github.com/personaelabs/spartan-ecdsa/blob/main/packages/lib/README.md#circuit-downloads
`);
}
this.circuit = circuit;
this.witnessGenWasm = witnessGenWasm;
this.useRemoteCircuit = useRemoteCircuit ?? false;
}
async initWasm() {
await init();
}
async prove({ sig, msgHash, merkleProof }: ProveArgs): Promise<NIZK> {
const { r, s, v } = fromSig(sig);
const effEcdsaPubInput = computeEffEcdsaPubInput(r, v, msgHash);
const circuitPubInput = new CircuitPubInput(
merkleProof.root,
effEcdsaPubInput.Tx,
effEcdsaPubInput.Ty,
effEcdsaPubInput.Ux,
effEcdsaPubInput.Uy
);
const publicInput = new PublicInput(r, v, msgHash, circuitPubInput);
const witnessGenInput = {
s,
...merkleProof,
...effEcdsaPubInput
};
this.time("Generate witness");
const witness = await snarkJsWitnessGen(
witnessGenInput,
this.witnessGenWasm
);
this.timeEnd("Generate witness");
this.time("Load circuit");
const useRemoteCircuit =
this.useRemoteCircuit || typeof window !== "undefined";
const circuitBin = await loadCircuit(this.circuit, useRemoteCircuit);
this.timeEnd("Load circuit");
// Get the public input in bytes
const circuitPublicInput: Uint8Array =
publicInput.circuitPubInput.serialize();
this.time("Prove");
let proof = wasm.prove(circuitBin, witness.data, circuitPublicInput);
this.timeEnd("Prove");
return {
proof,
publicInput
};
}
}


@@ -0,0 +1,71 @@
import {
defaultAddressVerifierConfig,
defaultPubkeyVerifierConfig
} from "@src/config";
import { Profiler } from "@src/helpers/profiler";
import { loadCircuit } from "@src/helpers/utils";
import { IVerifier, VerifyArgs, VerifyConfig } from "@src/types";
import { init, wasm } from "@src/wasm";
import { PublicInput, verifyEffEcdsaPubInput } from "@src/helpers/publicInputs";
/**
* ECDSA Membership Verifier
*/
export class MembershipVerifier extends Profiler implements IVerifier {
circuit: string;
useRemoteCircuit: boolean;
constructor({
circuit,
enableProfiler,
useRemoteCircuit
}: VerifyConfig) {
super({ enabled: enableProfiler });
if (
circuit === defaultAddressVerifierConfig.circuit ||
circuit === defaultPubkeyVerifierConfig.circuit
) {
console.warn(`
Spartan-ecdsa default config warning:
We recommend using the default configs (defaultPubkeyMembershipVConfig/defaultAddressMembershipVConfig) only for testing purposes.
For sovereign control, host the circuit file on your own server and specify it in your config.
Download files: https://github.com/personaelabs/spartan-ecdsa/blob/main/packages/lib/README.md#circuit-downloads
`);
}
this.circuit = circuit;
this.useRemoteCircuit =
useRemoteCircuit || typeof window !== "undefined";
}
async initWasm() {
await init();
}
async verify({ proof, publicInputSer }: VerifyArgs): Promise<boolean> {
this.time("Load circuit");
const circuitBin = await loadCircuit(this.circuit, this.useRemoteCircuit);
this.timeEnd("Load circuit");
this.time("Verify public input");
const publicInput = PublicInput.deserialize(publicInputSer);
const isPubInputValid = verifyEffEcdsaPubInput(publicInput);
this.timeEnd("Verify public input");
this.time("Verify proof");
let isProofValid;
try {
isProofValid = await wasm.verify(
circuitBin,
proof,
publicInput.circuitPubInput.serialize()
);
} catch (_e) {
isProofValid = false;
}
this.timeEnd("Verify proof");
return isProofValid && isPubInputValid;
}
}


@@ -1,153 +0,0 @@
var EC = require("elliptic").ec;
const BN = require("bn.js");
import { bytesToBigInt, bigIntToBytes } from "./utils";
const ec = new EC("secp256k1");
const SECP256K1_N = new BN(
"fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141",
16
);
/**
* Public inputs that are passed into the efficient ECDSA circuit
* This doesn't include the other public values, which are the group element R and the msgHash.
*/
export class EffEcdsaCircuitPubInput {
Tx: bigint;
Ty: bigint;
Ux: bigint;
Uy: bigint;
constructor(Tx: bigint, Ty: bigint, Ux: bigint, Uy: bigint) {
this.Tx = Tx;
this.Ty = Ty;
this.Ux = Ux;
this.Uy = Uy;
}
static computeFromSig(
r: bigint,
v: bigint,
msgHash: Buffer
): EffEcdsaCircuitPubInput {
const isYOdd = (v - BigInt(27)) % BigInt(2);
const rPoint = ec.keyFromPublic(
ec.curve.pointFromX(new BN(r), isYOdd).encode("hex"),
"hex"
);
// Get the group element: -(m * r^1 * G)
const rInv = new BN(r).invm(SECP256K1_N);
// w = -(r^-1 * msg)
const w = rInv.mul(new BN(msgHash)).neg().umod(SECP256K1_N);
// U = -(w * G) = -(r^-1 * msg * G)
const U = ec.curve.g.mul(w);
// T = r^-1 * R
const T = rPoint.getPublic().mul(rInv);
return new EffEcdsaCircuitPubInput(
BigInt(T.getX().toString()),
BigInt(T.getY().toString()),
BigInt(U.getX().toString()),
BigInt(U.getY().toString())
);
}
serialize(): Uint8Array {
let serialized = new Uint8Array(32 * 4);
serialized.set(bigIntToBytes(this.Tx, 32), 0);
serialized.set(bigIntToBytes(this.Ty, 32), 32);
serialized.set(bigIntToBytes(this.Ux, 32), 64);
serialized.set(bigIntToBytes(this.Uy, 32), 96);
return serialized;
}
}
/**
* Public values of efficient ECDSA
*/
export class EffEcdsaPubInput {
r: bigint;
rV: bigint;
msgHash: Buffer;
circuitPubInput: EffEcdsaCircuitPubInput;
constructor(
r: bigint,
v: bigint,
msgHash: Buffer,
circuitPubInput: EffEcdsaCircuitPubInput
) {
this.r = r;
this.rV = v;
this.msgHash = msgHash;
this.circuitPubInput = circuitPubInput;
}
/**
* Serialize the public input into a Uint8Array
* @returns the serialized public input
*/
serialize(): Uint8Array {
let serialized = new Uint8Array(32 * 6 + 1);
serialized.set(bigIntToBytes(this.r, 32), 0);
serialized.set(bigIntToBytes(this.rV, 1), 32);
serialized.set(this.msgHash, 33);
serialized.set(bigIntToBytes(this.circuitPubInput.Tx, 32), 65);
serialized.set(bigIntToBytes(this.circuitPubInput.Ty, 32), 97);
serialized.set(bigIntToBytes(this.circuitPubInput.Ux, 32), 129);
serialized.set(bigIntToBytes(this.circuitPubInput.Uy, 32), 161);
return serialized;
}
/**
* Instantiate EffEcdsaPubInput from a serialized Uint8Array
* @param serialized Uint8Array serialized by the serialize() function
* @returns EffEcdsaPubInput
*/
static deserialize(serialized: Uint8Array): EffEcdsaPubInput {
const r = bytesToBigInt(serialized.slice(0, 32));
const rV = bytesToBigInt(serialized.slice(32, 33));
const msg = serialized.slice(33, 65);
const Tx = bytesToBigInt(serialized.slice(65, 97));
const Ty = bytesToBigInt(serialized.slice(97, 129));
const Ux = bytesToBigInt(serialized.slice(129, 161));
const Uy = bytesToBigInt(serialized.slice(161, 193));
return new EffEcdsaPubInput(
r,
rV,
Buffer.from(msg),
new EffEcdsaCircuitPubInput(Tx, Ty, Ux, Uy)
);
}
}
/**
* Verify the public values of the efficient ECDSA circuit
*/
export const verifyEffEcdsaPubInput = (pubInput: EffEcdsaPubInput): boolean => {
const expectedCircuitInput = EffEcdsaCircuitPubInput.computeFromSig(
pubInput.r,
pubInput.rV,
pubInput.msgHash
);
const circuitPubInput = pubInput.circuitPubInput;
const isValid =
expectedCircuitInput.Tx === circuitPubInput.Tx &&
expectedCircuitInput.Ty === circuitPubInput.Ty &&
expectedCircuitInput.Ux === circuitPubInput.Ux &&
expectedCircuitInput.Uy === circuitPubInput.Uy;
return isValid;
};


@@ -1,25 +1,21 @@
-import { SpartanWasm } from "../wasm";
+import { init, wasm } from "@src/wasm";
 import { bigIntToLeBytes, bytesLeToBigInt } from "./utils";
 export class Poseidon {
-  wasm!: SpartanWasm;
   constructor() {}
-  async initWasm(wasm: SpartanWasm) {
-    this.wasm = wasm;
-    await this.wasm.init();
-  }
   hash(inputs: bigint[]): bigint {
     const inputsBytes = new Uint8Array(32 * inputs.length);
     for (let i = 0; i < inputs.length; i++) {
       inputsBytes.set(bigIntToLeBytes(inputs[i], 32), i * 32);
     }
-    const result = this.wasm.poseidon(inputsBytes);
+    const result = wasm.poseidon(inputsBytes);
     return bytesLeToBigInt(result);
   }
+  async initWasm() {
+    await init();
+  }
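/**
 * Hash a 64-byte uncompressed secp256k1 public key (X || Y) as Poseidon(X, Y).
 */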
hashPubKey(pubKey: Buffer): bigint {
const pubKeyX = BigInt("0x" + pubKey.toString("hex").slice(0, 64));
const pubKeyY = BigInt("0x" + pubKey.toString("hex").slice(64, 128));


@@ -0,0 +1,166 @@
var EC = require("elliptic").ec;
const BN = require("bn.js");
import { EffECDSAPubInput } from "@src/types";
import { bytesToBigInt, bigIntToBytes } from "./utils";
const ec = new EC("secp256k1");
const SECP256K1_N = new BN(
"fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141",
16
);
/**
* Public inputs that are passed into the membership circuit
* This doesn't include the public values that aren't passed into the circuit,
* which are the group element R and the msgHash.
*/
export class CircuitPubInput {
merkleRoot: bigint;
Tx: bigint;
Ty: bigint;
Ux: bigint;
Uy: bigint;
constructor(
merkleRoot: bigint,
Tx: bigint,
Ty: bigint,
Ux: bigint,
Uy: bigint
) {
this.merkleRoot = merkleRoot;
this.Tx = Tx;
this.Ty = Ty;
this.Ux = Ux;
this.Uy = Uy;
}
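/**
 * Serialized layout: merkleRoot | Tx | Ty | Ux | Uy, 32 bytes each.
 */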
serialize(): Uint8Array {
let serialized = new Uint8Array(32 * 5);
serialized.set(bigIntToBytes(this.merkleRoot, 32), 0);
serialized.set(bigIntToBytes(this.Tx, 32), 32);
serialized.set(bigIntToBytes(this.Ty, 32), 64);
serialized.set(bigIntToBytes(this.Ux, 32), 96);
serialized.set(bigIntToBytes(this.Uy, 32), 128);
return serialized;
}
static deserialize(serialized: Uint8Array): CircuitPubInput {
const merkleRoot = bytesToBigInt(serialized.slice(0, 32));
const Tx = bytesToBigInt(serialized.slice(32, 64));
const Ty = bytesToBigInt(serialized.slice(64, 96));
const Ux = bytesToBigInt(serialized.slice(96, 128));
const Uy = bytesToBigInt(serialized.slice(128, 160));
return new CircuitPubInput(merkleRoot, Tx, Ty, Ux, Uy);
}
}
/**
* Public values of the membership circuit
*/
export class PublicInput {
r: bigint;
rV: bigint;
msgHash: Buffer;
circuitPubInput: CircuitPubInput;
constructor(
r: bigint,
v: bigint,
msgHash: Buffer,
circuitPubInput: CircuitPubInput
) {
this.r = r;
this.rV = v;
this.msgHash = msgHash;
this.circuitPubInput = circuitPubInput;
}
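/**
 * Serialized layout: r (32 bytes) | v (1 byte) | circuit public input (32 * 5 bytes) | msgHash.
 */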
serialize(): Uint8Array {
const circuitPubInput: Uint8Array = this.circuitPubInput.serialize();
let serialized = new Uint8Array(
32 + 1 + this.msgHash.byteLength + circuitPubInput.byteLength
);
serialized.set(bigIntToBytes(this.r, 32), 0);
serialized.set(bigIntToBytes(this.rV, 1), 32);
serialized.set(circuitPubInput, 33);
serialized.set(this.msgHash, 33 + circuitPubInput.byteLength);
return serialized;
}
static deserialize(serialized: Uint8Array): PublicInput {
const r = bytesToBigInt(serialized.slice(0, 32));
const rV = bytesToBigInt(serialized.slice(32, 33));
const circuitPubInput: CircuitPubInput = CircuitPubInput.deserialize(
serialized.slice(32 + 1, 32 + 1 + 32 * 5)
);
const msgHash = serialized.slice(32 + 1 + 32 * 5);
return new PublicInput(r, rV, Buffer.from(msgHash), circuitPubInput);
}
}
/**
* Compute the group elements T and U for efficient ecdsa
* https://personaelabs.org/posts/efficient-ecdsa-1/
*/
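// T = r^-1 * R and U = -(r^-1 * msg) * G, so that s * T + U equals the signer's
// public key; the circuit checks this relation with s as a private input.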
export const computeEffEcdsaPubInput = (
r: bigint,
v: bigint,
msgHash: Buffer
): EffECDSAPubInput => {
const isYOdd = (v - BigInt(27)) % BigInt(2);
const rPoint = ec.keyFromPublic(
ec.curve.pointFromX(new BN(r), isYOdd).encode("hex"),
"hex"
);
// Get the group element: -(m * r^-1 * G)
const rInv = new BN(r).invm(SECP256K1_N);
// w = -(r^-1 * msg)
const w = rInv.mul(new BN(msgHash)).neg().umod(SECP256K1_N);
// U = -(w * G) = -(r^-1 * msg * G)
const U = ec.curve.g.mul(w);
// T = r^-1 * R
const T = rPoint.getPublic().mul(rInv);
return {
Tx: BigInt(T.getX().toString()),
Ty: BigInt(T.getY().toString()),
Ux: BigInt(U.getX().toString()),
Uy: BigInt(U.getY().toString())
};
};
/**
* Verify the public values of the efficient ECDSA circuit
*/
export const verifyEffEcdsaPubInput = ({
r,
rV,
msgHash,
circuitPubInput
}: PublicInput): boolean => {
const expectedCircuitInput = computeEffEcdsaPubInput(
r,
rV,
msgHash
);
const isValid =
expectedCircuitInput.Tx === circuitPubInput.Tx &&
expectedCircuitInput.Ty === circuitPubInput.Ty &&
expectedCircuitInput.Ux === circuitPubInput.Ux &&
expectedCircuitInput.Uy === circuitPubInput.Uy;
return isValid;
};


@@ -1,7 +1,6 @@
 import { IncrementalMerkleTree } from "@zk-kit/incremental-merkle-tree";
 import { Poseidon } from "./poseidon";
 import { MerkleProof } from "../types";
-import { bytesToBigInt } from "./utils";
 export class Tree {
   depth: number;
@@ -20,6 +19,14 @@ export class Tree {
     this.treeInner.insert(leaf);
   }
+  delete(index: number) {
+    this.treeInner.delete(index);
+  }
+  leaves(): bigint[] {
+    return this.treeInner.leaves;
+  }
   root(): bigint {
     return this.treeInner.root;
   }
@@ -30,17 +37,14 @@ export class Tree {
   createProof(index: number): MerkleProof {
     const proof = this.treeInner.createProof(index);
-    const siblings = proof.siblings.map(s =>
-      typeof s[0] === "bigint" ? s : bytesToBigInt(s[0])
-    );
     return {
-      siblings,
+      siblings: proof.siblings,
       pathIndices: proof.pathIndices,
       root: proof.root
     };
   }
-  // TODO: Add more functions that expose the IncrementalMerkleTree API
+  verifyProof(proof: MerkleProof, leaf: bigint): boolean {
+    return this.treeInner.verifyProof({ ...proof, leaf });
+  }
 }


@@ -18,8 +18,11 @@ export const snarkJsWitnessGen = async (input: any, wasmFile: string) => {
/**
* Load a circuit from a file or URL
*/
-export const loadCircuit = async (pathOrUrl: string): Promise<Uint8Array> => {
-  if (pathOrUrl.startsWith("http")) {
+export const loadCircuit = async (
+  pathOrUrl: string,
+  useRemoteCircuit: boolean
+): Promise<Uint8Array> => {
+  if (useRemoteCircuit) {
return await fetchCircuit(pathOrUrl);
} else {
return await readCircuitFromFs(pathOrUrl);


@@ -0,0 +1,8 @@
export { MembershipProver } from "@src/core/prover";
export { MembershipVerifier } from "@src/core/verifier";
export {
  CircuitPubInput,
  PublicInput,
  computeEffEcdsaPubInput,
  verifyEffEcdsaPubInput
} from "@src/helpers/publicInputs";
export { Tree } from "@src/helpers/tree";
export { Poseidon } from "@src/helpers/poseidon";
export { init, wasm } from "@src/wasm/index";
export {
  defaultPubkeyProverConfig as defaultPubkeyMembershipPConfig,
  defaultPubkeyVerifierConfig as defaultPubkeyMembershipVConfig,
  defaultAddressProverConfig as defaultAddressMembershipPConfig,
  defaultAddressVerifierConfig as defaultAddressMembershipVConfig
} from "@src/config";
export type {
  MerkleProof,
  EffECDSAPubInput,
  NIZK,
  ProverConfig,
  VerifyConfig,
  IProver,
  IVerifier
} from "@src/types";


@@ -1,7 +0,0 @@
export * from "./types";
export * from "./helpers/efficient_ecdsa";
export * from "./core/membership_prover";
export * from "./helpers/tree";
export * from "./helpers/poseidon";
export * from "./wasm/index";
export * from "./config";


@@ -1,58 +0,0 @@
// The same structure as MerkleProof in @zk-kit/incremental-merkle-tree.
// Not directly using MerkleProof defined in @zk-kit/incremental-merkle-tree so
// library users can choose whatever merkle tree management method they want.
import { SpartanWasm } from "./wasm";
export interface MerkleProof {
root: bigint;
siblings: bigint[];
pathIndices: number[];
}
export interface NIZK {
proof: Uint8Array;
publicInput: Uint8Array;
}
export interface ProverConfig {
proverWasm?: string;
witnessGenWasm: string;
circuit: string;
spartanWasm: string;
enableProfiler?: boolean;
leafType: LeafType;
}
export interface WasmConfig {
pathOrUrl: string;
}
export interface VerifyOptions {
circuit?: string; // Path to circuit file compiled by Nova-Scotia
spartanWasm?: string; // Path to spartan wasm file
enableProfiler?: boolean;
}
export interface IProver {
spartanWasm: SpartanWasm;
circuit: string; // Path to circuit file compiled by Nova-Scotia
witnessGenWasm: string; // Path to witness generator wasm file generated by Circom
prove(...args: any): Promise<NIZK>;
}
export interface IVerifier {
spartanWasm: SpartanWasm; // Path to spartan wasm file
circuit: string; // Path to circuit file compiled by Nova-Scotia
verify(proof: Uint8Array, publicInput: Uint8Array): Promise<boolean>;
}
export interface SpartanWasmOptions {
spartanWasm: string;
}
export enum LeafType {
PubKeyHash,
Address
}


@@ -0,0 +1,58 @@
import { PublicInput } from "@src/helpers/publicInputs";
// The same structure as MerkleProof in @zk-kit/incremental-merkle-tree.
// Not directly using MerkleProof defined in @zk-kit/incremental-merkle-tree so
// library users can choose whatever merkle tree management method they want.
export interface MerkleProof {
root: bigint;
siblings: [bigint][];
pathIndices: number[];
}
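// For illustration: any object of this shape is accepted, e.g.
// { root: 42n, siblings: [[1n], [2n]], pathIndices: [0, 1] }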
export interface EffECDSAPubInput {
Tx: bigint;
Ty: bigint;
Ux: bigint;
Uy: bigint;
}
export interface NIZK {
proof: Uint8Array;
publicInput: PublicInput;
}
export interface ProverConfig {
witnessGenWasm: string;
circuit: string;
enableProfiler?: boolean;
useRemoteCircuit?: boolean; // Fetch the circuit over HTTP (forced on in browsers)
}
export interface ProveArgs {
  sig: string;
  msgHash: Buffer;
  merkleProof: MerkleProof;
}
export interface VerifyArgs {
  proof: Uint8Array;
  publicInputSer: Uint8Array;
}
export interface VerifyConfig {
circuit: string; // Path to circuit file compiled by Nova-Scotia
enableProfiler?: boolean;
useRemoteCircuit?: boolean; // Fetch the circuit over HTTP (forced on in browsers)
}
export interface IProver {
circuit: string; // Path to circuit file compiled by Nova-Scotia
witnessGenWasm: string; // Path to witness generator wasm file generated by Circom
prove({ sig, msgHash, merkleProof }: ProveArgs): Promise<NIZK>;
}
export interface IVerifier {
circuit: string; // Path to circuit file compiled by Nova-Scotia
verify({ proof, publicInputSer }: VerifyArgs): Promise<boolean>;
}


@@ -1,46 +1,10 @@
 import * as wasm from "./wasm";
-import _initWeb from "./wasm.js";
-import fs from "fs";
-import path from "path";
-import { WasmConfig } from "../types";
+import { wasmBytes } from "./wasmBytes";
-// TODO: Rename this to just Wasm since it includes not only Spartan but also Poseidon
-export class SpartanWasm {
-  private spartanWasmPathOrUrl: any;
-  constructor(config: WasmConfig) {
-    this.spartanWasmPathOrUrl = config.pathOrUrl;
-  }
-  async init() {
-    if (typeof window === "undefined") {
-      await this.initNode();
-    } else {
-      await this.initWeb();
-    }
-  }
-  prove(circuit: Uint8Array, vars: Uint8Array, public_inputs: Uint8Array) {
-    return wasm.prove(circuit, vars, public_inputs);
-  }
-  verify(circuit: Uint8Array, vars: Uint8Array, public_inputs: Uint8Array) {
-    return wasm.verify(circuit, vars, public_inputs);
-  }
-  poseidon(inputs: Uint8Array) {
-    return wasm.poseidon(inputs);
-  }
-  private async initNode() {
-    const bytes = fs.readFileSync(this.spartanWasmPathOrUrl);
-    await wasm.initSync(bytes);
-    await wasm.init_panic_hook();
-  }
-  private async initWeb() {
-    await _initWeb(this.spartanWasmPathOrUrl);
-    await wasm.init_panic_hook();
-  }
-}
+export const init = async () => {
+  await wasm.initSync(wasmBytes.buffer);
+  wasm.init_panic_hook();
+};
+export { wasm };


@@ -26,18 +26,16 @@ export function poseidon(input_bytes: Uint8Array): Uint8Array;
export type InitInput = RequestInfo | URL | Response | BufferSource | WebAssembly.Module;
export interface InitOutput {
readonly memory: WebAssembly.Memory;
readonly prove: (a: number, b: number, c: number, d: number, e: number, f: number, g: number) => void;
readonly verify: (a: number, b: number, c: number, d: number, e: number, f: number, g: number) => void;
readonly poseidon: (a: number, b: number, c: number) => void;
readonly init_panic_hook: () => void;
readonly memory: WebAssembly.Memory;
readonly __wbindgen_add_to_stack_pointer: (a: number) => number;
readonly __wbindgen_malloc: (a: number) => number;
readonly __wbindgen_free: (a: number, b: number) => void;
readonly __wbindgen_exn_store: (a: number) => void;
readonly __wbindgen_realloc: (a: number, b: number, c: number) => number;
readonly __wbindgen_thread_destroy: () => void;
readonly __wbindgen_start: () => void;
}
export type SyncInitInput = BufferSource | WebAssembly.Module;
@@ -46,12 +44,10 @@ export type SyncInitInput = BufferSource | WebAssembly.Module;
* a precompiled `WebAssembly.Module`.
*
* @param {SyncInitInput} module
* @param {WebAssembly.Memory} maybe_memory
*
* @returns {InitOutput}
*/
export function initSync(module: SyncInitInput, maybe_memory?: WebAssembly.Memory): InitOutput;
export function initSync(module: SyncInitInput): InitOutput;
/**
* If `module_or_path` is {RequestInfo} or {URL}, makes a request and
* for everything else, calls `WebAssembly.instantiate` directly.
@@ -61,4 +57,4 @@ export function initSync(module: SyncInitInput, maybe_memory?: WebAssembly.Memor
*
* @returns {Promise<InitOutput>}
*/
export default function init (module_or_path?: InitInput | Promise<InitInput>, maybe_memory?: WebAssembly.Memory): Promise<InitOutput>;
export default function __wbg_init (module_or_path?: InitInput | Promise<InitInput>, maybe_memory?: WebAssembly.Memory): Promise<InitOutput>;


@@ -1,7 +1,6 @@
let wasm;
const heap = new Array(32).fill(undefined);
const heap = new Array(128).fill(undefined);
heap.push(undefined, null, true, false);
@@ -10,7 +9,7 @@ function getObject(idx) { return heap[idx]; }
let heap_next = heap.length;
function dropObject(idx) {
if (idx < 36) return;
if (idx < 132) return;
heap[idx] = heap_next;
heap_next = idx;
}
@@ -21,21 +20,22 @@ function takeObject(idx) {
return ret;
}
const cachedTextDecoder = new TextDecoder('utf-8', { ignoreBOM: true, fatal: true });
const cachedTextDecoder = (typeof TextDecoder !== 'undefined' ? new TextDecoder('utf-8', { ignoreBOM: true, fatal: true }) : { decode: () => { throw Error('TextDecoder not available') } } );
cachedTextDecoder.decode();
if (typeof TextDecoder !== 'undefined') { cachedTextDecoder.decode(); };
let cachedUint8Memory0 = new Uint8Array();
let cachedUint8Memory0 = null;
function getUint8Memory0() {
if (cachedUint8Memory0.buffer !== wasm.memory.buffer) {
if (cachedUint8Memory0 === null || cachedUint8Memory0.byteLength === 0) {
cachedUint8Memory0 = new Uint8Array(wasm.memory.buffer);
}
return cachedUint8Memory0;
}
function getStringFromWasm0(ptr, len) {
return cachedTextDecoder.decode(getUint8Memory0().slice(ptr, ptr + len));
ptr = ptr >>> 0;
return cachedTextDecoder.decode(getUint8Memory0().subarray(ptr, ptr + len));
}
function addHeapObject(obj) {
@@ -55,22 +55,23 @@ export function init_panic_hook() {
let WASM_VECTOR_LEN = 0;
function passArray8ToWasm0(arg, malloc) {
const ptr = malloc(arg.length * 1);
const ptr = malloc(arg.length * 1) >>> 0;
getUint8Memory0().set(arg, ptr / 1);
WASM_VECTOR_LEN = arg.length;
return ptr;
}
let cachedInt32Memory0 = new Int32Array();
let cachedInt32Memory0 = null;
function getInt32Memory0() {
if (cachedInt32Memory0.buffer !== wasm.memory.buffer) {
if (cachedInt32Memory0 === null || cachedInt32Memory0.byteLength === 0) {
cachedInt32Memory0 = new Int32Array(wasm.memory.buffer);
}
return cachedInt32Memory0;
}
function getArrayU8FromWasm0(ptr, len) {
ptr = ptr >>> 0;
return getUint8Memory0().subarray(ptr / 1, ptr / 1 + len);
}
/**
@@ -96,9 +97,9 @@ export function prove(circuit, vars, public_inputs) {
if (r3) {
throw takeObject(r2);
}
var v3 = getArrayU8FromWasm0(r0, r1).slice();
var v4 = getArrayU8FromWasm0(r0, r1).slice();
wasm.__wbindgen_free(r0, r1 * 1);
return v3;
return v4;
} finally {
wasm.__wbindgen_add_to_stack_pointer(16);
}
@@ -149,9 +150,9 @@ export function poseidon(input_bytes) {
if (r3) {
throw takeObject(r2);
}
var v1 = getArrayU8FromWasm0(r0, r1).slice();
var v2 = getArrayU8FromWasm0(r0, r1).slice();
wasm.__wbindgen_free(r0, r1 * 1);
return v1;
return v2;
} finally {
wasm.__wbindgen_add_to_stack_pointer(16);
}
@@ -165,29 +166,33 @@ function handleError(f, args) {
}
}
const cachedTextEncoder = new TextEncoder('utf-8');
const cachedTextEncoder = (typeof TextEncoder !== 'undefined' ? new TextEncoder('utf-8') : { encode: () => { throw Error('TextEncoder not available') } } );
const encodeString = function (arg, view) {
const encodeString = (typeof cachedTextEncoder.encodeInto === 'function'
? function (arg, view) {
return cachedTextEncoder.encodeInto(arg, view);
}
: function (arg, view) {
const buf = cachedTextEncoder.encode(arg);
view.set(buf);
return {
read: arg.length,
written: buf.length
};
};
});
function passStringToWasm0(arg, malloc, realloc) {
if (realloc === undefined) {
const buf = cachedTextEncoder.encode(arg);
const ptr = malloc(buf.length);
const ptr = malloc(buf.length) >>> 0;
getUint8Memory0().subarray(ptr, ptr + buf.length).set(buf);
WASM_VECTOR_LEN = buf.length;
return ptr;
}
let len = arg.length;
let ptr = malloc(len);
let ptr = malloc(len) >>> 0;
const mem = getUint8Memory0();
@@ -203,7 +208,7 @@ function passStringToWasm0(arg, malloc, realloc) {
if (offset !== 0) {
arg = arg.slice(offset);
}
ptr = realloc(ptr, len, len = offset + arg.length * 3);
ptr = realloc(ptr, len, len = offset + arg.length * 3) >>> 0;
const view = getUint8Memory0().subarray(ptr + offset, ptr + len);
const ret = encodeString(arg, view);
@@ -214,7 +219,7 @@ function passStringToWasm0(arg, malloc, realloc) {
return ptr;
}
async function load(module, imports) {
async function __wbg_load(module, imports) {
if (typeof Response === 'function' && module instanceof Response) {
if (typeof WebAssembly.instantiateStreaming === 'function') {
try {
@@ -245,10 +250,10 @@ async function load(module, imports) {
}
}
function getImports() {
function __wbg_get_imports() {
const imports = {};
imports.wbg = {};
imports.wbg.__wbg_crypto_e1d53a1d73fb10b8 = function(arg0) {
imports.wbg.__wbg_crypto_70a96de3b6b73dac = function(arg0) {
const ret = getObject(arg0).crypto;
return addHeapObject(ret);
};
@@ -257,15 +262,15 @@ function getImports() {
const ret = typeof(val) === 'object' && val !== null;
return ret;
};
imports.wbg.__wbg_process_038c26bf42b093f8 = function(arg0) {
imports.wbg.__wbg_process_dd1577445152112e = function(arg0) {
const ret = getObject(arg0).process;
return addHeapObject(ret);
};
imports.wbg.__wbg_versions_ab37218d2f0b24a8 = function(arg0) {
imports.wbg.__wbg_versions_58036bec3add9e6f = function(arg0) {
const ret = getObject(arg0).versions;
return addHeapObject(ret);
};
imports.wbg.__wbg_node_080f4b19d15bc1fe = function(arg0) {
imports.wbg.__wbg_node_6a9d28205ed5b0d8 = function(arg0) {
const ret = getObject(arg0).node;
return addHeapObject(ret);
};
@@ -276,11 +281,11 @@ function getImports() {
imports.wbg.__wbindgen_object_drop_ref = function(arg0) {
takeObject(arg0);
};
imports.wbg.__wbg_msCrypto_6e7d3e1f92610cbb = function(arg0) {
imports.wbg.__wbg_msCrypto_adbc770ec9eca9c7 = function(arg0) {
const ret = getObject(arg0).msCrypto;
return addHeapObject(ret);
};
imports.wbg.__wbg_require_78a3dcfbdba9cbce = function() { return handleError(function () {
imports.wbg.__wbg_require_f05d779769764e82 = function() { return handleError(function () {
const ret = module.require;
return addHeapObject(ret);
}, arguments) };
@@ -292,17 +297,17 @@ function getImports() {
const ret = getStringFromWasm0(arg0, arg1);
return addHeapObject(ret);
};
imports.wbg.__wbg_getRandomValues_805f1c3d65988a5a = function() { return handleError(function (arg0, arg1) {
imports.wbg.__wbg_getRandomValues_3774744e221a22ad = function() { return handleError(function (arg0, arg1) {
getObject(arg0).getRandomValues(getObject(arg1));
}, arguments) };
imports.wbg.__wbg_randomFillSync_6894564c2c334c42 = function() { return handleError(function (arg0, arg1, arg2) {
getObject(arg0).randomFillSync(getArrayU8FromWasm0(arg1, arg2));
imports.wbg.__wbg_randomFillSync_e950366c42764a07 = function() { return handleError(function (arg0, arg1) {
getObject(arg0).randomFillSync(takeObject(arg1));
}, arguments) };
imports.wbg.__wbg_newnoargs_b5b063fc6c2f0376 = function(arg0, arg1) {
imports.wbg.__wbg_newnoargs_e643855c6572a4a8 = function(arg0, arg1) {
const ret = new Function(getStringFromWasm0(arg0, arg1));
return addHeapObject(ret);
};
imports.wbg.__wbg_call_97ae9d8645dc388b = function() { return handleError(function (arg0, arg1) {
imports.wbg.__wbg_call_f96b398515635514 = function() { return handleError(function (arg0, arg1) {
const ret = getObject(arg0).call(getObject(arg1));
return addHeapObject(ret);
}, arguments) };
@@ -310,19 +315,19 @@ function getImports() {
const ret = getObject(arg0);
return addHeapObject(ret);
};
imports.wbg.__wbg_self_6d479506f72c6a71 = function() { return handleError(function () {
imports.wbg.__wbg_self_b9aad7f1c618bfaf = function() { return handleError(function () {
const ret = self.self;
return addHeapObject(ret);
}, arguments) };
imports.wbg.__wbg_window_f2557cc78490aceb = function() { return handleError(function () {
imports.wbg.__wbg_window_55e469842c98b086 = function() { return handleError(function () {
const ret = window.window;
return addHeapObject(ret);
}, arguments) };
imports.wbg.__wbg_globalThis_7f206bda628d5286 = function() { return handleError(function () {
imports.wbg.__wbg_globalThis_d0957e302752547e = function() { return handleError(function () {
const ret = globalThis.globalThis;
return addHeapObject(ret);
}, arguments) };
imports.wbg.__wbg_global_ba75c50d1cf384f4 = function() { return handleError(function () {
imports.wbg.__wbg_global_ae2f87312b8987fb = function() { return handleError(function () {
const ret = global.global;
return addHeapObject(ret);
}, arguments) };
@@ -330,30 +335,30 @@ function getImports() {
const ret = getObject(arg0) === undefined;
return ret;
};
imports.wbg.__wbg_call_168da88779e35f61 = function() { return handleError(function (arg0, arg1, arg2) {
imports.wbg.__wbg_call_35782e9a1aa5e091 = function() { return handleError(function (arg0, arg1, arg2) {
const ret = getObject(arg0).call(getObject(arg1), getObject(arg2));
return addHeapObject(ret);
}, arguments) };
imports.wbg.__wbg_buffer_3f3d764d4747d564 = function(arg0) {
imports.wbg.__wbg_buffer_fcbfb6d88b2732e9 = function(arg0) {
const ret = getObject(arg0).buffer;
return addHeapObject(ret);
};
imports.wbg.__wbg_new_8c3f0052272a457a = function(arg0) {
imports.wbg.__wbg_newwithbyteoffsetandlength_92c251989c485785 = function(arg0, arg1, arg2) {
const ret = new Uint8Array(getObject(arg0), arg1 >>> 0, arg2 >>> 0);
return addHeapObject(ret);
};
imports.wbg.__wbg_new_bc5d9aad3f9ac80e = function(arg0) {
const ret = new Uint8Array(getObject(arg0));
return addHeapObject(ret);
};
imports.wbg.__wbg_set_83db9690f9353e79 = function(arg0, arg1, arg2) {
imports.wbg.__wbg_set_4b3aa8445ac1e91c = function(arg0, arg1, arg2) {
getObject(arg0).set(getObject(arg1), arg2 >>> 0);
};
imports.wbg.__wbg_length_9e1ae1900cb0fbd5 = function(arg0) {
const ret = getObject(arg0).length;
return ret;
};
imports.wbg.__wbg_newwithlength_f5933855e4f48a19 = function(arg0) {
imports.wbg.__wbg_newwithlength_89eca18f2603a999 = function(arg0) {
const ret = new Uint8Array(arg0 >>> 0);
return addHeapObject(ret);
};
imports.wbg.__wbg_subarray_58ad4efbb5bcb886 = function(arg0, arg1, arg2) {
imports.wbg.__wbg_subarray_7649d027b2b141b3 = function(arg0, arg1, arg2) {
const ret = getObject(arg0).subarray(arg1 >>> 0, arg2 >>> 0);
return addHeapObject(ret);
};
@@ -363,16 +368,20 @@ function getImports() {
};
imports.wbg.__wbg_stack_658279fe44541cf6 = function(arg0, arg1) {
const ret = getObject(arg1).stack;
const ptr0 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc);
const len0 = WASM_VECTOR_LEN;
getInt32Memory0()[arg0 / 4 + 1] = len0;
getInt32Memory0()[arg0 / 4 + 0] = ptr0;
const ptr1 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc);
const len1 = WASM_VECTOR_LEN;
getInt32Memory0()[arg0 / 4 + 1] = len1;
getInt32Memory0()[arg0 / 4 + 0] = ptr1;
};
imports.wbg.__wbg_error_f851667af71bcfc6 = function(arg0, arg1) {
let deferred0_0;
let deferred0_1;
try {
deferred0_0 = arg0;
deferred0_1 = arg1;
console.error(getStringFromWasm0(arg0, arg1));
} finally {
wasm.__wbindgen_free(arg0, arg1);
wasm.__wbindgen_free(deferred0_0, deferred0_1);
}
};
imports.wbg.__wbindgen_throw = function(arg0, arg1) {
@@ -386,52 +395,60 @@ function getImports() {
return imports;
}
function initMemory(imports, maybe_memory) {
imports.wbg.memory = maybe_memory || new WebAssembly.Memory({initial:18,maximum:65536,shared:true});
function __wbg_init_memory(imports, maybe_memory) {
}
function finalizeInit(instance, module) {
function __wbg_finalize_init(instance, module) {
wasm = instance.exports;
init.__wbindgen_wasm_module = module;
cachedInt32Memory0 = new Int32Array();
cachedUint8Memory0 = new Uint8Array();
__wbg_init.__wbindgen_wasm_module = module;
cachedInt32Memory0 = null;
cachedUint8Memory0 = null;
wasm.__wbindgen_start();
return wasm;
}
function initSync(module, maybe_memory) {
const imports = getImports();
async function initSync(module, maybe_memory) {
if (wasm !== undefined) return wasm;
initMemory(imports, maybe_memory);
const imports = __wbg_get_imports();
__wbg_init_memory(imports, maybe_memory);
/*
if (!(module instanceof WebAssembly.Module)) {
module = new WebAssembly.Module(module);
}
*/
const compiled = WebAssembly.compile(module);
const instance = new WebAssembly.Instance(module, imports);
const instance = await WebAssembly.instantiate(await compiled, imports);
return finalizeInit(instance, module);
return __wbg_finalize_init(instance, module);
}
async function init(input, maybe_memory) {
async function __wbg_init(input, maybe_memory) {
if (wasm !== undefined) return wasm;
/*
if (typeof input === 'undefined') {
input = new URL('spartan_wasm_bg.wasm', import.meta.url);
}
*/
const imports = getImports();
const imports = __wbg_get_imports();
if (typeof input === 'string' || (typeof Request === 'function' && input instanceof Request) || (typeof URL === 'function' && input instanceof URL)) {
input = fetch(input);
}
initMemory(imports, maybe_memory);
__wbg_init_memory(imports, maybe_memory);
const { instance, module } = await load(await input, imports);
const { instance, module } = await __wbg_load(await input, imports);
return finalizeInit(instance, module);
return __wbg_finalize_init(instance, module);
}
export { initSync }
export default init;
export default __wbg_init;


@@ -1,11 +1,12 @@
-import {
-  EffEcdsaCircuitPubInput,
-  EffEcdsaPubInput,
-  verifyEffEcdsaPubInput
-} from "../src/helpers/efficient_ecdsa";
 import { hashPersonalMessage } from "@ethereumjs/util";
+import {
+  CircuitPubInput,
+  PublicInput,
+  verifyEffEcdsaPubInput
+} from "../src/helpers/publicInputs";
-describe("efficient_ecdsa", () => {
+describe("public_input", () => {
/**
Hard coded values were computed in sage using the following code
p = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f
@@ -27,6 +28,7 @@ describe("efficient_ecdsa", () => {
*/
it("should verify valid public input", () => {
const merkleRoot = BigInt("0xbeef");
const msg = Buffer.from("harry potter");
const msgHash = hashPersonalMessage(msg);
@@ -47,13 +49,8 @@ describe("efficient_ecdsa", () => {
);
const v = BigInt(28);
-  const circuitPubInput = new EffEcdsaCircuitPubInput(Tx, Ty, Ux, Uy);
-  const effEcdsaPubInput = new EffEcdsaPubInput(
-    rX,
-    v,
-    msgHash,
-    circuitPubInput
-  );
+  const circuitPubInput = new CircuitPubInput(merkleRoot, Tx, Ty, Ux, Uy);
+  const effEcdsaPubInput = new PublicInput(rX, v, msgHash, circuitPubInput);
const isValid = verifyEffEcdsaPubInput(effEcdsaPubInput);
expect(isValid).toBe(true);


@@ -0,0 +1,201 @@
import {
hashPersonalMessage,
ecsign,
privateToAddress,
privateToPublic
} from "@ethereumjs/util";
import * as path from "path";
import {
MembershipProver,
MembershipVerifier,
Tree,
Poseidon,
NIZK
} from "../src";
describe("membership prove and verify", () => {
// Init prover
const treeDepth = 20;
const privKeys = ["1", "a", "bb", "ccc", "dddd", "ffff"].map(val =>
Buffer.from(val.padStart(64, "0"), "hex")
);
// Sign (Use privKeys[0] for proving)
const proverIndex = 0;
const proverPrivKey = privKeys[proverIndex];
let msg = Buffer.from("harry potter");
const msgHash = hashPersonalMessage(msg);
const { v, r, s } = ecsign(msgHash, proverPrivKey);
const sig = `0x${r.toString("hex")}${s.toString("hex")}${v.toString(16)}`;
let poseidon: Poseidon;
beforeAll(async () => {
// Init Poseidon
poseidon = new Poseidon();
await poseidon.initWasm();
});
describe("pubkey_membership prover and verify", () => {
const config = {
witnessGenWasm: path.join(
__dirname,
"../../circuits/build/pubkey_membership/pubkey_membership_js/pubkey_membership.wasm"
),
circuit: path.join(
__dirname,
"../../circuits/build/pubkey_membership/pubkey_membership.circuit"
)
};
let pubKeyMembershipVerifier: MembershipVerifier, nizk: NIZK;
beforeAll(async () => {
pubKeyMembershipVerifier = new MembershipVerifier({
circuit: config.circuit
});
await pubKeyMembershipVerifier.initWasm();
});
it("should prove and verify valid signature and merkle proof", async () => {
const pubKeyTree = new Tree(treeDepth, poseidon);
let proverPubKeyHash;
// Insert the members into the tree
for (const privKey of privKeys) {
const pubKey = privateToPublic(privKey);
const pubKeyHash = poseidon.hashPubKey(pubKey);
pubKeyTree.insert(pubKeyHash);
// Set prover's public key hash for the reference below
if (proverPrivKey === privKey) proverPubKeyHash = pubKeyHash;
}
const pubKeyMembershipProver = new MembershipProver(config);
await pubKeyMembershipProver.initWasm();
const index = pubKeyTree.indexOf(proverPubKeyHash as bigint);
const merkleProof = pubKeyTree.createProof(index);
nizk = await pubKeyMembershipProver.prove({ sig, msgHash, merkleProof });
const { proof, publicInput } = nizk;
expect(
await pubKeyMembershipVerifier.verify({
proof,
publicInputSer: publicInput.serialize()
})
).toBe(true);
});
it("should assert invalid proof", async () => {
const { publicInput } = nizk;
let proof = nizk.proof;
proof[0] += 1;
expect(
await pubKeyMembershipVerifier.verify({
proof,
publicInputSer: publicInput.serialize()
})
).toBe(false);
});
it("should assert invalid public input", async () => {
const { proof } = nizk;
let publicInputSer = nizk.publicInput.serialize();
publicInputSer[0] += 1;
expect(
await pubKeyMembershipVerifier.verify({
proof,
publicInputSer
})
).toBe(false);
});
});
describe("addr_membership prover and verify", () => {
const config = {
witnessGenWasm: path.join(
__dirname,
"../../circuits/build/addr_membership/addr_membership_js/addr_membership.wasm"
),
circuit: path.join(
__dirname,
"../../circuits/build/addr_membership/addr_membership.circuit"
)
};
let addressMembershipVerifier: MembershipVerifier, nizk: NIZK;
beforeAll(async () => {
addressMembershipVerifier = new MembershipVerifier({
circuit: config.circuit
});
await addressMembershipVerifier.initWasm();
});
it("should prove and verify valid signature and merkle proof", async () => {
const addressTree = new Tree(treeDepth, poseidon);
let proverAddress;
// Insert the members into the tree
for (const privKey of privKeys) {
const address = BigInt(
"0x" + privateToAddress(privKey).toString("hex")
);
addressTree.insert(address);
// Set prover's address for the reference below
if (proverPrivKey === privKey) proverAddress = address;
}
const index = addressTree.indexOf(proverAddress as bigint);
const merkleProof = addressTree.createProof(index);
const addressMembershipProver = new MembershipProver(config);
await addressMembershipProver.initWasm();
nizk = await addressMembershipProver.prove({ sig, msgHash, merkleProof });
await addressMembershipVerifier.initWasm();
expect(
await addressMembershipVerifier.verify({
proof: nizk.proof,
publicInputSer: nizk.publicInput.serialize()
})
).toBe(true);
});
it("should assert invalid proof", async () => {
const { publicInput } = nizk;
let proof = nizk.proof;
proof[0] += 1;
expect(
await addressMembershipVerifier.verify({
proof,
publicInputSer: publicInput.serialize()
})
).toBe(false);
});
it("should assert invalid public input", async () => {
const { proof } = nizk;
let publicInputSer = nizk.publicInput.serialize();
publicInputSer[0] += 1;
expect(
await addressMembershipVerifier.verify({
proof,
publicInputSer
})
).toBe(false);
});
});
});


@@ -1,118 +0,0 @@
import * as path from "path";
import {
MembershipProver,
Tree,
Poseidon,
defaultAddressMembershipConfig,
defaultPubkeyMembershipConfig,
SpartanWasm,
defaultWasmConfig
} from "../src/lib";
import {
hashPersonalMessage,
ecsign,
privateToAddress,
privateToPublic
} from "@ethereumjs/util";
var EC = require("elliptic").ec;
describe("membership prove and verify", () => {
// Init prover
const treeDepth = 20;
const privKeys = ["1", "a", "bb", "ccc", "dddd", "ffff"].map(val =>
Buffer.from(val.padStart(64, "0"), "hex")
);
// Sign (Use privKeys[0] for proving)
const proverIndex = 0;
const proverPrivKey = privKeys[proverIndex];
let proverAddress: bigint;
let msg = Buffer.from("harry potter");
const msgHash = hashPersonalMessage(msg);
const { v, r, s } = ecsign(msgHash, proverPrivKey);
const sig = `0x${r.toString("hex")}${s.toString("hex")}${v.toString(16)}`;
let poseidon: Poseidon;
let wasm: SpartanWasm;
beforeAll(async () => {
// Init Wasm
wasm = new SpartanWasm(defaultWasmConfig);
// Init Poseidon
poseidon = new Poseidon();
await poseidon.initWasm(wasm);
});
describe("pubkey_membership prover and verify", () => {
it("should prove and verify valid signature and merkle proof", async () => {
const pubKeyTree = new Tree(treeDepth, poseidon);
let proverPubKeyHash;
// Insert the members into the tree
for (const privKey of privKeys) {
const pubKey = privateToPublic(privKey);
const pubKeyHash = poseidon.hashPubKey(pubKey);
pubKeyTree.insert(pubKeyHash);
// Set prover's public key hash for the reference below
if (proverPrivKey === privKey) proverPubKeyHash = pubKeyHash;
}
const pubKeyMembershipProver = new MembershipProver(
defaultPubkeyMembershipConfig
);
await pubKeyMembershipProver.initWasm(wasm);
const index = pubKeyTree.indexOf(proverPubKeyHash as bigint);
const merkleProof = pubKeyTree.createProof(index);
const { proof, publicInput } = await pubKeyMembershipProver.prove(
sig,
msgHash,
merkleProof
);
// TODO: Verify the proof
});
});
describe("adddr_membership prover and verify", () => {
it("should prove and verify valid signature and merkle proof", async () => {
const addressTree = new Tree(treeDepth, poseidon);
let proverAddress;
// Insert the members into the tree
for (const privKey of privKeys) {
const address = BigInt(
"0x" + privateToAddress(privKey).toString("hex")
);
addressTree.insert(address);
// Set prover's public key hash for the reference below
if (proverPrivKey === privKey) proverAddress = address;
}
const addressMembershipProver = new MembershipProver(
defaultAddressMembershipConfig
);
await addressMembershipProver.initWasm(wasm);
const index = addressTree.indexOf(proverAddress as bigint);
const merkleProof = addressTree.createProof(index);
const { proof, publicInput } = await addressMembershipProver.prove(
sig,
msgHash,
merkleProof
);
// TODO: Verify the proof
});
});
});


@@ -0,0 +1,31 @@
import { Tree, Poseidon } from "../src";
describe("Merkle tree prove and verify", () => {
let poseidon: Poseidon;
let tree: Tree;
const members = new Array(10).fill(0).map((_, i) => BigInt(i));
beforeAll(async () => {
// Init Poseidon
poseidon = new Poseidon();
await poseidon.initWasm();
const treeDepth = 20;
tree = new Tree(treeDepth, poseidon);
for (const member of members) {
tree.insert(member);
}
});
it("should prove and verify a valid Merkle proof", async () => {
const proof = tree.createProof(0);
expect(tree.verifyProof(proof, members[0])).toBe(true);
});
it("should assert an invalid Merkle proof", async () => {
const proof = tree.createProof(0);
proof.siblings[0][0] += BigInt(1);
expect(tree.verifyProof(proof, members[0])).toBe(false);
proof.siblings[0][0] -= BigInt(1);
});
});
