Add bellman modifications to tree

This commit is contained in:
Evan Laufer
2022-10-24 07:55:30 -07:00
parent ee30c0c958
commit 65afa14da8
45 changed files with 14842 additions and 15 deletions

18
Cargo.lock generated
View File

@@ -343,7 +343,7 @@ dependencies = [
"rand_chacha",
"rand_core",
"rayon",
"sha2 0.10.2",
"sha2 0.10.6",
"subtle",
]
@@ -429,9 +429,9 @@ dependencies = [
[[package]]
name = "block-buffer"
version = "0.10.2"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324"
checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
dependencies = [
"generic-array 0.14.5",
]
@@ -684,11 +684,11 @@ dependencies = [
[[package]]
name = "digest"
version = "0.10.3"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506"
checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
dependencies = [
"block-buffer 0.10.2",
"block-buffer 0.10.3",
"crypto-common",
]
@@ -1629,13 +1629,13 @@ dependencies = [
[[package]]
name = "sha2"
version = "0.10.2"
version = "0.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676"
checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0"
dependencies = [
"cfg-if",
"cpufeatures",
"digest 0.10.3",
"digest 0.10.5",
]
[[package]]

View File

@@ -22,7 +22,7 @@ zokrates_pest_ast = { path = "third_party/ZoKrates/zokrates_pest_ast", optional
typed-arena = "2.0"
log = "0.4"
thiserror = "1.0"
bellman-proof = { path = "../bellman" , optional = true, package = "bellman", features=["groth16"] }
bellman = { path = "third_party/bellman", optional = true }
ff = "0.12"
fxhash = "0.2"
good_lp = { version = "1.1", features = ["lp-solvers", "coin_cbc"], default-features = false, optional = true }
@@ -67,7 +67,7 @@ bls12381 = []
ff_dfl = []
c = ["lang-c"]
lp = ["good_lp", "lp-solvers"]
r1cs = ["bellman-proof"]
r1cs = ["bellman"]
smt = ["rsmt2"]
zok = ["zokrates_parser", "zokrates_pest_ast"]
marlin = ["ark-marlin", "ark-relations", "ark-ff", "ark-poly-commit", "ark-poly", "ark-serialize", "ark-bls12-381", "sha2", "rand_chacha", "digest"]
@@ -78,7 +78,7 @@ name = "circ"
[[example]]
name = "zk"
required-features = ["r1cs", "marlin"]
required-features = ["r1cs"]
[[example]]
name = "zxi"

View File

@@ -1,5 +1,5 @@
//! Exporting our R1CS to bellman
use bellman_proof::{
use ::bellman::{
groth16::{
create_random_proof, generate_random_parameters, prepare_verifying_key, verify_proof,
Parameters, Proof, VerifyingKey,

View File

@@ -1,5 +1,5 @@
//! Exporting our R1CS to bellman
use bellman_proof::{
//! Exporting our R1CS to mirage
use ::bellman::{
mirage::{
create_random_proof, generate_random_parameters, prepare_verifying_key, verify_proof,
Parameters, Proof, VerifyingKey,

92
third_party/bellman/CHANGELOG.md vendored Normal file
View File

@@ -0,0 +1,92 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to Rust's notion of
[Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.13.1] - 2022-07-05
### Added
- `bellman::groth16::batch::Verifier` now has a `verify_multicore` method (when
the `multicore` feature is enabled) which will internally use the global rayon
thread pool to parallelize the verification of a batch of proofs.
### Changed
- The `multicore` feature now enables the `getrandom` feature of the `rand_core`
crate.
## [0.13.0] - 2022-05-06
### Added
- `bellman::multiexp::Exponent`
### Changed
- `bellman::multiexp::multiexp` now takes exponents as `Arc<Vec<Exponent<_>>>`
instead of `Arc<Vec<FieldBits<_>>>`.
### Fixed
- Migrating from `bitvec 0.22` to `bitvec 1.0` caused a performance regression
in `bellman::multiexp::multiexp`, slowing down proof creation. Some of that
performance has been regained by refactoring `multiexp`.
## [0.12.0] - 2022-05-04
### Changed
- MSRV bumped to `1.56.0`
- Bumped dependencies to `ff 0.12`, `group 0.12`, `pairing 0.22`, `bitvec 1.0`, `blake2s_simd 1.0`.
## [0.11.2] - 2022-05-04
### Fixed
- Groth16 prover now correctly computes query densities with respect to linear
combinations that contain coefficients of zero.
- Fixed an infinite recursion bug in the `Display` implementation for `SynthesisError`.
## [0.11.1] - 2021-09-09
### Fixed
- Compiling with `--no-default-features --features groth16` (i.e. disabling the
`multicore` feature flag) works again.
### Changed
- `bellman::multicore::Waiter::wait` now consumes `self` (better reflecting the
fact that you can't wait on the same result twice), instead of taking `&self`
with `multicore` enabled and `&mut self` with multicore disabled.
## [0.11.0] - 2021-09-08
### Added
- `bellman` now uses `rayon` for multithreading when the (default) `multicore`
feature flag is enabled. This means that, when this flag is enabled, the
`RAYON_NUM_THREADS` environment variable controls the number of threads that
`bellman` will use. The default, which has not changed, is to use the same
number of threads as logical CPUs.
- `bellman::multicore::Waiter`
- `Default` bound for `bellman::multiexp::DensityTracker`.
- `Default` bound for `bellman::gadgets::test::TestConstraintSystem`.
### Changed
- Bumped dependencies to `ff 0.11`, `group 0.11`, `pairing 0.21`.
- `bellman::multicore` has migrated from `crossbeam` to `rayon`:
- `bellman::multicore::Worker::compute` now returns
`bellman::multicore::Waiter`.
- `bellman::multiexp::multiexp` now returns
`bellman::multicore::Waiter<Result<G, SynthesisError>>` instead of
`Box<dyn Future<Item = G, Error = SynthesisError>>`.
- `bellman::multicore::log_num_cpus` is renamed to `log_num_threads`.
- `bellman::multiexp::SourceBuilder::new` is renamed to `SourceBuilder::build`.
### Removed
- `bellman::multicore::WorkerFuture` (replaced by `Waiter`).
## [0.10.0] - 2021-06-04
### Added
- `bellman::groth16::batch::Verifier`, for performing batched Groth16 proof
verification.
### Changed
- Bumped dependencies to `bitvec 0.22`, `ff 0.10`, `group 0.10`, `pairing 0.20`.
- MSRV is now 1.51.0.
## [0.9.0] - 2021-01-26
### Changed
- Bumped dependencies to `bitvec 0.20`, `ff 0.9`, `group 0.9`, `pairing 0.19`,
`rand_core 0.6`.
- MSRV is now 1.47.0.

14
third_party/bellman/COPYRIGHT vendored Normal file
View File

@@ -0,0 +1,14 @@
Copyrights in the "bellman" library are retained by their contributors. No
copyright assignment is required to contribute to the "bellman" library.
The "bellman" library is licensed under either of
* Apache License, Version 2.0, (see ./LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license (see ./LICENSE-MIT or http://opensource.org/licenses/MIT)
at your option.
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

876
third_party/bellman/Cargo.lock generated vendored Normal file
View File

@@ -0,0 +1,876 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "arrayref"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
[[package]]
name = "arrayvec"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
"hermit-abi",
"libc",
"winapi",
]
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bellman"
version = "0.13.1"
dependencies = [
"bitvec",
"blake2s_simd",
"bls12_381",
"byteorder",
"criterion",
"crossbeam-channel",
"ff",
"group",
"hex-literal",
"lazy_static",
"log",
"num_cpus",
"pairing",
"rand",
"rand_chacha",
"rand_core",
"rand_xorshift",
"rayon",
"sha2",
"subtle",
]
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitvec"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
dependencies = [
"funty",
"radium",
"tap",
"wyz",
]
[[package]]
name = "blake2s_simd"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db539cc2b5f6003621f1cd9ef92d7ded8ea5232c7de0f9faa2de251cd98730d4"
dependencies = [
"arrayref",
"arrayvec",
"constant_time_eq",
]
[[package]]
name = "block-buffer"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324"
dependencies = [
"generic-array",
]
[[package]]
name = "bls12_381"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62250ece575fa9b22068b3a8d59586f01d426dd7785522efd97632959e71c986"
dependencies = [
"ff",
"group",
"pairing",
"rand_core",
"subtle",
]
[[package]]
name = "bstr"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223"
dependencies = [
"lazy_static",
"memchr",
"regex-automata",
"serde",
]
[[package]]
name = "bumpalo"
version = "3.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3"
[[package]]
name = "byteorder"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "clap"
version = "2.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
dependencies = [
"bitflags",
"textwrap",
"unicode-width",
]
[[package]]
name = "constant_time_eq"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc"
[[package]]
name = "cpufeatures"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b"
dependencies = [
"libc",
]
[[package]]
name = "criterion"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f"
dependencies = [
"atty",
"cast",
"clap",
"criterion-plot",
"csv",
"itertools",
"lazy_static",
"num-traits",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_cbor",
"serde_derive",
"serde_json",
"tinytemplate",
"walkdir",
]
[[package]]
name = "criterion-plot"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876"
dependencies = [
"cast",
"itertools",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
dependencies = [
"cfg-if",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
dependencies = [
"cfg-if",
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1"
dependencies = [
"autocfg",
"cfg-if",
"crossbeam-utils",
"memoffset",
"once_cell",
"scopeguard",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc"
dependencies = [
"cfg-if",
"once_cell",
]
[[package]]
name = "crypto-common"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
"generic-array",
"typenum",
]
[[package]]
name = "csv"
version = "1.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1"
dependencies = [
"bstr",
"csv-core",
"itoa 0.4.8",
"ryu",
"serde",
]
[[package]]
name = "csv-core"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
dependencies = [
"memchr",
]
[[package]]
name = "digest"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506"
dependencies = [
"block-buffer",
"crypto-common",
]
[[package]]
name = "either"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be"
[[package]]
name = "ff"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df689201f395c6b90dfe87127685f8dbfc083a5e779e613575d8bd7314300c3e"
dependencies = [
"bitvec",
"rand_core",
"subtle",
]
[[package]]
name = "funty"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
[[package]]
name = "generic-array"
version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803"
dependencies = [
"typenum",
"version_check",
]
[[package]]
name = "getrandom"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "group"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7391856def869c1c81063a03457c676fbcd419709c3dfb33d8d319de484b154d"
dependencies = [
"byteorder",
"ff",
"rand_core",
"subtle",
]
[[package]]
name = "half"
version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "hex-literal"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0"
[[package]]
name = "itertools"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
[[package]]
name = "itoa"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d"
[[package]]
name = "js-sys"
version = "0.3.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "258451ab10b34f8af53416d1fdab72c22e805f0c92a1136d59470ec0b11138b2"
dependencies = [
"wasm-bindgen",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.126"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836"
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "memchr"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "memoffset"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [
"autocfg",
]
[[package]]
name = "num-traits"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
dependencies = [
"autocfg",
]
[[package]]
name = "num_cpus"
version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1"
dependencies = [
"hermit-abi",
"libc",
]
[[package]]
name = "once_cell"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1"
[[package]]
name = "oorandom"
version = "11.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "pairing"
version = "0.22.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "135590d8bdba2b31346f9cd1fb2a912329f5135e832a4f422942eb6ead8b6b3b"
dependencies = [
"group",
]
[[package]]
name = "plotters"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9428003b84df1496fb9d6eeee9c5f8145cb41ca375eb0dad204328888832811f"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "plotters-backend"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142"
[[package]]
name = "plotters-svg"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0918736323d1baff32ee0eade54984f6f201ad7e97d5cfb5d6ab4a358529615"
dependencies = [
"plotters-backend",
]
[[package]]
name = "ppv-lite86"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"
[[package]]
name = "proc-macro2"
version = "1.0.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdcc2916cde080c1876ff40292a396541241fe0072ef928cd76582e9ea5d60d2"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804"
dependencies = [
"proc-macro2",
]
[[package]]
name = "radium"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
dependencies = [
"getrandom",
]
[[package]]
name = "rand_xorshift"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f"
dependencies = [
"rand_core",
]
[[package]]
name = "rayon"
version = "1.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d"
dependencies = [
"autocfg",
"crossbeam-deque",
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-utils",
"num_cpus",
]
[[package]]
name = "regex"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
dependencies = [
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
[[package]]
name = "regex-syntax"
version = "0.6.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
[[package]]
name = "ryu"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc855a42c7967b7c369eb5860f7164ef1f6f81c20c7cc1141f2a604e18723b03"
[[package]]
name = "serde_cbor"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
dependencies = [
"half",
"serde",
]
[[package]]
name = "serde_derive"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f2122636b9fe3b81f1cb25099fcf2d3f542cdb1d45940d56c713158884a05da"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7"
dependencies = [
"itoa 1.0.2",
"ryu",
"serde",
]
[[package]]
name = "sha2"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676"
dependencies = [
"cfg-if",
"cpufeatures",
"digest",
]
[[package]]
name = "subtle"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
version = "1.0.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tap"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
"unicode-width",
]
[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "typenum"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "unicode-ident"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7"
[[package]]
name = "unicode-width"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "walkdir"
version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56"
dependencies = [
"same-file",
"winapi",
"winapi-util",
]
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
version = "0.2.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d"
dependencies = [
"cfg-if",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "662cd44805586bd52971b9586b1df85cdbbd9112e4ef4d8f41559c334dc6ac3f"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da"
dependencies = [
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6598dd0bd3c7d51095ff6531a5b23e02acdc81804e30d8f07afb77b7215a140a"
[[package]]
name = "web-sys"
version = "0.3.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed055ab27f941423197eb86b2035720b1a3ce40504df082cac2ecc6ed73335a1"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
dependencies = [
"winapi",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "wyz"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30b31594f29d27036c383b53b59ed3476874d518f0efb151b27a4c275141390e"
dependencies = [
"tap",
]

61
third_party/bellman/Cargo.toml vendored Normal file
View File

@@ -0,0 +1,61 @@
[package]
authors = [
"Sean Bowe <ewillbefull@gmail.com>",
"Jack Grigg <thestr4d@gmail.com>",
]
description = "zk-SNARK library"
readme = "README.md"
homepage = "https://github.com/zkcrypto/bellman"
license = "MIT/Apache-2.0"
name = "bellman"
repository = "https://github.com/zkcrypto/bellman"
version = "0.13.1"
edition = "2021"
[dependencies]
bitvec = "1"
blake2s_simd = "1"
ff = { version = "0.12", features = ["bits"] }
group = "0.12"
pairing = { version = "0.22", optional = true }
rand_core = "0.6"
byteorder = "1"
subtle = "2.2.1"
# Multicore dependencies
crossbeam-channel = { version = "0.5.1", optional = true }
lazy_static = { version = "1.4.0", optional = true }
log = { version = "0.4", optional = true }
num_cpus = { version = "1", optional = true }
rayon = { version = "1.5.1", optional = true }
sha2 = "0.10"
rand_chacha = "0.3.1"
[dev-dependencies]
bls12_381 = "0.7"
criterion = "0.3"
hex-literal = "0.3"
rand = "0.8"
rand_xorshift = "0.3"
sha2 = "0.10"
[features]
groth16 = ["pairing"]
multicore = ["crossbeam-channel", "lazy_static", "log", "num_cpus", "rayon", "rand_core/getrandom"]
default = ["groth16", "multicore"]
[[test]]
name = "mimc"
path = "tests/mimc.rs"
required-features = ["groth16"]
[[bench]]
name = "batch"
harness = false
[[bench]]
name = "slow"
harness = false
[badges]
maintenance = { status = "actively-developed" }

201
third_party/bellman/LICENSE-APACHE vendored Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

23
third_party/bellman/LICENSE-MIT vendored Normal file
View File

@@ -0,0 +1,23 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

33
third_party/bellman/README.md vendored Normal file
View File

@@ -0,0 +1,33 @@
# bellman [![Crates.io](https://img.shields.io/crates/v/bellman.svg)](https://crates.io/crates/bellman) #
`bellman` is a crate for building zk-SNARK circuits. It provides circuit traits
and primitive structures, as well as basic gadget implementations such as
booleans and number abstractions.
`bellman` uses the `ff` and `group` crates to build circuits generically over a
scalar field type, which is used as the "word" of a circuit. Arithmetic
operations modulo the scalar field's prime are efficient, while other operations
(such as boolean logic) are implemented using these words.
## Roadmap
Currently `bellman` bundles an implementation of the Groth16 proving system.
This will be moved into a separate crate in the future, and `bellman` will
contain any utilities that make implementing proving systems easier.
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.

98
third_party/bellman/benches/batch.rs vendored Normal file
View File

@@ -0,0 +1,98 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use bls12_381::Bls12;
use ff::Field;
use rand::thread_rng;
use bellman::groth16::{
batch, create_random_proof, generate_random_parameters, prepare_verifying_key, verify_proof,
};
#[path = "../tests/common/mod.rs"]
mod common;
use common::*;
/// Benchmarks Groth16 proof verification over the MiMC demo circuit,
/// comparing per-proof (unbatched) verification against batched
/// verification for batch sizes of 8..=64 proofs.
fn bench_batch_verify(c: &mut Criterion) {
    let mut group = c.benchmark_group("Batch Verification");
    for &n in [8usize, 16, 24, 32, 40, 48, 56, 64].iter() {
        group.throughput(Throughput::Elements(n as u64));
        let mut rng = thread_rng();

        // Generate the MiMC round constants
        let constants = (0..MIMC_ROUNDS)
            .map(|_| bls12_381::Scalar::random(&mut rng))
            .collect::<Vec<_>>();

        // Create parameters for our circuit
        let params = {
            let c = MiMCDemo {
                xl: None,
                xr: None,
                constants: &constants,
            };
            generate_random_parameters::<Bls12, _, _>(c, &mut rng).unwrap()
        };

        // Prepare the verification key (for proof verification)
        let pvk = prepare_verifying_key(&params.vk);

        // Pre-generate `n` (proof, public input) pairs outside the timed
        // closures so only verification cost is measured.
        let proofs = {
            std::iter::repeat_with(|| {
                // Generate a random preimage and compute the image
                let xl = bls12_381::Scalar::random(&mut rng);
                let xr = bls12_381::Scalar::random(&mut rng);
                let image = mimc(xl, xr, &constants);

                // Create an instance of our circuit (with the
                // witness)
                let c = MiMCDemo {
                    xl: Some(xl),
                    xr: Some(xr),
                    constants: &constants,
                };

                // Create a groth16 proof with our parameters.
                let proof = create_random_proof(c, &params, &mut rng).unwrap();

                (proof, image)
            })
        }
        .take(n)
        .collect::<Vec<_>>();

        group.bench_with_input(
            BenchmarkId::new("Unbatched verification", n),
            &proofs,
            |b, proofs| {
                b.iter(|| {
                    // Verify each proof independently against the prepared key.
                    for (proof, input) in proofs.iter() {
                        let _ = verify_proof(&pvk, proof, &[*input]);
                    }
                })
            },
        );

        group.bench_with_input(
            BenchmarkId::new("Batched verification", n),
            &proofs,
            |b, proofs| {
                b.iter(|| {
                    // Queue all proofs into one batch verifier, then run a
                    // single combined verification.
                    let mut batch = batch::Verifier::new();
                    for (proof, input) in proofs.iter() {
                        batch.queue((proof.clone(), vec![*input]));
                    }
                    batch.verify(&mut rng, &params.vk)
                })
            },
        );
    }
    group.finish();
}

// Criterion entry points (Cargo.toml sets `harness = false` for this bench).
criterion_group!(benches, bench_batch_verify);
criterion_main!(benches);

47
third_party/bellman/benches/slow.rs vendored Normal file
View File

@@ -0,0 +1,47 @@
use bellman::{
multicore::Worker,
multiexp::{multiexp, FullDensity},
};
use bls12_381::{Bls12, Scalar};
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use ff::Field;
use group::{Curve, Group};
use pairing::Engine;
use rand_core::SeedableRng;
use rand_xorshift::XorShiftRng;
use std::sync::Arc;
/// Benchmarks a full-density multiexponentiation over 2^16 random
/// BLS12-381 G1 affine points with random scalar exponents, executed on
/// the `Worker` thread pool.
fn bench_parts(c: &mut Criterion) {
    // Fixed seed so the benchmark input is reproducible across runs.
    let mut rng = XorShiftRng::from_seed([7; 16]);

    let samples = 1 << 16;

    // Random scalars...
    let v = Arc::new(
        (0..samples)
            .map(|_| Scalar::random(&mut rng))
            .collect::<Vec<_>>(),
    );
    // ...converted to the bit representation `multiexp` consumes.
    let v_bits = Arc::new(v.iter().map(|e| e.into()).collect::<Vec<_>>());
    // Random affine bases.
    let g = Arc::new(
        (0..samples)
            .map(|_| <Bls12 as Engine>::G1::random(&mut rng).to_affine())
            .collect::<Vec<_>>(),
    );

    let pool = Worker::new();

    c.bench_with_input(
        BenchmarkId::new("multiexp", samples),
        &(pool, g, v_bits),
        |b, (pool, g, v_bits)| {
            b.iter(|| {
                // `multiexp` returns a future; `wait` blocks until the pool
                // finishes the computation.
                let _: <Bls12 as Engine>::G1 =
                    multiexp(pool, (g.clone(), 0), FullDensity, v_bits.clone())
                        .wait()
                        .unwrap();
            })
        },
    );
}

// Criterion entry points (Cargo.toml sets `harness = false` for this bench).
criterion_group!(benches, bench_parts);
criterion_main!(benches);

1
third_party/bellman/rust-toolchain vendored Normal file
View File

@@ -0,0 +1 @@
1.56.0

498
third_party/bellman/src/domain.rs vendored Normal file
View File

@@ -0,0 +1,498 @@
//! This module contains an [`EvaluationDomain`] abstraction for performing
//! various kinds of polynomial arithmetic on top of the scalar field.
//!
//! In pairing-based SNARKs like [Groth16], we need to calculate a quotient
//! polynomial over a target polynomial with roots at distinct points associated
//! with each constraint of the constraint system. In order to be efficient, we
//! choose these roots to be the powers of a 2<sup>n</sup> root of unity in the
//! field. This allows us to perform polynomial operations in O(n) by performing
//! an O(n log n) FFT over such a domain.
//!
//! [`EvaluationDomain`]: crate::domain::EvaluationDomain
//! [Groth16]: https://eprint.iacr.org/2016/260
use ff::PrimeField;
use group::cofactor::CofactorCurve;
use super::SynthesisError;
use super::multicore::Worker;
/// A vector of group elements interpreted as a polynomial over a radix-2
/// evaluation domain of size `2^exp`, along with the precomputed field
/// constants needed to run (inverse/coset) FFTs over that domain.
pub struct EvaluationDomain<S: PrimeField, G: Group<S>> {
    // Coefficients (or evaluations, depending on which basis the domain
    // is currently in).
    coeffs: Vec<G>,
    // log2 of the domain size m.
    exp: u32,
    // 2^exp primitive root of unity used by the forward FFT.
    omega: S,
    // omega^-1, used by the inverse FFT.
    omegainv: S,
    // Inverse of the field's multiplicative generator, for inverse coset FFTs.
    geninv: S,
    // m^-1, used to normalize the inverse FFT.
    minv: S,
}
impl<S: PrimeField, G: Group<S>> AsRef<[G]> for EvaluationDomain<S, G> {
    /// Borrows the underlying coefficient storage as a slice.
    fn as_ref(&self) -> &[G] {
        self.coeffs.as_slice()
    }
}
impl<S: PrimeField, G: Group<S>> AsMut<[G]> for EvaluationDomain<S, G> {
    /// Mutably borrows the underlying coefficient storage as a slice.
    fn as_mut(&mut self) -> &mut [G] {
        self.coeffs.as_mut_slice()
    }
}
impl<S: PrimeField, G: Group<S>> EvaluationDomain<S, G> {
    /// Consumes the domain, returning the coefficient vector.
    pub fn into_coeffs(self) -> Vec<G> {
        self.coeffs
    }

    /// Builds a domain from `coeffs`, zero-padding up to the next power of
    /// two. Fails with `PolynomialDegreeTooLarge` when the field's 2-adicity
    /// (`S::S`) cannot accommodate a radix-2 domain of the required size.
    pub fn from_coeffs(mut coeffs: Vec<G>) -> Result<EvaluationDomain<S, G>, SynthesisError> {
        // Compute the size of our evaluation domain
        let mut m = 1;
        let mut exp = 0;

        while m < coeffs.len() {
            m *= 2;
            exp += 1;

            // The pairing-friendly curve may not be able to support
            // large enough (radix2) evaluation domains.
            if exp >= S::S {
                return Err(SynthesisError::PolynomialDegreeTooLarge);
            }
        }

        // Compute omega, the 2^exp primitive root of unity
        // (repeatedly squaring the 2^S root of unity halves its order).
        let mut omega = S::root_of_unity();
        for _ in exp..S::S {
            omega = omega.square();
        }

        // Extend the coeffs vector with zeroes if necessary
        coeffs.resize(m, G::group_zero());

        Ok(EvaluationDomain {
            coeffs,
            exp,
            omega,
            omegainv: omega.invert().unwrap(),
            geninv: S::multiplicative_generator().invert().unwrap(),
            minv: S::from(m as u64).invert().unwrap(),
        })
    }

    /// Forward FFT over the domain, in place.
    pub fn fft(&mut self, worker: &Worker) {
        best_fft(&mut self.coeffs, worker, &self.omega, self.exp);
    }

    /// Inverse FFT over the domain, in place: an FFT with omega^-1
    /// followed by scaling every element by m^-1.
    pub fn ifft(&mut self, worker: &Worker) {
        best_fft(&mut self.coeffs, worker, &self.omegainv, self.exp);

        worker.scope(self.coeffs.len(), |scope, chunk| {
            let minv = self.minv;

            for v in self.coeffs.chunks_mut(chunk) {
                scope.spawn(move |_scope| {
                    for v in v {
                        v.group_mul_assign(&minv);
                    }
                });
            }
        });
    }

    /// Multiplies the i-th coefficient by g^i, in parallel chunks.
    pub fn distribute_powers(&mut self, worker: &Worker, g: S) {
        worker.scope(self.coeffs.len(), |scope, chunk| {
            for (i, v) in self.coeffs.chunks_mut(chunk).enumerate() {
                scope.spawn(move |_scope| {
                    // The chunk starting at element i*chunk begins at power
                    // g^(i*chunk) and steps by g per element.
                    let mut u = g.pow_vartime(&[(i * chunk) as u64]);
                    for v in v.iter_mut() {
                        v.group_mul_assign(&u);
                        u.mul_assign(&g);
                    }
                });
            }
        });
    }

    /// FFT over the coset shifted by the field's multiplicative generator.
    pub fn coset_fft(&mut self, worker: &Worker) {
        self.distribute_powers(worker, S::multiplicative_generator());
        self.fft(worker);
    }

    /// Inverse of `coset_fft`: inverse FFT, then undo the coset shift with
    /// the generator's inverse.
    pub fn icoset_fft(&mut self, worker: &Worker) {
        let geninv = self.geninv;

        self.ifft(worker);
        self.distribute_powers(worker, geninv);
    }

    /// This evaluates t(tau) for this domain, which is
    /// tau^m - 1 for these radix-2 domains.
    pub fn z(&self, tau: &S) -> S {
        let mut tmp = tau.pow_vartime(&[self.coeffs.len() as u64]);
        tmp.sub_assign(&S::one());

        tmp
    }

    /// The target polynomial is the zero polynomial in our
    /// evaluation domain, so we must perform division over
    /// a coset.
    pub fn divide_by_z_on_coset(&mut self, worker: &Worker) {
        // z(generator) is a nonzero constant on the coset, so division is a
        // uniform scaling by its inverse.
        let i = self.z(&S::multiplicative_generator()).invert().unwrap();

        worker.scope(self.coeffs.len(), |scope, chunk| {
            for v in self.coeffs.chunks_mut(chunk) {
                scope.spawn(move |_scope| {
                    for v in v {
                        v.group_mul_assign(&i);
                    }
                });
            }
        });
    }

    /// Perform O(n) multiplication of two polynomials in the domain.
    /// (Pointwise products of evaluations; both domains must be equal size.)
    pub fn mul_assign(&mut self, worker: &Worker, other: &EvaluationDomain<S, Scalar<S>>) {
        assert_eq!(self.coeffs.len(), other.coeffs.len());

        worker.scope(self.coeffs.len(), |scope, chunk| {
            for (a, b) in self
                .coeffs
                .chunks_mut(chunk)
                .zip(other.coeffs.chunks(chunk))
            {
                scope.spawn(move |_scope| {
                    for (a, b) in a.iter_mut().zip(b.iter()) {
                        a.group_mul_assign(&b.0);
                    }
                });
            }
        });
    }

    /// Perform O(n) subtraction of one polynomial from another in the domain.
    pub fn sub_assign(&mut self, worker: &Worker, other: &EvaluationDomain<S, G>) {
        assert_eq!(self.coeffs.len(), other.coeffs.len());

        worker.scope(self.coeffs.len(), |scope, chunk| {
            for (a, b) in self
                .coeffs
                .chunks_mut(chunk)
                .zip(other.coeffs.chunks(chunk))
            {
                scope.spawn(move |_scope| {
                    for (a, b) in a.iter_mut().zip(b.iter()) {
                        a.group_sub_assign(b);
                    }
                });
            }
        });
    }
}
/// The operations the FFT code needs from its element type: a zero
/// element, in-place addition/subtraction, and in-place multiplication
/// by a field scalar.
pub trait Group<Scalar: PrimeField>: Sized + Copy + Clone + Send + Sync {
    /// Returns the additive identity.
    fn group_zero() -> Self;
    /// Multiplies this element by a field scalar, in place.
    fn group_mul_assign(&mut self, by: &Scalar);
    /// Adds `other` to this element, in place.
    fn group_add_assign(&mut self, other: &Self);
    /// Subtracts `other` from this element, in place.
    fn group_sub_assign(&mut self, other: &Self);
}
/// A curve point wrapped so it can serve as an FFT [`Group`] element,
/// using the curve's scalar field as the scalar type.
pub struct Point<G: CofactorCurve>(pub G);

impl<G: CofactorCurve> PartialEq for Point<G> {
    fn eq(&self, other: &Point<G>) -> bool {
        self.0 == other.0
    }
}

// Copy/Clone are written by hand rather than derived — presumably to
// avoid the extra bounds a derive would place on `G`; confirm before
// changing to `#[derive(...)]`.
impl<G: CofactorCurve> Copy for Point<G> {}

impl<G: CofactorCurve> Clone for Point<G> {
    fn clone(&self) -> Point<G> {
        *self
    }
}

impl<G: CofactorCurve> Group<G::Scalar> for Point<G> {
    fn group_zero() -> Self {
        // The curve group's identity element.
        Point(G::identity())
    }
    fn group_mul_assign(&mut self, by: &G::Scalar) {
        self.0.mul_assign(by);
    }
    fn group_add_assign(&mut self, other: &Self) {
        self.0.add_assign(&other.0);
    }
    fn group_sub_assign(&mut self, other: &Self) {
        self.0.sub_assign(&other.0);
    }
}
/// A field element wrapped so it can serve as an FFT [`Group`] element
/// over itself (used for scalar-valued polynomials).
pub struct Scalar<S: PrimeField>(pub S);

impl<S: PrimeField> PartialEq for Scalar<S> {
    fn eq(&self, other: &Scalar<S>) -> bool {
        self.0 == other.0
    }
}

// Copy/Clone are written by hand rather than derived, matching the
// `Point` wrapper above.
impl<S: PrimeField> Copy for Scalar<S> {}

impl<S: PrimeField> Clone for Scalar<S> {
    fn clone(&self) -> Scalar<S> {
        *self
    }
}

impl<S: PrimeField> Group<S> for Scalar<S> {
    fn group_zero() -> Self {
        Scalar(S::zero())
    }
    fn group_mul_assign(&mut self, by: &S) {
        self.0.mul_assign(by);
    }
    fn group_add_assign(&mut self, other: &Self) {
        self.0.add_assign(&other.0);
    }
    fn group_sub_assign(&mut self, other: &Self) {
        self.0.sub_assign(&other.0);
    }
}
/// Dispatches to the parallel FFT only when the problem is large enough
/// to occupy more than one thread's worth of sub-FFTs; otherwise runs
/// the single-threaded version.
fn best_fft<S: PrimeField, T: Group<S>>(a: &mut [T], worker: &Worker, omega: &S, log_n: u32) {
    let log_cpus = worker.log_num_threads();

    if log_n > log_cpus {
        parallel_fft(a, worker, omega, log_n, log_cpus);
    } else {
        serial_fft(a, omega, log_n);
    }
}
/// In-place radix-2 FFT of `a` (length must equal `2^log_n`) over root of
/// unity `omega`, on a single thread: bit-reversal permutation followed
/// by iterative butterfly passes.
#[allow(clippy::many_single_char_names)]
fn serial_fft<S: PrimeField, T: Group<S>>(a: &mut [T], omega: &S, log_n: u32) {
    // Reverses the lowest `l` bits of `n`.
    fn bitreverse(mut n: u32, l: u32) -> u32 {
        let mut r = 0;
        for _ in 0..l {
            r = (r << 1) | (n & 1);
            n >>= 1;
        }
        r
    }

    let n = a.len() as u32;
    assert_eq!(n, 1 << log_n);

    // Permute into bit-reversed order; `k < rk` ensures each pair is
    // swapped exactly once.
    for k in 0..n {
        let rk = bitreverse(k, log_n);
        if k < rk {
            a.swap(rk as usize, k as usize);
        }
    }

    // Butterfly passes: the half-block size `m` doubles each round.
    let mut m = 1;
    for _ in 0..log_n {
        // Twiddle-factor step for this pass: omega^(n / 2m).
        let w_m = omega.pow_vartime(&[u64::from(n / (2 * m))]);

        let mut k = 0;
        while k < n {
            let mut w = S::one();
            for j in 0..m {
                // Butterfly on (a[k+j], a[k+j+m]): (x, y) -> (x + w*y, x - w*y).
                let mut t = a[(k + j + m) as usize];
                t.group_mul_assign(&w);
                let mut tmp = a[(k + j) as usize];
                tmp.group_sub_assign(&t);
                a[(k + j + m) as usize] = tmp;
                a[(k + j) as usize].group_add_assign(&t);
                w.mul_assign(&w_m);
            }

            k += 2 * m;
        }

        m *= 2;
    }
}
/// In-place FFT split across `2^log_cpus` worker threads: each thread
/// accumulates its shuffled inputs and runs a serial sub-FFT of size
/// `2^(log_n - log_cpus)`, after which the sub-results are interleaved
/// back into `a`.
fn parallel_fft<S: PrimeField, T: Group<S>>(
    a: &mut [T],
    worker: &Worker,
    omega: &S,
    log_n: u32,
    log_cpus: u32,
) {
    assert!(log_n >= log_cpus);

    let num_cpus = 1 << log_cpus;
    let log_new_n = log_n - log_cpus;
    // One scratch buffer per sub-FFT.
    let mut tmp = vec![vec![T::group_zero(); 1 << log_new_n]; num_cpus];
    // Root of unity for the smaller sub-domains: omega^(2^log_cpus).
    let new_omega = omega.pow_vartime(&[num_cpus as u64]);

    worker.scope(0, |scope, _| {
        // Workers read `a` immutably; each writes only its own `tmp[j]`.
        let a = &*a;

        for (j, tmp) in tmp.iter_mut().enumerate() {
            scope.spawn(move |_scope| {
                // Shuffle into a sub-FFT
                let omega_j = omega.pow_vartime(&[j as u64]);
                let omega_step = omega.pow_vartime(&[(j as u64) << log_new_n]);

                let mut elt = S::one();
                for (i, tmp) in tmp.iter_mut().enumerate() {
                    for s in 0..num_cpus {
                        let idx = (i + (s << log_new_n)) % (1 << log_n);
                        let mut t = a[idx];
                        t.group_mul_assign(&elt);
                        tmp.group_add_assign(&t);
                        elt.mul_assign(&omega_step);
                    }
                    elt.mul_assign(&omega_j);
                }

                // Perform sub-FFT
                serial_fft(tmp, &new_omega, log_new_n);
            });
        }
    });

    // TODO: does this hurt or help?
    // Interleave: output position `idx` is element `idx >> log_cpus` of
    // sub-FFT `idx & mask`.
    worker.scope(a.len(), |scope, chunk| {
        let tmp = &tmp;

        for (idx, a) in a.chunks_mut(chunk).enumerate() {
            scope.spawn(move |_scope| {
                let mut idx = idx * chunk;
                let mask = (1 << log_cpus) - 1;
                for a in a {
                    *a = tmp[idx & mask][idx >> log_cpus];
                    idx += 1;
                }
            });
        }
    });
}
// Test multiplying various (low degree) polynomials together and
// comparing with naive evaluations.
#[cfg(feature = "pairing")]
#[test]
fn polynomial_arith() {
    use bls12_381::Scalar as Fr;
    use rand_core::RngCore;

    fn test_mul<S: PrimeField, R: RngCore>(mut rng: &mut R) {
        let worker = Worker::new();

        for coeffs_a in 0..70 {
            for coeffs_b in 0..70 {
                let mut a: Vec<_> = (0..coeffs_a)
                    .map(|_| Scalar::<S>(S::random(&mut rng)))
                    .collect();
                let mut b: Vec<_> = (0..coeffs_b)
                    .map(|_| Scalar::<S>(S::random(&mut rng)))
                    .collect();

                // naive evaluation: schoolbook O(n^2) convolution of the
                // two coefficient vectors.
                let mut naive = vec![Scalar(S::zero()); coeffs_a + coeffs_b];
                for (i1, a) in a.iter().enumerate() {
                    for (i2, b) in b.iter().enumerate() {
                        let mut prod = *a;
                        prod.group_mul_assign(&b.0);
                        naive[i1 + i2].group_add_assign(&prod);
                    }
                }

                // Pad both operands so the domains cover the product degree.
                a.resize(coeffs_a + coeffs_b, Scalar(S::zero()));
                b.resize(coeffs_a + coeffs_b, Scalar(S::zero()));

                let mut a = EvaluationDomain::from_coeffs(a).unwrap();
                let mut b = EvaluationDomain::from_coeffs(b).unwrap();

                // FFT multiply: transform, multiply pointwise, transform back.
                a.fft(&worker);
                b.fft(&worker);
                a.mul_assign(&worker, &b);
                a.ifft(&worker);

                for (naive, fft) in naive.iter().zip(a.coeffs.iter()) {
                    assert!(naive == fft);
                }
            }
        }
    }

    let rng = &mut rand::thread_rng();

    test_mul::<Fr, _>(rng);
}
// Checks that fft/ifft and coset_fft/icoset_fft are inverses of each
// other, applied in both orders.
#[cfg(feature = "pairing")]
#[test]
fn fft_composition() {
    use bls12_381::Scalar as Fr;
    use rand_core::RngCore;

    fn test_comp<S: PrimeField, R: RngCore>(mut rng: &mut R) {
        let worker = Worker::new();

        // Domain sizes 1, 2, 4, ..., 512.
        for coeffs in 0..10 {
            let coeffs = 1 << coeffs;

            let mut v = vec![];
            for _ in 0..coeffs {
                v.push(Scalar::<S>(S::random(&mut rng)));
            }

            let mut domain = EvaluationDomain::from_coeffs(v.clone()).unwrap();
            domain.ifft(&worker);
            domain.fft(&worker);
            assert!(v == domain.coeffs);
            domain.fft(&worker);
            domain.ifft(&worker);
            assert!(v == domain.coeffs);
            domain.icoset_fft(&worker);
            domain.coset_fft(&worker);
            assert!(v == domain.coeffs);
            domain.coset_fft(&worker);
            domain.icoset_fft(&worker);
            assert!(v == domain.coeffs);
        }
    }

    let rng = &mut rand::thread_rng();

    test_comp::<Fr, _>(rng);
}
// Checks that parallel_fft produces the same result as serial_fft on
// identical inputs.
#[cfg(feature = "pairing")]
#[test]
fn parallel_fft_consistency() {
    use bls12_381::Scalar as Fr;
    use rand_core::RngCore;
    use std::cmp::min;

    fn test_consistency<S: PrimeField, R: RngCore>(mut rng: &mut R) {
        let worker = Worker::new();

        for _ in 0..5 {
            for log_d in 0..10 {
                let d = 1 << log_d;

                let v1 = (0..d)
                    .map(|_| Scalar::<S>(S::random(&mut rng)))
                    .collect::<Vec<_>>();
                let mut v1 = EvaluationDomain::from_coeffs(v1).unwrap();
                let mut v2 = EvaluationDomain::from_coeffs(v1.coeffs.clone()).unwrap();

                // NOTE(review): for log_d >= 3 this range is empty, so only
                // the smallest domains actually exercise the parallel path —
                // confirm whether `min(log_d, 3)` or similar was intended.
                for log_cpus in log_d..min(log_d + 1, 3) {
                    parallel_fft(&mut v1.coeffs, &worker, &v1.omega, log_d, log_cpus);
                    serial_fft(&mut v2.coeffs, &v2.omega, log_d);

                    assert!(v1.coeffs == v2.coeffs);
                }
            }
        }
    }

    let rng = &mut rand::thread_rng();

    test_consistency::<Fr, _>(rng);
}

33
third_party/bellman/src/gadgets.rs vendored Normal file
View File

@@ -0,0 +1,33 @@
//! Self-contained sub-circuit implementations for various primitives.
pub mod test;
pub mod blake2s;
pub mod boolean;
pub mod lookup;
pub mod multieq;
pub mod multipack;
pub mod num;
pub mod sha256;
pub mod uint32;
use crate::SynthesisError;
// TODO: This should probably be removed and we
// should use existing helper methods on `Option`
// for mapping with an error.

/// A small extension to `Option` that converts a missing value (`None`)
/// into `SynthesisError::AssignmentMissing`, the error used when a
/// witness assignment is unavailable during synthesis.
pub trait Assignment<T> {
    fn get(&self) -> Result<&T, SynthesisError>;
}

impl<T> Assignment<T> for Option<T> {
    fn get(&self) -> Result<&T, SynthesisError> {
        // Borrow the contained value, or report the missing assignment.
        self.as_ref().ok_or(SynthesisError::AssignmentMissing)
    }
}

View File

@@ -0,0 +1,668 @@
//! The [BLAKE2s] hash function with personalization support.
//!
//! [BLAKE2s]: https://tools.ietf.org/html/rfc7693
use super::{boolean::Boolean, multieq::MultiEq, uint32::UInt32};
use crate::{ConstraintSystem, SynthesisError};
use ff::PrimeField;
/*
2.1. Parameters
The following table summarizes various parameters and their ranges:
| BLAKE2b | BLAKE2s |
--------------+------------------+------------------+
Bits in word | w = 64 | w = 32 |
Rounds in F | r = 12 | r = 10 |
Block bytes | bb = 128 | bb = 64 |
Hash bytes | 1 <= nn <= 64 | 1 <= nn <= 32 |
Key bytes | 0 <= kk <= 64 | 0 <= kk <= 32 |
Input bytes | 0 <= ll < 2**128 | 0 <= ll < 2**64 |
--------------+------------------+------------------+
G Rotation | (R1, R2, R3, R4) | (R1, R2, R3, R4) |
constants = | (32, 24, 16, 63) | (16, 12, 8, 7) |
--------------+------------------+------------------+
*/
// BLAKE2s G-function right-rotation constants (R1, R2, R3, R4) from the
// parameter table above.
const R1: usize = 16;
const R2: usize = 12;
const R3: usize = 8;
const R4: usize = 7;
/*
Round | 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 |
----------+-------------------------------------------------+
SIGMA[0] | 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 |
SIGMA[1] | 14 10 4 8 9 15 13 6 1 12 0 2 11 7 5 3 |
SIGMA[2] | 11 8 12 0 5 2 15 13 10 14 3 6 7 1 9 4 |
SIGMA[3] | 7 9 3 1 13 12 11 14 2 6 5 10 4 0 15 8 |
SIGMA[4] | 9 0 5 7 2 4 10 15 14 1 11 12 6 8 3 13 |
SIGMA[5] | 2 12 6 10 0 11 8 3 4 13 7 5 15 14 1 9 |
SIGMA[6] | 12 5 1 15 14 13 4 10 0 7 6 3 9 2 8 11 |
SIGMA[7] | 13 11 7 14 12 1 3 9 5 0 15 4 8 6 2 10 |
SIGMA[8] | 6 15 14 9 11 3 0 8 12 2 13 7 1 4 10 5 |
SIGMA[9] | 10 2 8 4 7 6 1 5 15 11 9 14 3 12 13 0 |
----------+-------------------------------------------------+
*/
// BLAKE2s message word selection permutations, one row per round
// (the SIGMA table reproduced in the comment above).
const SIGMA: [[usize; 16]; 10] = [
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
    [14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3],
    [11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4],
    [7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8],
    [9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13],
    [2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9],
    [12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11],
    [13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10],
    [6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5],
    [10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0],
];
/*
3.1. Mixing Function G
The G primitive function mixes two input words, "x" and "y", into
four words indexed by "a", "b", "c", and "d" in the working vector
v[0..15]. The full modified vector is returned. The rotation
constants (R1, R2, R3, R4) are given in Section 2.1.
FUNCTION G( v[0..15], a, b, c, d, x, y )
|
| v[a] := (v[a] + v[b] + x) mod 2**w
| v[d] := (v[d] ^ v[a]) >>> R1
| v[c] := (v[c] + v[d]) mod 2**w
| v[b] := (v[b] ^ v[c]) >>> R2
| v[a] := (v[a] + v[b] + y) mod 2**w
| v[d] := (v[d] ^ v[a]) >>> R3
| v[c] := (v[c] + v[d]) mod 2**w
| v[b] := (v[b] ^ v[c]) >>> R4
|
| RETURN v[0..15]
|
END FUNCTION.
*/
/// In-circuit implementation of the BLAKE2s G mixing function (see the
/// pseudocode reproduced above): mixes message words `x` and `y` into the
/// working-vector entries `v[a]`, `v[b]`, `v[c]`, `v[d]` using 32-bit
/// modular additions, XORs, and the fixed right-rotations R1..R4.
/// Constraints are namespaced per step under `cs`.
#[allow(clippy::many_single_char_names)]
fn mixing_g<Scalar: PrimeField, CS: ConstraintSystem<Scalar>, M>(
    mut cs: M,
    v: &mut [UInt32],
    (a, b, c, d): (usize, usize, usize, usize),
    x: &UInt32,
    y: &UInt32,
) -> Result<(), SynthesisError>
where
    M: ConstraintSystem<Scalar, Root = MultiEq<Scalar, CS>>,
{
    // v[a] := (v[a] + v[b] + x) mod 2**w
    v[a] = UInt32::addmany(
        cs.namespace(|| "mixing step 1"),
        &[v[a].clone(), v[b].clone(), x.clone()],
    )?;
    // v[d] := (v[d] ^ v[a]) >>> R1
    v[d] = v[d].xor(cs.namespace(|| "mixing step 2"), &v[a])?.rotr(R1);
    // v[c] := (v[c] + v[d]) mod 2**w
    v[c] = UInt32::addmany(
        cs.namespace(|| "mixing step 3"),
        &[v[c].clone(), v[d].clone()],
    )?;
    // v[b] := (v[b] ^ v[c]) >>> R2
    v[b] = v[b].xor(cs.namespace(|| "mixing step 4"), &v[c])?.rotr(R2);
    // v[a] := (v[a] + v[b] + y) mod 2**w
    v[a] = UInt32::addmany(
        cs.namespace(|| "mixing step 5"),
        &[v[a].clone(), v[b].clone(), y.clone()],
    )?;
    // v[d] := (v[d] ^ v[a]) >>> R3
    v[d] = v[d].xor(cs.namespace(|| "mixing step 6"), &v[a])?.rotr(R3);
    // v[c] := (v[c] + v[d]) mod 2**w
    v[c] = UInt32::addmany(
        cs.namespace(|| "mixing step 7"),
        &[v[c].clone(), v[d].clone()],
    )?;
    // v[b] := (v[b] ^ v[c]) >>> R4
    v[b] = v[b].xor(cs.namespace(|| "mixing step 8"), &v[c])?.rotr(R4);

    Ok(())
}
/*
3.2. Compression Function F
Compression function F takes as an argument the state vector "h",
message block vector "m" (last block is padded with zeros to full
block size, if required), 2w-bit offset counter "t", and final block
indicator flag "f". Local vector v[0..15] is used in processing. F
returns a new state vector. The number of rounds, "r", is 12 for
BLAKE2b and 10 for BLAKE2s. Rounds are numbered from 0 to r - 1.
FUNCTION F( h[0..7], m[0..15], t, f )
|
| // Initialize local work vector v[0..15]
| v[0..7] := h[0..7] // First half from state.
| v[8..15] := IV[0..7] // Second half from IV.
|
| v[12] := v[12] ^ (t mod 2**w) // Low word of the offset.
| v[13] := v[13] ^ (t >> w) // High word.
|
| IF f = TRUE THEN // last block flag?
| | v[14] := v[14] ^ 0xFF..FF // Invert all bits.
| END IF.
|
| // Cryptographic mixing
| FOR i = 0 TO r - 1 DO // Ten or twelve rounds.
| |
| | // Message word selection permutation for this round.
| | s[0..15] := SIGMA[i mod 10][0..15]
| |
| | v := G( v, 0, 4, 8, 12, m[s[ 0]], m[s[ 1]] )
| | v := G( v, 1, 5, 9, 13, m[s[ 2]], m[s[ 3]] )
| | v := G( v, 2, 6, 10, 14, m[s[ 4]], m[s[ 5]] )
| | v := G( v, 3, 7, 11, 15, m[s[ 6]], m[s[ 7]] )
| |
| | v := G( v, 0, 5, 10, 15, m[s[ 8]], m[s[ 9]] )
| | v := G( v, 1, 6, 11, 12, m[s[10]], m[s[11]] )
| | v := G( v, 2, 7, 8, 13, m[s[12]], m[s[13]] )
| | v := G( v, 3, 4, 9, 14, m[s[14]], m[s[15]] )
| |
| END FOR
|
| FOR i = 0 TO 7 DO // XOR the two halves.
| | h[i] := h[i] ^ v[i] ^ v[i + 8]
| END FOR.
|
| RETURN h[0..7] // New state.
|
END FUNCTION.
*/
/// BLAKE2s compression function F (RFC 7693, Section 3.2) as gadget logic.
///
/// Mixes the 16-word message block `m` into the 8-word state `h`, using
/// the byte-offset counter `t` and the final-block flag `f`. The ten
/// mixing rounds run inside a `MultiEq` scope so word-sized equalities
/// can be packed into shared constraints.
///
/// # Panics
///
/// Panics if `h.len() != 8` or `m.len() != 16`.
#[allow(clippy::many_single_char_names)]
fn blake2s_compression<Scalar: PrimeField, CS: ConstraintSystem<Scalar>>(
    mut cs: CS,
    h: &mut [UInt32],
    m: &[UInt32],
    t: u64,
    f: bool,
) -> Result<(), SynthesisError> {
    assert_eq!(h.len(), 8);
    assert_eq!(m.len(), 16);
    /*
    static const uint32_t blake2s_iv[8] =
    {
        0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
        0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
    };
    */
    // Working vector: first half from the state, second half from the IV.
    let mut v = Vec::with_capacity(16);
    v.extend_from_slice(h);
    v.push(UInt32::constant(0x6A09E667));
    v.push(UInt32::constant(0xBB67AE85));
    v.push(UInt32::constant(0x3C6EF372));
    v.push(UInt32::constant(0xA54FF53A));
    v.push(UInt32::constant(0x510E527F));
    v.push(UInt32::constant(0x9B05688C));
    v.push(UInt32::constant(0x1F83D9AB));
    v.push(UInt32::constant(0x5BE0CD19));
    assert_eq!(v.len(), 16);
    // XOR in the low and high words of the 64-bit offset counter.
    v[12] = v[12].xor(cs.namespace(|| "first xor"), &UInt32::constant(t as u32))?;
    v[13] = v[13].xor(
        cs.namespace(|| "second xor"),
        &UInt32::constant((t >> 32) as u32),
    )?;
    if f {
        // Final block: invert all bits of v[14].
        // (`u32::MAX` replaces the deprecated `u32::max_value()`.)
        v[14] = v[14].xor(
            cs.namespace(|| "third xor"),
            &UInt32::constant(u32::MAX),
        )?;
    }
    {
        // Accumulate the additions of all ten rounds through one MultiEq.
        let mut cs = MultiEq::new(&mut cs);
        for i in 0..10 {
            let mut cs = cs.namespace(|| format!("round {}", i));
            // Message word selection permutation for this round.
            let s = SIGMA[i % 10];
            // Column steps.
            mixing_g(
                cs.namespace(|| "mixing invocation 1"),
                &mut v,
                (0, 4, 8, 12),
                &m[s[0]],
                &m[s[1]],
            )?;
            mixing_g(
                cs.namespace(|| "mixing invocation 2"),
                &mut v,
                (1, 5, 9, 13),
                &m[s[2]],
                &m[s[3]],
            )?;
            mixing_g(
                cs.namespace(|| "mixing invocation 3"),
                &mut v,
                (2, 6, 10, 14),
                &m[s[4]],
                &m[s[5]],
            )?;
            mixing_g(
                cs.namespace(|| "mixing invocation 4"),
                &mut v,
                (3, 7, 11, 15),
                &m[s[6]],
                &m[s[7]],
            )?;
            // Diagonal steps.
            mixing_g(
                cs.namespace(|| "mixing invocation 5"),
                &mut v,
                (0, 5, 10, 15),
                &m[s[8]],
                &m[s[9]],
            )?;
            mixing_g(
                cs.namespace(|| "mixing invocation 6"),
                &mut v,
                (1, 6, 11, 12),
                &m[s[10]],
                &m[s[11]],
            )?;
            mixing_g(
                cs.namespace(|| "mixing invocation 7"),
                &mut v,
                (2, 7, 8, 13),
                &m[s[12]],
                &m[s[13]],
            )?;
            mixing_g(
                cs.namespace(|| "mixing invocation 8"),
                &mut v,
                (3, 4, 9, 14),
                &m[s[14]],
                &m[s[15]],
            )?;
        }
    }
    // Fold the two halves of the working vector back into the state.
    for i in 0..8 {
        let mut cs = cs.namespace(|| format!("h[{i}] ^ v[{i}] ^ v[{i} + 8]", i = i));
        h[i] = h[i].xor(cs.namespace(|| "first xor"), &v[i])?;
        h[i] = h[i].xor(cs.namespace(|| "second xor"), &v[i + 8])?;
    }
    Ok(())
}
/*
FUNCTION BLAKE2( d[0..dd-1], ll, kk, nn )
|
| h[0..7] := IV[0..7] // Initialization Vector.
|
| // Parameter block p[0]
| h[0] := h[0] ^ 0x01010000 ^ (kk << 8) ^ nn
|
| // Process padded key and data blocks
| IF dd > 1 THEN
| | FOR i = 0 TO dd - 2 DO
| | | h := F( h, d[i], (i + 1) * bb, FALSE )
| | END FOR.
| END IF.
|
| // Final block.
| IF kk = 0 THEN
| | h := F( h, d[dd - 1], ll, TRUE )
| ELSE
| | h := F( h, d[dd - 1], ll + bb, TRUE )
| END IF.
|
| RETURN first "nn" bytes from little-endian word array h[].
|
END FUNCTION.
*/
pub fn blake2s<Scalar: PrimeField, CS: ConstraintSystem<Scalar>>(
mut cs: CS,
input: &[Boolean],
personalization: &[u8],
) -> Result<Vec<Boolean>, SynthesisError> {
use byteorder::{ByteOrder, LittleEndian};
assert_eq!(personalization.len(), 8);
assert!(input.len() % 8 == 0);
let mut h = vec![
UInt32::constant(0x6A09E667 ^ 0x01010000 ^ 32),
UInt32::constant(0xBB67AE85),
UInt32::constant(0x3C6EF372),
UInt32::constant(0xA54FF53A),
UInt32::constant(0x510E527F),
UInt32::constant(0x9B05688C),
// Personalization is stored here
UInt32::constant(0x1F83D9AB ^ LittleEndian::read_u32(&personalization[0..4])),
UInt32::constant(0x5BE0CD19 ^ LittleEndian::read_u32(&personalization[4..8])),
];
let mut blocks: Vec<Vec<UInt32>> = vec![];
for block in input.chunks(512) {
let mut this_block = Vec::with_capacity(16);
for word in block.chunks(32) {
let mut tmp = word.to_vec();
while tmp.len() < 32 {
tmp.push(Boolean::constant(false));
}
this_block.push(UInt32::from_bits(&tmp));
}
while this_block.len() < 16 {
this_block.push(UInt32::constant(0));
}
blocks.push(this_block);
}
if blocks.is_empty() {
blocks.push((0..16).map(|_| UInt32::constant(0)).collect());
}
for (i, block) in blocks[0..blocks.len() - 1].iter().enumerate() {
let cs = cs.namespace(|| format!("block {}", i));
blake2s_compression(cs, &mut h, block, ((i as u64) + 1) * 64, false)?;
}
{
let cs = cs.namespace(|| "final block");
blake2s_compression(
cs,
&mut h,
&blocks[blocks.len() - 1],
(input.len() / 8) as u64,
true,
)?;
}
Ok(h.into_iter().flat_map(|b| b.into_bits()).collect())
}
#[cfg(test)]
mod test {
use blake2s_simd::Params as Blake2sParams;
use bls12_381::Scalar;
use hex_literal::hex;
use rand_core::{RngCore, SeedableRng};
use rand_xorshift::XorShiftRng;
use super::blake2s;
use crate::gadgets::boolean::{AllocatedBit, Boolean};
use crate::gadgets::test::TestConstraintSystem;
use crate::ConstraintSystem;
#[test]
fn test_blank_hash() {
let mut cs = TestConstraintSystem::<Scalar>::new();
let input_bits = vec![];
let out = blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 0);
// >>> import blake2s from hashlib
// >>> h = blake2s(digest_size=32, person=b'12345678')
// >>> h.hexdigest()
let expected = hex!("c59f682376d137f3f255e671e207d1f2374ebe504e9314208a52d9f88d69e8c8");
let mut out = out.into_iter();
for b in expected.iter() {
for i in 0..8 {
let c = out.next().unwrap().get_value().unwrap();
assert_eq!(c, (b >> i) & 1u8 == 1u8);
}
}
}
#[test]
fn test_blake2s_constraints() {
let mut cs = TestConstraintSystem::<Scalar>::new();
let input_bits: Vec<_> = (0..512)
.map(|i| {
AllocatedBit::alloc(cs.namespace(|| format!("input bit {}", i)), Some(true))
.unwrap()
.into()
})
.collect();
blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 21518);
}
#[test]
fn test_blake2s_precomp_constraints() {
// Test that 512 fixed leading bits (constants)
// doesn't result in more constraints.
let mut cs = TestConstraintSystem::<Scalar>::new();
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
let input_bits: Vec<_> = (0..512)
.map(|_| Boolean::constant(rng.next_u32() % 2 != 0))
.chain((0..512).map(|i| {
AllocatedBit::alloc(cs.namespace(|| format!("input bit {}", i)), Some(true))
.unwrap()
.into()
}))
.collect();
blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert!(cs.is_satisfied());
assert_eq!(cs.num_constraints(), 21518);
}
#[test]
fn test_blake2s_constant_constraints() {
let mut cs = TestConstraintSystem::<Scalar>::new();
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
let input_bits: Vec<_> = (0..512)
.map(|_| Boolean::constant(rng.next_u32() % 2 != 0))
.collect();
blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert_eq!(cs.num_constraints(), 0);
}
#[test]
fn test_blake2s() {
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for input_len in (0..32).chain((32..256).filter(|a| a % 8 == 0)) {
let mut h = Blake2sParams::new()
.hash_length(32)
.personal(b"12345678")
.to_state();
let data: Vec<u8> = (0..input_len).map(|_| rng.next_u32() as u8).collect();
h.update(&data);
let hash_result = h.finalize();
let mut cs = TestConstraintSystem::<Scalar>::new();
let mut input_bits = vec![];
for (byte_i, input_byte) in data.into_iter().enumerate() {
for bit_i in 0..8 {
let cs = cs.namespace(|| format!("input bit {} {}", byte_i, bit_i));
input_bits.push(
AllocatedBit::alloc(cs, Some((input_byte >> bit_i) & 1u8 == 1u8))
.unwrap()
.into(),
);
}
}
let r = blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert!(cs.is_satisfied());
let mut s = hash_result
.as_ref()
.iter()
.flat_map(|&byte| (0..8).map(move |i| (byte >> i) & 1u8 == 1u8));
for b in r {
match b {
Boolean::Is(b) => {
assert!(s.next().unwrap() == b.get_value().unwrap());
}
Boolean::Not(b) => {
assert!(s.next().unwrap() != b.get_value().unwrap());
}
Boolean::Constant(b) => {
assert!(input_len == 0);
assert!(s.next().unwrap() == b);
}
}
}
}
}
#[test]
fn test_blake2s_256_vars() {
let data: Vec<u8> = hex!("be9f9c485e670acce8b1516a378176161b20583637b6f1c536fbc1158a0a3296831df2920e57a442d5738f4be4dd6be89dd7913fc8b4d1c0a815646a4d674b77f7caf313bd880bf759fcac27037c48c2b2a20acd2fd5248e3be426c84a341c0a3c63eaf36e0d537d10b8db5c6e4c801832c41eb1a3ed602177acded8b4b803bd34339d99a18b71df399641cc8dfae2ad193fcd74b5913e704551777160d14c78f2e8d5c32716a8599c1080cb89a40ccd6ba596694a8b4a065d9f2d0667ef423ed2e418093caff884540858b4f4b62acd47edcea880523e1b1cda8eb225c128c2e9e83f14f6e7448c5733a195cac7d79a53dde5083172462c45b2f799e42af1c9").to_vec();
assert_eq!(data.len(), 256);
let mut cs = TestConstraintSystem::<Scalar>::new();
let mut input_bits = vec![];
for (byte_i, input_byte) in data.into_iter().enumerate() {
for bit_i in 0..8 {
let cs = cs.namespace(|| format!("input bit {} {}", byte_i, bit_i));
input_bits.push(
AllocatedBit::alloc(cs, Some((input_byte >> bit_i) & 1u8 == 1u8))
.unwrap()
.into(),
);
}
}
let r = blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert!(cs.is_satisfied());
let expected = hex!("0af5695115ced92c8a0341e43869209636e9aa6472e4576f0f2b996cf812b30e");
let mut out = r.into_iter();
for b in expected.iter() {
for i in 0..8 {
let c = out.next().unwrap().get_value().unwrap();
assert_eq!(c, (b >> i) & 1u8 == 1u8);
}
}
}
#[test]
fn test_blake2s_700_vars() {
let data: Vec<u8> = hex!("5dcfe8bab4c758d2eb1ddb7ef337583e0df3e2c358e1755b7cd303a658de9a1227eed1d1114179a5c3c38d692ff2cf2d4e5c92a9516de750106774bbf9f7d063f707f4c9b6a02c0a77e4feb99e036c3ccaee7d1a31cb144093aa074bc9da608f8ff30b39c3c60e4a243cc0bbd406d1262a7d6607b31c60275c6bcc8b0ac49a06a4b629a98693c5f7640f3bca45e4977cfabc5b17f52838af3433b1fd407dbbdc131e8e4bd58bcee85bbab4b57b656c6a2ec6cf852525bc8423675e2bf29159139cd5df99db94719f3f7167230e0d5bd76f6d7891b656732cef9c3c0d48a5fa3d7a879988157b39015a85451b25af0301ca5e759ac35fea79dca38c673ec6db9f3885d9103e2dcb3304bd3d59b0b1d01babc97ef8a74d91b6ab6bf50f29eb5adf7250a28fd85db37bff0133193635da69caeefc72979cf3bef1d2896d847eea7e8a81e0927893dbd010feb6fb845d0399007d9a148a0596d86cd8f4192631f975c560f4de8da5f712c161342063af3c11029d93d6df7ff46db48343499de9ec4786cac059c4025ef418c9fe40132428ff8b91259d71d1709ff066add84ae944b45a817f60b4c1bf719e39ae23e9b413469db2310793e9137cf38741e5dd2a3c138a566dbde1950c00071b20ac457b46ba9b0a7ebdddcc212bd228d2a4c4146a970e54158477247c27871af1564b176576e9fd43bf63740bf77434bc4ea3b1a4b430e1a11714bf43160145578a575c3f78ddeaa48de97f73460f26f8df2b5d63e31800100d16bc27160fea5ced5a977ef541cfe8dadc7b3991ed1c0d4f16a3076bbfed96ba3e155113e794987af8abb133f06feefabc2ac32eb4d4d4ba1541ca08b9e518d2e74b7f946b0cbd2663d58c689359b9a565821acc619011233d1011963fa302cde34fc9c5ba2e03eeb2512f547391e940d56218e22ae325f2dfa38d4bae35744ee707aa5dc9c17674025d15390a08f5c452343546ef6da0f7").to_vec();
assert_eq!(data.len(), 700);
let mut cs = TestConstraintSystem::<Scalar>::new();
let mut input_bits = vec![];
for (byte_i, input_byte) in data.into_iter().enumerate() {
for bit_i in 0..8 {
let cs = cs.namespace(|| format!("input bit {} {}", byte_i, bit_i));
input_bits.push(
AllocatedBit::alloc(cs, Some((input_byte >> bit_i) & 1u8 == 1u8))
.unwrap()
.into(),
);
}
}
let r = blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert!(cs.is_satisfied());
let expected = hex!("2ab8f0683167ba220eef19dccf4f9b1a8193cc09b35e0235842323950530f18a");
let mut out = r.into_iter();
for b in expected.iter() {
for i in 0..8 {
let c = out.next().unwrap().get_value().unwrap();
assert_eq!(c, (b >> i) & 1u8 == 1u8);
}
}
}
#[test]
fn test_blake2s_test_vectors() {
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
let expecteds = [
hex!("a1309e334376c8f36a736a4ab0e691ef931ee3ebdb9ea96187127136fea622a1"),
hex!("82fefff60f265cea255252f7c194a7f93965dffee0609ef74eb67f0d76cd41c6"),
];
for expected in &expecteds {
let mut h = Blake2sParams::new()
.hash_length(32)
.personal(b"12345678")
.to_state();
let input_len = 1024;
let data: Vec<u8> = (0..input_len).map(|_| rng.next_u32() as u8).collect();
h.update(&data);
let hash_result = h.finalize();
let mut cs = TestConstraintSystem::<Scalar>::new();
let mut input_bits = vec![];
for (byte_i, input_byte) in data.into_iter().enumerate() {
for bit_i in 0..8 {
let cs = cs.namespace(|| format!("input bit {} {}", byte_i, bit_i));
input_bits.push(
AllocatedBit::alloc(cs, Some((input_byte >> bit_i) & 1u8 == 1u8))
.unwrap()
.into(),
);
}
}
let r = blake2s(&mut cs, &input_bits, b"12345678").unwrap();
assert!(cs.is_satisfied());
let mut s = hash_result
.as_ref()
.iter()
.flat_map(|&byte| (0..8).map(move |i| (byte >> i) & 1u8 == 1u8));
for b in r {
match b {
Boolean::Is(b) => {
assert!(s.next().unwrap() == b.get_value().unwrap());
}
Boolean::Not(b) => {
assert!(s.next().unwrap() != b.get_value().unwrap());
}
Boolean::Constant(b) => {
assert!(input_len == 0);
assert!(s.next().unwrap() == b);
}
}
}
assert_eq!(expected, hash_result.as_bytes());
}
}
}

1822
third_party/bellman/src/gadgets/boolean.rs vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,319 @@
//! Window table lookup gadgets.
use ff::PrimeField;
use super::boolean::Boolean;
use super::num::{AllocatedNum, Num};
use super::*;
use crate::ConstraintSystem;
// Synthesize the constants for each base pattern.
fn synth<'a, Scalar: PrimeField, I>(window_size: usize, constants: I, assignment: &mut [Scalar])
where
I: IntoIterator<Item = &'a Scalar>,
{
assert_eq!(assignment.len(), 1 << window_size);
for (i, constant) in constants.into_iter().enumerate() {
let mut cur = assignment[i].neg();
cur.add_assign(constant);
assignment[i] = cur;
for (j, eval) in assignment.iter_mut().enumerate().skip(i + 1) {
if j & i == i {
eval.add_assign(&cur);
}
}
}
}
/// Performs a 3-bit window table lookup. `bits` is in
/// little-endian order.
pub fn lookup3_xy<Scalar: PrimeField, CS>(
mut cs: CS,
bits: &[Boolean],
coords: &[(Scalar, Scalar)],
) -> Result<(AllocatedNum<Scalar>, AllocatedNum<Scalar>), SynthesisError>
where
CS: ConstraintSystem<Scalar>,
{
assert_eq!(bits.len(), 3);
assert_eq!(coords.len(), 8);
// Calculate the index into `coords`
let i = match (
bits[0].get_value(),
bits[1].get_value(),
bits[2].get_value(),
) {
(Some(a_value), Some(b_value), Some(c_value)) => {
let mut tmp = 0;
if a_value {
tmp += 1;
}
if b_value {
tmp += 2;
}
if c_value {
tmp += 4;
}
Some(tmp)
}
_ => None,
};
// Allocate the x-coordinate resulting from the lookup
let res_x = AllocatedNum::alloc(cs.namespace(|| "x"), || Ok(coords[*i.get()?].0))?;
// Allocate the y-coordinate resulting from the lookup
let res_y = AllocatedNum::alloc(cs.namespace(|| "y"), || Ok(coords[*i.get()?].1))?;
// Compute the coefficients for the lookup constraints
let mut x_coeffs = [Scalar::zero(); 8];
let mut y_coeffs = [Scalar::zero(); 8];
synth::<Scalar, _>(3, coords.iter().map(|c| &c.0), &mut x_coeffs);
synth::<Scalar, _>(3, coords.iter().map(|c| &c.1), &mut y_coeffs);
let precomp = Boolean::and(cs.namespace(|| "precomp"), &bits[1], &bits[2])?;
let one = CS::one();
cs.enforce(
|| "x-coordinate lookup",
|lc| {
lc + (x_coeffs[0b001], one)
+ &bits[1].lc::<Scalar>(one, x_coeffs[0b011])
+ &bits[2].lc::<Scalar>(one, x_coeffs[0b101])
+ &precomp.lc::<Scalar>(one, x_coeffs[0b111])
},
|lc| lc + &bits[0].lc::<Scalar>(one, Scalar::one()),
|lc| {
lc + res_x.get_variable()
- (x_coeffs[0b000], one)
- &bits[1].lc::<Scalar>(one, x_coeffs[0b010])
- &bits[2].lc::<Scalar>(one, x_coeffs[0b100])
- &precomp.lc::<Scalar>(one, x_coeffs[0b110])
},
);
cs.enforce(
|| "y-coordinate lookup",
|lc| {
lc + (y_coeffs[0b001], one)
+ &bits[1].lc::<Scalar>(one, y_coeffs[0b011])
+ &bits[2].lc::<Scalar>(one, y_coeffs[0b101])
+ &precomp.lc::<Scalar>(one, y_coeffs[0b111])
},
|lc| lc + &bits[0].lc::<Scalar>(one, Scalar::one()),
|lc| {
lc + res_y.get_variable()
- (y_coeffs[0b000], one)
- &bits[1].lc::<Scalar>(one, y_coeffs[0b010])
- &bits[2].lc::<Scalar>(one, y_coeffs[0b100])
- &precomp.lc::<Scalar>(one, y_coeffs[0b110])
},
);
Ok((res_x, res_y))
}
/// Performs a 3-bit window table lookup, where
/// one of the bits is a sign bit.
pub fn lookup3_xy_with_conditional_negation<Scalar: PrimeField, CS>(
mut cs: CS,
bits: &[Boolean],
coords: &[(Scalar, Scalar)],
) -> Result<(Num<Scalar>, Num<Scalar>), SynthesisError>
where
CS: ConstraintSystem<Scalar>,
{
assert_eq!(bits.len(), 3);
assert_eq!(coords.len(), 4);
// Calculate the index into `coords`
let i = match (bits[0].get_value(), bits[1].get_value()) {
(Some(a_value), Some(b_value)) => {
let mut tmp = 0;
if a_value {
tmp += 1;
}
if b_value {
tmp += 2;
}
Some(tmp)
}
_ => None,
};
// Allocate the y-coordinate resulting from the lookup
// and conditional negation
let y = AllocatedNum::alloc(cs.namespace(|| "y"), || {
let mut tmp = coords[*i.get()?].1;
if *bits[2].get_value().get()? {
tmp = tmp.neg();
}
Ok(tmp)
})?;
let one = CS::one();
// Compute the coefficients for the lookup constraints
let mut x_coeffs = [Scalar::zero(); 4];
let mut y_coeffs = [Scalar::zero(); 4];
synth::<Scalar, _>(2, coords.iter().map(|c| &c.0), &mut x_coeffs);
synth::<Scalar, _>(2, coords.iter().map(|c| &c.1), &mut y_coeffs);
let precomp = Boolean::and(cs.namespace(|| "precomp"), &bits[0], &bits[1])?;
let x = Num::zero()
.add_bool_with_coeff(one, &Boolean::constant(true), x_coeffs[0b00])
.add_bool_with_coeff(one, &bits[0], x_coeffs[0b01])
.add_bool_with_coeff(one, &bits[1], x_coeffs[0b10])
.add_bool_with_coeff(one, &precomp, x_coeffs[0b11]);
let y_lc = precomp.lc::<Scalar>(one, y_coeffs[0b11])
+ &bits[1].lc::<Scalar>(one, y_coeffs[0b10])
+ &bits[0].lc::<Scalar>(one, y_coeffs[0b01])
+ (y_coeffs[0b00], one);
cs.enforce(
|| "y-coordinate lookup",
|lc| lc + &y_lc + &y_lc,
|lc| lc + &bits[2].lc::<Scalar>(one, Scalar::one()),
|lc| lc + &y_lc - y.get_variable(),
);
Ok((x, y.into()))
}
#[cfg(test)]
mod test {
use super::*;
use crate::gadgets::boolean::{AllocatedBit, Boolean};
use crate::gadgets::test::*;
use bls12_381::Scalar;
use ff::Field;
use rand_core::{RngCore, SeedableRng};
use rand_xorshift::XorShiftRng;
use std::ops::{AddAssign, Neg};
#[test]
fn test_lookup3_xy() {
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for _ in 0..100 {
let mut cs = TestConstraintSystem::new();
let a_val = rng.next_u32() % 2 != 0;
let a = Boolean::from(AllocatedBit::alloc(cs.namespace(|| "a"), Some(a_val)).unwrap());
let b_val = rng.next_u32() % 2 != 0;
let b = Boolean::from(AllocatedBit::alloc(cs.namespace(|| "b"), Some(b_val)).unwrap());
let c_val = rng.next_u32() % 2 != 0;
let c = Boolean::from(AllocatedBit::alloc(cs.namespace(|| "c"), Some(c_val)).unwrap());
let bits = vec![a, b, c];
let points: Vec<(Scalar, Scalar)> = (0..8)
.map(|_| (Scalar::random(&mut rng), Scalar::random(&mut rng)))
.collect();
let res = lookup3_xy(&mut cs, &bits, &points).unwrap();
assert!(cs.is_satisfied());
let mut index = 0;
if a_val {
index += 1
}
if b_val {
index += 2
}
if c_val {
index += 4
}
assert_eq!(res.0.get_value().unwrap(), points[index].0);
assert_eq!(res.1.get_value().unwrap(), points[index].1);
}
}
#[test]
fn test_lookup3_xy_with_conditional_negation() {
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
for _ in 0..100 {
let mut cs = TestConstraintSystem::new();
let a_val = rng.next_u32() % 2 != 0;
let a = Boolean::from(AllocatedBit::alloc(cs.namespace(|| "a"), Some(a_val)).unwrap());
let b_val = rng.next_u32() % 2 != 0;
let b = Boolean::from(AllocatedBit::alloc(cs.namespace(|| "b"), Some(b_val)).unwrap());
let c_val = rng.next_u32() % 2 != 0;
let c = Boolean::from(AllocatedBit::alloc(cs.namespace(|| "c"), Some(c_val)).unwrap());
let bits = vec![a, b, c];
let points: Vec<(Scalar, Scalar)> = (0..4)
.map(|_| (Scalar::random(&mut rng), Scalar::random(&mut rng)))
.collect();
let res = lookup3_xy_with_conditional_negation(&mut cs, &bits, &points).unwrap();
assert!(cs.is_satisfied());
let mut index = 0;
if a_val {
index += 1
}
if b_val {
index += 2
}
assert_eq!(res.0.get_value().unwrap(), points[index].0);
let mut tmp = points[index].1;
if c_val {
tmp = tmp.neg()
}
assert_eq!(res.1.get_value().unwrap(), tmp);
}
}
#[test]
fn test_synth() {
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]);
let window_size = 4;
let mut assignment = vec![Scalar::zero(); 1 << window_size];
let constants: Vec<_> = (0..(1 << window_size))
.map(|_| Scalar::random(&mut rng))
.collect();
synth(window_size, &constants, &mut assignment);
for (b, constant) in constants.iter().enumerate() {
let mut acc = Scalar::zero();
for (j, value) in assignment.iter().enumerate() {
if j & b == j {
acc.add_assign(value);
}
}
assert_eq!(&acc, constant);
}
}
}

View File

@@ -0,0 +1,121 @@
use ff::PrimeField;
use crate::{ConstraintSystem, LinearCombination, SynthesisError, Variable};
pub struct MultiEq<Scalar: PrimeField, CS: ConstraintSystem<Scalar>> {
cs: CS,
ops: usize,
bits_used: usize,
lhs: LinearCombination<Scalar>,
rhs: LinearCombination<Scalar>,
}
impl<Scalar: PrimeField, CS: ConstraintSystem<Scalar>> MultiEq<Scalar, CS> {
pub fn new(cs: CS) -> Self {
MultiEq {
cs,
ops: 0,
bits_used: 0,
lhs: LinearCombination::zero(),
rhs: LinearCombination::zero(),
}
}
fn accumulate(&mut self) {
let ops = self.ops;
let lhs = self.lhs.clone();
let rhs = self.rhs.clone();
self.cs.enforce(
|| format!("multieq {}", ops),
|_| lhs,
|lc| lc + CS::one(),
|_| rhs,
);
self.lhs = LinearCombination::zero();
self.rhs = LinearCombination::zero();
self.bits_used = 0;
self.ops += 1;
}
pub fn enforce_equal(
&mut self,
num_bits: usize,
lhs: &LinearCombination<Scalar>,
rhs: &LinearCombination<Scalar>,
) {
// Check if we will exceed the capacity
if (Scalar::CAPACITY as usize) <= (self.bits_used + num_bits) {
self.accumulate();
}
assert!((Scalar::CAPACITY as usize) > (self.bits_used + num_bits));
let coeff = Scalar::from(2).pow_vartime(&[self.bits_used as u64]);
self.lhs = self.lhs.clone() + (coeff, lhs);
self.rhs = self.rhs.clone() + (coeff, rhs);
self.bits_used += num_bits;
}
}
impl<Scalar: PrimeField, CS: ConstraintSystem<Scalar>> Drop for MultiEq<Scalar, CS> {
fn drop(&mut self) {
if self.bits_used > 0 {
self.accumulate();
}
}
}
impl<Scalar: PrimeField, CS: ConstraintSystem<Scalar>> ConstraintSystem<Scalar>
for MultiEq<Scalar, CS>
{
type Root = Self;
fn one() -> Variable {
CS::one()
}
fn alloc<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<Scalar, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
self.cs.alloc(annotation, f)
}
fn alloc_input<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<Scalar, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
self.cs.alloc_input(annotation, f)
}
fn enforce<A, AR, LA, LB, LC>(&mut self, annotation: A, a: LA, b: LB, c: LC)
where
A: FnOnce() -> AR,
AR: Into<String>,
LA: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
LB: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
LC: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
{
self.cs.enforce(annotation, a, b, c)
}
fn push_namespace<NR, N>(&mut self, name_fn: N)
where
NR: Into<String>,
N: FnOnce() -> NR,
{
self.cs.get_root().push_namespace(name_fn)
}
fn pop_namespace(&mut self) {
self.cs.get_root().pop_namespace()
}
fn get_root(&mut self) -> &mut Self::Root {
self
}
}

View File

@@ -0,0 +1,111 @@
//! Helpers for packing vectors of bits into scalar field elements.
use super::boolean::Boolean;
use super::num::Num;
use super::Assignment;
use crate::{ConstraintSystem, SynthesisError};
use ff::PrimeField;
/// Takes a sequence of booleans and exposes them as compact
/// public inputs
pub fn pack_into_inputs<Scalar, CS>(mut cs: CS, bits: &[Boolean]) -> Result<(), SynthesisError>
where
Scalar: PrimeField,
CS: ConstraintSystem<Scalar>,
{
for (i, bits) in bits.chunks(Scalar::CAPACITY as usize).enumerate() {
let mut num = Num::<Scalar>::zero();
let mut coeff = Scalar::one();
for bit in bits {
num = num.add_bool_with_coeff(CS::one(), bit, coeff);
coeff = coeff.double();
}
let input = cs.alloc_input(|| format!("input {}", i), || Ok(*num.get_value().get()?))?;
// num * 1 = input
cs.enforce(
|| format!("packing constraint {}", i),
|_| num.lc(Scalar::one()),
|lc| lc + CS::one(),
|lc| lc + input,
);
}
Ok(())
}
/// Expands a byte slice into individual bits, most significant bit
/// first within each byte (big-endian bit order).
pub fn bytes_to_bits(bytes: &[u8]) -> Vec<bool> {
    let mut bits = Vec::with_capacity(bytes.len() * 8);
    for &byte in bytes {
        for shift in (0..8).rev() {
            bits.push((byte >> shift) & 1 == 1);
        }
    }
    bits
}
/// Expands a byte slice into individual bits, least significant bit
/// first within each byte (little-endian bit order).
pub fn bytes_to_bits_le(bytes: &[u8]) -> Vec<bool> {
    let mut bits = Vec::with_capacity(bytes.len() * 8);
    for &byte in bytes {
        for shift in 0..8 {
            bits.push((byte >> shift) & 1 == 1);
        }
    }
    bits
}
/// Packs a bit sequence into scalar field elements: the bits are split
/// into chunks of `Scalar::CAPACITY` and each chunk is interpreted as a
/// little-endian integer. Mirrors the packing done by `pack_into_inputs`.
pub fn compute_multipacking<Scalar: PrimeField>(bits: &[bool]) -> Vec<Scalar> {
    bits.chunks(Scalar::CAPACITY as usize)
        .map(|chunk| {
            // acc = sum over set bits k of 2^k.
            let mut acc = Scalar::zero();
            let mut power = Scalar::one();
            for &bit in chunk {
                if bit {
                    acc.add_assign(&power);
                }
                power = power.double();
            }
            acc
        })
        .collect()
}
#[test]
fn test_multipacking() {
use crate::ConstraintSystem;
use bls12_381::Scalar;
use rand_core::{RngCore, SeedableRng};
use rand_xorshift::XorShiftRng;
use super::boolean::{AllocatedBit, Boolean};
use crate::gadgets::test::*;
let mut rng = XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
0xe5,
]);
for num_bits in 0..1500 {
let mut cs = TestConstraintSystem::<Scalar>::new();
let bits: Vec<bool> = (0..num_bits).map(|_| rng.next_u32() % 2 != 0).collect();
let circuit_bits = bits
.iter()
.enumerate()
.map(|(i, &b)| {
Boolean::from(
AllocatedBit::alloc(cs.namespace(|| format!("bit {}", i)), Some(b)).unwrap(),
)
})
.collect::<Vec<_>>();
let expected_inputs = compute_multipacking(&bits);
pack_into_inputs(cs.namespace(|| "pack"), &circuit_bits).unwrap();
assert!(cs.is_satisfied());
assert!(cs.verify(&expected_inputs));
}
}

600
third_party/bellman/src/gadgets/num.rs vendored Normal file
View File

@@ -0,0 +1,600 @@
//! Gadgets representing numbers in the scalar field of the underlying curve.
use ff::{PrimeField, PrimeFieldBits};
use crate::{ConstraintSystem, LinearCombination, SynthesisError, Variable};
use super::Assignment;
use super::boolean::{self, AllocatedBit, Boolean};
pub struct AllocatedNum<Scalar: PrimeField> {
value: Option<Scalar>,
variable: Variable,
}
/// Clones the witness value and the variable handle.
impl<Scalar: PrimeField> Clone for AllocatedNum<Scalar> {
    fn clone(&self) -> Self {
        Self {
            value: self.value,
            variable: self.variable,
        }
    }
}
impl<Scalar: PrimeField> AllocatedNum<Scalar> {
pub fn alloc<CS, F>(mut cs: CS, value: F) -> Result<Self, SynthesisError>
where
CS: ConstraintSystem<Scalar>,
F: FnOnce() -> Result<Scalar, SynthesisError>,
{
let mut new_value = None;
let var = cs.alloc(
|| "num",
|| {
let tmp = value()?;
new_value = Some(tmp);
Ok(tmp)
},
)?;
Ok(AllocatedNum {
value: new_value,
variable: var,
})
}
pub fn inputize<CS>(&self, mut cs: CS) -> Result<(), SynthesisError>
where
CS: ConstraintSystem<Scalar>,
{
let input = cs.alloc_input(|| "input variable", || Ok(*self.value.get()?))?;
cs.enforce(
|| "enforce input is correct",
|lc| lc + input,
|lc| lc + CS::one(),
|lc| lc + self.variable,
);
Ok(())
}
/// Deconstructs this allocated number into its
/// boolean representation in little-endian bit
/// order, requiring that the representation
/// strictly exists "in the field" (i.e., a
/// congruency is not allowed.)
/// Convert `self` into its little-endian bit representation, *strictly*
/// enforcing that the witness is canonical — i.e. that the bit string is
/// less than the field modulus `r`.
///
/// Returns the bits least-significant first.
pub fn to_bits_le_strict<CS>(&self, mut cs: CS) -> Result<Vec<Boolean>, SynthesisError>
where
    Scalar: PrimeFieldBits,
    CS: ConstraintSystem<Scalar>,
{
    // Constrain the AND of all bits in `v`. Used below to track whether an
    // entire "run of ones" in the modulus was matched by the witness bits.
    pub fn kary_and<Scalar, CS>(
        mut cs: CS,
        v: &[AllocatedBit],
    ) -> Result<AllocatedBit, SynthesisError>
    where
        Scalar: PrimeField,
        CS: ConstraintSystem<Scalar>,
    {
        assert!(!v.is_empty());

        // Let's keep this simple for now and just AND them all
        // manually
        let mut cur = None;

        for (i, v) in v.iter().enumerate() {
            if cur.is_none() {
                cur = Some(v.clone());
            } else {
                cur = Some(AllocatedBit::and(
                    cs.namespace(|| format!("and {}", i)),
                    cur.as_ref().unwrap(),
                    v,
                )?);
            }
        }

        Ok(cur.expect("v.len() > 0"))
    }

    // We want to ensure that the bit representation of a is
    // less than or equal to r - 1.
    let a = self.value.map(|e| e.to_le_bits());
    // `b` is the bit representation of r - 1 (the largest canonical value).
    let b = (-Scalar::one()).to_le_bits();

    // Get the bits of a in big-endian order
    let mut a = a.as_ref().map(|e| e.iter().by_vals().rev());

    let mut result = vec![];

    // Runs of ones in r
    let mut last_run = None;
    let mut current_run = vec![];

    let mut found_one = false;
    let mut i = 0;
    for b in b.iter().by_vals().rev() {
        let a_bit = a.as_mut().map(|e| e.next().unwrap());

        // Skip over unset bits at the beginning
        found_one |= b;
        if !found_one {
            // a_bit should also be false
            if let Some(e) = a_bit {
                assert!(!e);
            }
            continue;
        }

        if b {
            // This is part of a run of ones. Let's just
            // allocate the boolean with the expected value.
            let a_bit = AllocatedBit::alloc(cs.namespace(|| format!("bit {}", i)), a_bit)?;
            // ... and add it to the current run of ones.
            current_run.push(a_bit.clone());
            result.push(a_bit);
        } else {
            if !current_run.is_empty() {
                // This is the start of a run of zeros, but we need
                // to k-ary AND against `last_run` first.
                if last_run.is_some() {
                    current_run.push(last_run.clone().unwrap());
                }
                last_run = Some(kary_and(
                    cs.namespace(|| format!("run ending at {}", i)),
                    &current_run,
                )?);
                current_run.truncate(0);
            }

            // If `last_run` is true, `a` must be false, or it would
            // not be in the field.
            //
            // If `last_run` is false, `a` can be true or false.
            let a_bit = AllocatedBit::alloc_conditionally(
                cs.namespace(|| format!("bit {}", i)),
                a_bit,
                last_run.as_ref().expect("char always starts with a one"),
            )?;
            result.push(a_bit);
        }

        i += 1;
    }

    // char is prime, so we'll always end on
    // a run of zeros.
    assert_eq!(current_run.len(), 0);

    // Now, we have `result` in big-endian order.
    // However, now we have to unpack self!

    // Enforce that sum_i 2^i * bit_i equals the allocated value, so the
    // decomposition really is the binary expansion of `self`.
    let mut lc = LinearCombination::zero();
    let mut coeff = Scalar::one();

    for bit in result.iter().rev() {
        lc = lc + (coeff, bit.get_variable());

        coeff = coeff.double();
    }

    lc = lc - self.variable;

    cs.enforce(|| "unpacking constraint", |lc| lc, |lc| lc, |_| lc);

    // Convert into booleans, and reverse for little-endian bit order
    Ok(result.into_iter().map(Boolean::from).rev().collect())
}
/// Decompose the allocated number into its little-endian bits.
///
/// Unlike `to_bits_le_strict`, this only enforces that the bits sum to the
/// value; it does not strongly enforce that the representation is
/// canonical ("in the field").
pub fn to_bits_le<CS>(&self, mut cs: CS) -> Result<Vec<Boolean>, SynthesisError>
where
    Scalar: PrimeFieldBits,
    CS: ConstraintSystem<Scalar>,
{
    let bits = boolean::field_into_allocated_bits_le(&mut cs, self.value)?;

    // Pack the bits into a linear combination: sum_i 2^i * bits[i].
    let mut packed = LinearCombination::zero();
    let mut power = Scalar::one();
    for b in bits.iter() {
        packed = packed + (power, b.get_variable());
        power = power.double();
    }

    // Enforce 0 * 0 = packed - self, i.e. the packed bits equal the value.
    packed = packed - self.variable;
    cs.enforce(|| "unpacking constraint", |lc| lc, |lc| lc, |_| packed);

    Ok(bits.into_iter().map(Boolean::from).collect())
}
/// Allocate `self * other` and enforce the product with one constraint.
pub fn mul<CS>(&self, mut cs: CS, other: &Self) -> Result<Self, SynthesisError>
where
    CS: ConstraintSystem<Scalar>,
{
    let mut value = None;

    let var = cs.alloc(
        || "product num",
        || {
            let mut product = *self.value.get()?;
            product.mul_assign(other.value.get()?);
            value = Some(product);
            Ok(product)
        },
    )?;

    // Enforce: self * other = product.
    cs.enforce(
        || "multiplication constraint",
        |lc| lc + self.variable,
        |lc| lc + other.variable,
        |lc| lc + var,
    );

    Ok(AllocatedNum {
        value,
        variable: var,
    })
}
/// Allocate `self^2` and enforce it with a single squaring constraint.
pub fn square<CS>(&self, mut cs: CS) -> Result<Self, SynthesisError>
where
    CS: ConstraintSystem<Scalar>,
{
    let mut value = None;

    let var = cs.alloc(
        || "squared num",
        || {
            let squared = self.value.get()?.square();
            value = Some(squared);
            Ok(squared)
        },
    )?;

    // Enforce: self * self = squared.
    cs.enforce(
        || "squaring constraint",
        |lc| lc + self.variable,
        |lc| lc + self.variable,
        |lc| lc + var,
    );

    Ok(AllocatedNum {
        value,
        variable: var,
    })
}
/// Enforce that the number is nonzero by exhibiting its inverse.
///
/// Fails at witness time with `DivisionByZero` when the value is zero; the
/// constraint `self * inv = 1` is unsatisfiable for a zero value.
pub fn assert_nonzero<CS>(&self, mut cs: CS) -> Result<(), SynthesisError>
where
    CS: ConstraintSystem<Scalar>,
{
    let inv = cs.alloc(
        || "ephemeral inverse",
        || {
            let v = *self.value.get()?;
            if !v.is_zero_vartime() {
                Ok(v.invert().unwrap())
            } else {
                Err(SynthesisError::DivisionByZero)
            }
        },
    )?;

    // A field element has a multiplicative inverse iff it is nonzero, so
    // self * inv = 1 proves self != 0.
    cs.enforce(
        || "nonzero assertion constraint",
        |lc| lc + self.variable,
        |lc| lc + inv,
        |lc| lc + CS::one(),
    );

    Ok(())
}
/// Takes two allocated numbers `(a, b)` and returns `(b, a)` when
/// `condition` is true and `(a, b)` otherwise, enforcing the swap with two
/// constraints.
pub fn conditionally_reverse<CS>(
    mut cs: CS,
    a: &Self,
    b: &Self,
    condition: &Boolean,
) -> Result<(Self, Self), SynthesisError>
where
    CS: ConstraintSystem<Scalar>,
{
    let first = Self::alloc(cs.namespace(|| "conditional reversal result 1"), || {
        let swap = *condition.get_value().get()?;
        Ok(if swap { *b.value.get()? } else { *a.value.get()? })
    })?;

    // (a - b) * condition = a - first
    // condition = 0 gives first = a; condition = 1 gives first = b.
    cs.enforce(
        || "first conditional reversal",
        |lc| lc + a.variable - b.variable,
        |_| condition.lc(CS::one(), Scalar::one()),
        |lc| lc + a.variable - first.variable,
    );

    let second = Self::alloc(cs.namespace(|| "conditional reversal result 2"), || {
        let swap = *condition.get_value().get()?;
        Ok(if swap { *a.value.get()? } else { *b.value.get()? })
    })?;

    // (b - a) * condition = b - second, mirroring the first constraint.
    cs.enforce(
        || "second conditional reversal",
        |lc| lc + b.variable - a.variable,
        |_| condition.lc(CS::one(), Scalar::one()),
        |lc| lc + b.variable - second.variable,
    );

    Ok((first, second))
}
/// Return the witness value, if one was assigned.
pub fn get_value(&self) -> Option<Scalar> {
    self.value
}
/// Return the underlying constraint-system variable.
pub fn get_variable(&self) -> Variable {
    self.variable
}
}
/// A linear combination of variables together with its (optional) witness
/// value. Additions accumulate into the linear combination without
/// allocating new variables, making this cheaper than `AllocatedNum` for
/// running sums.
pub struct Num<Scalar: PrimeField> {
    // Witness value of the combination, when all contributing bits are known.
    value: Option<Scalar>,
    lc: LinearCombination<Scalar>,
}
impl<Scalar: PrimeField> From<AllocatedNum<Scalar>> for Num<Scalar> {
    /// Wrap an allocated number as a single-term linear combination.
    fn from(num: AllocatedNum<Scalar>) -> Num<Scalar> {
        let lc = LinearCombination::<Scalar>::zero() + num.variable;
        Num {
            value: num.value,
            lc,
        }
    }
}
impl<Scalar: PrimeField> Num<Scalar> {
    /// The additive identity: an empty linear combination valued zero.
    pub fn zero() -> Self {
        Num {
            value: Some(Scalar::zero()),
            lc: LinearCombination::zero(),
        }
    }

    /// Return the witness value, if known.
    pub fn get_value(&self) -> Option<Scalar> {
        self.value
    }

    /// Return this number's linear combination scaled by `coeff`.
    pub fn lc(&self, coeff: Scalar) -> LinearCombination<Scalar> {
        LinearCombination::zero() + (coeff, &self.lc)
    }

    /// Add `coeff * bit` to the number, tracking the witness value only
    /// when both operands are known.
    pub fn add_bool_with_coeff(self, one: Variable, bit: &Boolean, coeff: Scalar) -> Self {
        let value = self.value.zip(bit.get_value()).map(|(mut acc, is_set)| {
            if is_set {
                acc.add_assign(&coeff);
            }
            acc
        });

        Num {
            value,
            lc: self.lc + &bit.lc(one, coeff),
        }
    }
}
#[cfg(test)]
mod test {
    use crate::ConstraintSystem;
    use bls12_381::Scalar;
    use ff::{Field, PrimeField, PrimeFieldBits};
    use rand_core::SeedableRng;
    use rand_xorshift::XorShiftRng;
    use std::ops::{Neg, SubAssign};

    use super::{AllocatedNum, Boolean};
    use crate::gadgets::test::*;

    // Allocation records the witness under the annotation path "num".
    #[test]
    fn test_allocated_num() {
        let mut cs = TestConstraintSystem::new();

        AllocatedNum::alloc(&mut cs, || Ok(Scalar::one())).unwrap();

        assert!(cs.get("num") == Scalar::one());
    }

    #[test]
    fn test_num_squaring() {
        let mut cs = TestConstraintSystem::new();

        let n = AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(3))).unwrap();
        let n2 = n.square(&mut cs).unwrap();

        assert!(cs.is_satisfied());
        assert!(cs.get("squared num") == Scalar::from(9));
        assert!(n2.value.unwrap() == Scalar::from(9));
        // Tampering with the witness must violate the squaring constraint.
        cs.set("squared num", Scalar::from(10));
        assert!(!cs.is_satisfied());
    }

    #[test]
    fn test_num_multiplication() {
        let mut cs = TestConstraintSystem::new();

        let n = AllocatedNum::alloc(cs.namespace(|| "a"), || Ok(Scalar::from(12))).unwrap();
        let n2 = AllocatedNum::alloc(cs.namespace(|| "b"), || Ok(Scalar::from(10))).unwrap();
        let n3 = n.mul(&mut cs, &n2).unwrap();

        assert!(cs.is_satisfied());
        assert!(cs.get("product num") == Scalar::from(120));
        assert!(n3.value.unwrap() == Scalar::from(120));
        // Tampering with the witness must violate the product constraint.
        cs.set("product num", Scalar::from(121));
        assert!(!cs.is_satisfied());
    }

    #[test]
    fn test_num_conditional_reversal() {
        let mut rng = XorShiftRng::from_seed([
            0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]);
        {
            // condition = false: outputs keep their original order.
            let mut cs = TestConstraintSystem::new();

            let a =
                AllocatedNum::alloc(cs.namespace(|| "a"), || Ok(Scalar::random(&mut rng))).unwrap();
            let b =
                AllocatedNum::alloc(cs.namespace(|| "b"), || Ok(Scalar::random(&mut rng))).unwrap();
            let condition = Boolean::constant(false);
            let (c, d) = AllocatedNum::conditionally_reverse(&mut cs, &a, &b, &condition).unwrap();

            assert!(cs.is_satisfied());

            assert_eq!(a.value.unwrap(), c.value.unwrap());
            assert_eq!(b.value.unwrap(), d.value.unwrap());
        }

        {
            // condition = true: outputs are swapped.
            let mut cs = TestConstraintSystem::new();

            let a =
                AllocatedNum::alloc(cs.namespace(|| "a"), || Ok(Scalar::random(&mut rng))).unwrap();
            let b =
                AllocatedNum::alloc(cs.namespace(|| "b"), || Ok(Scalar::random(&mut rng))).unwrap();
            let condition = Boolean::constant(true);
            let (c, d) = AllocatedNum::conditionally_reverse(&mut cs, &a, &b, &condition).unwrap();

            assert!(cs.is_satisfied());

            assert_eq!(a.value.unwrap(), d.value.unwrap());
            assert_eq!(b.value.unwrap(), c.value.unwrap());
        }
    }

    #[test]
    fn test_num_nonzero() {
        {
            let mut cs = TestConstraintSystem::new();

            let n = AllocatedNum::alloc(&mut cs, || Ok(Scalar::from(3))).unwrap();
            n.assert_nonzero(&mut cs).unwrap();

            assert!(cs.is_satisfied());
            // A wrong inverse witness must break the nonzero constraint.
            cs.set("ephemeral inverse", Scalar::from(3));
            assert!(cs.which_is_unsatisfied() == Some("nonzero assertion constraint"));
        }
        {
            // Zero has no inverse, so synthesis itself must fail.
            let mut cs = TestConstraintSystem::new();

            let n = AllocatedNum::alloc(&mut cs, || Ok(Scalar::zero())).unwrap();
            assert!(n.assert_nonzero(&mut cs).is_err());
        }
    }

    #[test]
    fn test_into_bits_strict() {
        let negone = Scalar::one().neg();

        let mut cs = TestConstraintSystem::new();

        let n = AllocatedNum::alloc(&mut cs, || Ok(negone)).unwrap();
        n.to_bits_le_strict(&mut cs).unwrap();

        assert!(cs.is_satisfied());

        // make the bit representation the characteristic
        cs.set("bit 254/boolean", Scalar::one());

        // this makes the conditional boolean constraint fail
        assert_eq!(
            cs.which_is_unsatisfied().unwrap(),
            "bit 254/boolean constraint"
        );
    }

    #[test]
    fn test_into_bits() {
        let mut rng = XorShiftRng::from_seed([
            0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]);

        for i in 0..200 {
            let r = Scalar::random(&mut rng);
            let mut cs = TestConstraintSystem::new();

            let n = AllocatedNum::alloc(&mut cs, || Ok(r)).unwrap();

            // Alternate between the weak and the strict decomposition.
            let bits = if i % 2 == 0 {
                n.to_bits_le(&mut cs).unwrap()
            } else {
                n.to_bits_le_strict(&mut cs).unwrap()
            };

            assert!(cs.is_satisfied());

            // Compare against the native decomposition, skipping the
            // representation's top bit.
            for (b, a) in r
                .to_le_bits()
                .iter()
                .by_vals()
                .rev()
                .skip(1)
                .zip(bits.iter().rev())
            {
                if let Boolean::Is(ref a) = a {
                    assert_eq!(b, a.get_value().unwrap());
                } else {
                    unreachable!()
                }
            }

            cs.set("num", Scalar::random(&mut rng));
            assert!(!cs.is_satisfied());
            cs.set("num", r);
            assert!(cs.is_satisfied());

            // Flipping any single bit must break satisfiability.
            for i in 0..Scalar::NUM_BITS {
                let name = format!("bit {}/boolean", i);
                let cur = cs.get(&name);
                let mut tmp = Scalar::one();
                tmp.sub_assign(&cur);
                cs.set(&name, tmp);
                assert!(!cs.is_satisfied());
                cs.set(&name, cur);
                assert!(cs.is_satisfied());
            }
        }
    }
}

View File

@@ -0,0 +1,388 @@
//! Circuits for the [SHA-256] hash function and its internal compression
//! function.
//!
//! [SHA-256]: https://tools.ietf.org/html/rfc6234
use super::boolean::Boolean;
use super::multieq::MultiEq;
use super::uint32::UInt32;
use crate::{ConstraintSystem, SynthesisError};
use ff::PrimeField;
/// SHA-256 round constants K[0..64] (RFC 6234, section 5.1).
#[allow(clippy::unreadable_literal)]
const ROUND_CONSTANTS: [u32; 64] = [
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
];
/// SHA-256 initial hash value H(0) (RFC 6234, section 6.1).
#[allow(clippy::unreadable_literal)]
const IV: [u32; 8] = [
    0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
];
/// Hash a single, already-padded 512-bit block with the SHA-256
/// compression function, returning the 256 output bits big-endian.
pub fn sha256_block_no_padding<Scalar, CS>(
    mut cs: CS,
    input: &[Boolean],
) -> Result<Vec<Boolean>, SynthesisError>
where
    Scalar: PrimeField,
    CS: ConstraintSystem<Scalar>,
{
    assert_eq!(input.len(), 512);

    // Compress once against the fixed IV and flatten the eight state
    // words into a single big-endian bit vector.
    let state = sha256_compression_function(&mut cs, input, &get_sha256_iv())?;
    let mut out = Vec::with_capacity(256);
    for word in state {
        out.extend(word.into_bits_be());
    }
    Ok(out)
}
/// Compute the SHA-256 digest of `input` (a whole number of bytes given as
/// big-endian bits), returning the 256 digest bits.
pub fn sha256<Scalar, CS>(mut cs: CS, input: &[Boolean]) -> Result<Vec<Boolean>, SynthesisError>
where
    Scalar: PrimeField,
    CS: ConstraintSystem<Scalar>,
{
    assert!(input.len() % 8 == 0);

    let bit_len = input.len() as u64;
    let mut padded = input.to_vec();

    // Padding (RFC 6234, section 4.1): a single '1' bit, then zeros until the
    // length is 64 bits short of a 512-bit boundary, then the original
    // length L as a 64-bit big-endian integer.
    padded.push(Boolean::constant(true));
    while (padded.len() + 64) % 512 != 0 {
        padded.push(Boolean::constant(false));
    }
    padded.extend((0..64).rev().map(|i| Boolean::constant((bit_len >> i) & 1 == 1)));
    assert!(padded.len() % 512 == 0);

    // Fold every 512-bit block through the compression function.
    let mut state = get_sha256_iv();
    for (i, block) in padded.chunks(512).enumerate() {
        state = sha256_compression_function(cs.namespace(|| format!("block {}", i)), block, &state)?;
    }

    Ok(state.into_iter().flat_map(|w| w.into_bits_be()).collect())
}
/// Build the SHA-256 initial state as constant `UInt32` words.
fn get_sha256_iv() -> Vec<UInt32> {
    let mut iv = Vec::with_capacity(IV.len());
    for &word in IV.iter() {
        iv.push(UInt32::constant(word));
    }
    iv
}
/// One application of the SHA-256 compression function: absorb a 512-bit
/// message block into the 8-word chaining state and return the new state.
///
/// Additions into `a` and `e` are *deferred* (see `Maybe` below) so several
/// modular additions can share constraints via `MultiEq`.
#[allow(clippy::many_single_char_names)]
fn sha256_compression_function<Scalar, CS>(
    cs: CS,
    input: &[Boolean],
    current_hash_value: &[UInt32],
) -> Result<Vec<UInt32>, SynthesisError>
where
    Scalar: PrimeField,
    CS: ConstraintSystem<Scalar>,
{
    assert_eq!(input.len(), 512);
    assert_eq!(current_hash_value.len(), 8);

    // Message schedule: the 16 input words, extended to 64 below.
    let mut w = input
        .chunks(32)
        .map(|e| UInt32::from_bits_be(e))
        .collect::<Vec<_>>();

    // We can save some constraints by combining some of
    // the constraints in different u32 additions
    let mut cs = MultiEq::new(cs);

    for i in 16..64 {
        let cs = &mut cs.namespace(|| format!("w extension {}", i));

        // s0 := (w[i-15] rightrotate 7) xor (w[i-15] rightrotate 18) xor (w[i-15] rightshift 3)
        let mut s0 = w[i - 15].rotr(7);
        s0 = s0.xor(cs.namespace(|| "first xor for s0"), &w[i - 15].rotr(18))?;
        s0 = s0.xor(cs.namespace(|| "second xor for s0"), &w[i - 15].shr(3))?;

        // s1 := (w[i-2] rightrotate 17) xor (w[i-2] rightrotate 19) xor (w[i-2] rightshift 10)
        let mut s1 = w[i - 2].rotr(17);
        s1 = s1.xor(cs.namespace(|| "first xor for s1"), &w[i - 2].rotr(19))?;
        s1 = s1.xor(cs.namespace(|| "second xor for s1"), &w[i - 2].shr(10))?;

        let tmp = UInt32::addmany(
            cs.namespace(|| "computation of w[i]"),
            &[w[i - 16].clone(), s0, w[i - 7].clone(), s1],
        )?;

        // w[i] := w[i-16] + s0 + w[i-7] + s1
        w.push(tmp);
    }

    assert_eq!(w.len(), 64);

    // A working variable that is either a concrete word or a pending sum
    // of words whose modular addition has been deferred so it can be
    // merged into a later `addmany` (saving constraints).
    enum Maybe {
        Deferred(Vec<UInt32>),
        Concrete(UInt32),
    }

    impl Maybe {
        // Resolve the value: a concrete word is returned as-is; a deferred
        // sum is extended with `others` and added in one `addmany` call.
        fn compute<Scalar, CS, M>(self, cs: M, others: &[UInt32]) -> Result<UInt32, SynthesisError>
        where
            Scalar: PrimeField,
            CS: ConstraintSystem<Scalar>,
            M: ConstraintSystem<Scalar, Root = MultiEq<Scalar, CS>>,
        {
            Ok(match self {
                Maybe::Concrete(ref v) => return Ok(v.clone()),
                Maybe::Deferred(mut v) => {
                    v.extend(others.iter().cloned());
                    UInt32::addmany(cs, &v)?
                }
            })
        }
    }

    // Working variables a..h, initialized from the chaining state.
    let mut a = Maybe::Concrete(current_hash_value[0].clone());
    let mut b = current_hash_value[1].clone();
    let mut c = current_hash_value[2].clone();
    let mut d = current_hash_value[3].clone();
    let mut e = Maybe::Concrete(current_hash_value[4].clone());
    let mut f = current_hash_value[5].clone();
    let mut g = current_hash_value[6].clone();
    let mut h = current_hash_value[7].clone();

    for i in 0..64 {
        let cs = &mut cs.namespace(|| format!("compression round {}", i));

        // S1 := (e rightrotate 6) xor (e rightrotate 11) xor (e rightrotate 25)
        let new_e = e.compute(cs.namespace(|| "deferred e computation"), &[])?;
        let mut s1 = new_e.rotr(6);
        s1 = s1.xor(cs.namespace(|| "first xor for s1"), &new_e.rotr(11))?;
        s1 = s1.xor(cs.namespace(|| "second xor for s1"), &new_e.rotr(25))?;

        // ch := (e and f) xor ((not e) and g)
        let ch = UInt32::sha256_ch(cs.namespace(|| "ch"), &new_e, &f, &g)?;

        // temp1 := h + S1 + ch + k[i] + w[i]
        let temp1 = vec![
            h.clone(),
            s1,
            ch,
            UInt32::constant(ROUND_CONSTANTS[i]),
            w[i].clone(),
        ];

        // S0 := (a rightrotate 2) xor (a rightrotate 13) xor (a rightrotate 22)
        let new_a = a.compute(cs.namespace(|| "deferred a computation"), &[])?;
        let mut s0 = new_a.rotr(2);
        s0 = s0.xor(cs.namespace(|| "first xor for s0"), &new_a.rotr(13))?;
        s0 = s0.xor(cs.namespace(|| "second xor for s0"), &new_a.rotr(22))?;

        // maj := (a and b) xor (a and c) xor (b and c)
        let maj = UInt32::sha256_maj(cs.namespace(|| "maj"), &new_a, &b, &c)?;

        // temp2 := S0 + maj
        let temp2 = vec![s0, maj];

        /*
        h := g
        g := f
        f := e
        e := d + temp1
        d := c
        c := b
        b := a
        a := temp1 + temp2
        */

        h = g;
        g = f;
        f = new_e;
        // The sums for `e` and `a` stay deferred until the next round (or
        // the final state addition) forces them to be computed.
        e = Maybe::Deferred(temp1.iter().cloned().chain(Some(d)).collect::<Vec<_>>());
        d = c;
        c = b;
        b = new_a;
        a = Maybe::Deferred(
            temp1
                .iter()
                .cloned()
                .chain(temp2.iter().cloned())
                .collect::<Vec<_>>(),
        );
    }

    /*
    Add the compressed chunk to the current hash value:
    h0 := h0 + a
    h1 := h1 + b
    h2 := h2 + c
    h3 := h3 + d
    h4 := h4 + e
    h5 := h5 + f
    h6 := h6 + g
    h7 := h7 + h
    */

    let h0 = a.compute(
        cs.namespace(|| "deferred h0 computation"),
        &[current_hash_value[0].clone()],
    )?;

    let h1 = UInt32::addmany(
        cs.namespace(|| "new h1"),
        &[current_hash_value[1].clone(), b],
    )?;

    let h2 = UInt32::addmany(
        cs.namespace(|| "new h2"),
        &[current_hash_value[2].clone(), c],
    )?;

    let h3 = UInt32::addmany(
        cs.namespace(|| "new h3"),
        &[current_hash_value[3].clone(), d],
    )?;

    let h4 = e.compute(
        cs.namespace(|| "deferred h4 computation"),
        &[current_hash_value[4].clone()],
    )?;

    let h5 = UInt32::addmany(
        cs.namespace(|| "new h5"),
        &[current_hash_value[5].clone(), f],
    )?;

    let h6 = UInt32::addmany(
        cs.namespace(|| "new h6"),
        &[current_hash_value[6].clone(), g],
    )?;

    let h7 = UInt32::addmany(
        cs.namespace(|| "new h7"),
        &[current_hash_value[7].clone(), h],
    )?;

    Ok(vec![h0, h1, h2, h3, h4, h5, h6, h7])
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::gadgets::boolean::AllocatedBit;
    use crate::gadgets::test::TestConstraintSystem;
    use bls12_381::Scalar;
    use hex_literal::hex;
    use rand_core::{RngCore, SeedableRng};
    use rand_xorshift::XorShiftRng;

    // A compression over all-constant inputs must cost zero constraints
    // and match the well-known SHA-256 digest of the empty message.
    #[test]
    fn test_blank_hash() {
        let iv = get_sha256_iv();

        let mut cs = TestConstraintSystem::<Scalar>::new();
        let mut input_bits: Vec<_> = (0..512).map(|_| Boolean::Constant(false)).collect();
        input_bits[0] = Boolean::Constant(true);
        let out = sha256_compression_function(&mut cs, &input_bits, &iv).unwrap();
        let mut out = out.into_iter().flat_map(|e| e.into_bits_be());

        assert!(cs.is_satisfied());
        assert_eq!(cs.num_constraints(), 0);

        let expected = hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855");

        for b in expected.iter() {
            for i in (0..8).rev() {
                let c = out.next().unwrap().get_value().unwrap();

                assert_eq!(c, (b >> i) & 1u8 == 1u8);
            }
        }
    }

    // Pin the constraint count of one compression over allocated inputs.
    #[test]
    fn test_full_block() {
        let mut rng = XorShiftRng::from_seed([
            0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]);

        let iv = get_sha256_iv();

        let mut cs = TestConstraintSystem::<Scalar>::new();
        let input_bits: Vec<_> = (0..512)
            .map(|i| {
                Boolean::from(
                    AllocatedBit::alloc(
                        cs.namespace(|| format!("input bit {}", i)),
                        Some(rng.next_u32() % 2 != 0),
                    )
                    .unwrap(),
                )
            })
            .collect();

        sha256_compression_function(cs.namespace(|| "sha256"), &input_bits, &iv).unwrap();

        assert!(cs.is_satisfied());
        assert_eq!(cs.num_constraints() - 512, 25840);
    }

    // Cross-check the circuit against the `sha2` reference implementation
    // for a range of input lengths.
    #[test]
    fn test_against_vectors() {
        use sha2::{Digest, Sha256};

        let mut rng = XorShiftRng::from_seed([
            0x59, 0x62, 0xbe, 0x3d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]);

        for input_len in (0..32).chain((32..256).filter(|a| a % 8 == 0)) {
            let mut h = Sha256::new();
            let data: Vec<u8> = (0..input_len).map(|_| rng.next_u32() as u8).collect();
            h.update(&data);
            let hash_result = h.finalize();

            let mut cs = TestConstraintSystem::<Scalar>::new();
            let mut input_bits = vec![];

            for (byte_i, input_byte) in data.into_iter().enumerate() {
                for bit_i in (0..8).rev() {
                    let cs = cs.namespace(|| format!("input bit {} {}", byte_i, bit_i));

                    input_bits.push(
                        AllocatedBit::alloc(cs, Some((input_byte >> bit_i) & 1u8 == 1u8))
                            .unwrap()
                            .into(),
                    );
                }
            }

            let r = sha256(&mut cs, &input_bits).unwrap();

            assert!(cs.is_satisfied());

            let mut s = hash_result
                .iter()
                .flat_map(|&byte| (0..8).rev().map(move |i| (byte >> i) & 1u8 == 1u8));

            for b in r {
                match b {
                    Boolean::Is(b) => {
                        assert!(s.next().unwrap() == b.get_value().unwrap());
                    }
                    Boolean::Not(b) => {
                        assert!(s.next().unwrap() != b.get_value().unwrap());
                    }
                    Boolean::Constant(b) => {
                        // Fully-constant output bits only occur for the
                        // empty message.
                        assert!(input_len == 0);
                        assert!(s.next().unwrap() == b);
                    }
                }
            }
        }
    }
}

View File

@@ -0,0 +1,469 @@
//! Helpers for testing circuit implementations.
use ff::PrimeField;
use crate::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};
use std::collections::HashMap;
use std::fmt::Write;
use byteorder::{BigEndian, ByteOrder};
use std::cmp::Ordering;
use std::collections::BTreeMap;
use blake2s_simd::{Params as Blake2sParams, State as Blake2sState};
/// What a registered namespace path points at.
#[derive(Debug)]
enum NamedObject {
    /// Index into `TestConstraintSystem::constraints`.
    Constraint(usize),
    /// An allocated input or auxiliary variable.
    Var(Variable),
    /// An interior namespace node.
    Namespace,
}
/// One enforced R1CS constraint `a * b = c` plus its namespace path.
type NamedConstraint<Scalar> = (
    LinearCombination<Scalar>,
    LinearCombination<Scalar>,
    LinearCombination<Scalar>,
    String,
);
/// Constraint system for testing purposes.
///
/// Records every allocation and constraint together with its fully
/// qualified namespace path so tests can inspect, tweak, and re-check
/// assignments by name.
pub struct TestConstraintSystem<Scalar: PrimeField> {
    // Path -> variable/constraint/namespace registry.
    named_objects: HashMap<String, NamedObject>,
    // Stack of namespace segments for the current scope.
    current_namespace: Vec<String>,
    constraints: Vec<NamedConstraint<Scalar>>,
    // Input/aux assignments paired with their annotation paths.
    inputs: Vec<(Scalar, String)>,
    aux: Vec<(Scalar, String)>,
}
/// `Variable` wrapper with a total order (all inputs before all aux
/// variables, then by index) so linear-combination terms can be collected
/// into a `BTreeMap`.
#[derive(Clone, Copy)]
struct OrderedVariable(Variable);

impl Eq for OrderedVariable {}
impl PartialEq for OrderedVariable {
    fn eq(&self, other: &OrderedVariable) -> bool {
        match (self.0.get_unchecked(), other.0.get_unchecked()) {
            (Index::Input(ref a), Index::Input(ref b)) => a == b,
            (Index::Aux(ref a), Index::Aux(ref b)) => a == b,
            // An input never equals an aux variable.
            _ => false,
        }
    }
}
impl PartialOrd for OrderedVariable {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for OrderedVariable {
    fn cmp(&self, other: &Self) -> Ordering {
        match (self.0.get_unchecked(), other.0.get_unchecked()) {
            (Index::Input(ref a), Index::Input(ref b)) => a.cmp(b),
            (Index::Aux(ref a), Index::Aux(ref b)) => a.cmp(b),
            // All inputs sort before all aux variables.
            (Index::Input(_), Index::Aux(_)) => Ordering::Less,
            (Index::Aux(_), Index::Input(_)) => Ordering::Greater,
        }
    }
}
/// Collapse a term list into a map from variable to its summed
/// coefficient, dropping terms whose total coefficient is zero so the
/// result is in normal form (used for printing and hashing).
fn proc_lc<Scalar: PrimeField>(terms: &[(Variable, Scalar)]) -> BTreeMap<OrderedVariable, Scalar> {
    let mut map = BTreeMap::new();
    for &(var, coeff) in terms {
        map.entry(OrderedVariable(var))
            .or_insert_with(Scalar::zero)
            .add_assign(&coeff);
    }

    // Remove terms that have a zero coefficient to normalize. `retain`
    // does this in one pass instead of collecting keys and re-removing.
    map.retain(|_, coeff| !coeff.is_zero_vartime());

    map
}
/// Fold one normalized linear combination into the Blake2s state: first
/// the term count (8 bytes BE), then per term a one-byte kind tag
/// (`'I'` input / `'A'` aux), the 8-byte BE index, and the coefficient
/// flipped to big-endian.
fn hash_lc<Scalar: PrimeField>(terms: &[(Variable, Scalar)], h: &mut Blake2sState) {
    let map = proc_lc::<Scalar>(terms);

    // 9 bytes of tag+index followed by the coefficient bytes.
    // NOTE(review): assumes the scalar repr is exactly 32 bytes —
    // `copy_from_slice` below panics otherwise; confirm for other fields.
    let mut buf = [0u8; 9 + 32];

    BigEndian::write_u64(&mut buf[0..8], map.len() as u64);
    h.update(&buf[0..8]);
    for (var, coeff) in map {
        match var.0.get_unchecked() {
            Index::Input(i) => {
                buf[0] = b'I';
                BigEndian::write_u64(&mut buf[1..9], i as u64);
            }
            Index::Aux(i) => {
                buf[0] = b'A';
                BigEndian::write_u64(&mut buf[1..9], i as u64);
            }
        }

        // TODO: Change this to hash over the standard scalar endianness, not an assumed
        // little-endian representation that we flip to big-endian.
        let coeff_repr = coeff.to_repr();
        let coeff_be: Vec<_> = coeff_repr.as_ref().iter().cloned().rev().collect();
        buf[9..].copy_from_slice(&coeff_be[..]);

        h.update(&buf);
    }
}
/// Evaluate a linear combination against the given input/aux assignments.
fn eval_lc<Scalar: PrimeField>(
    terms: &[(Variable, Scalar)],
    inputs: &[(Scalar, String)],
    aux: &[(Scalar, String)],
) -> Scalar {
    terms.iter().fold(Scalar::zero(), |mut acc, &(var, ref coeff)| {
        // Look up the assignment, multiply by the coefficient, accumulate.
        let mut term = match var.get_unchecked() {
            Index::Input(index) => inputs[index].0,
            Index::Aux(index) => aux[index].0,
        };
        term.mul_assign(coeff);
        acc.add_assign(&term);
        acc
    })
}
impl<Scalar: PrimeField> Default for TestConstraintSystem<Scalar> {
    // `Default` delegates to `new`, which seeds the system with `ONE`.
    fn default() -> Self {
        Self::new()
    }
}
impl<Scalar: PrimeField> TestConstraintSystem<Scalar> {
    /// Create an empty system pre-seeded with the constant `ONE` input.
    pub fn new() -> TestConstraintSystem<Scalar> {
        let mut map = HashMap::new();
        map.insert(
            "ONE".into(),
            NamedObject::Var(TestConstraintSystem::<Scalar>::one()),
        );

        TestConstraintSystem {
            named_objects: map,
            current_namespace: vec![],
            constraints: vec![],
            inputs: vec![(Scalar::one(), "ONE".into())],
            aux: vec![],
        }
    }

    /// Render every constraint as `name: (a) * (b) = (c)` for debugging.
    pub fn pretty_print(&self) -> String {
        let mut s = String::new();

        let negone = Scalar::one().neg();

        // Recognize power-of-two coefficients so they print as `2^i`.
        let powers_of_two = (0..Scalar::NUM_BITS)
            .map(|i| Scalar::from(2).pow_vartime(&[u64::from(i)]))
            .collect::<Vec<_>>();

        // Print a single (normalized) linear combination.
        let pp = |s: &mut String, lc: &LinearCombination<Scalar>| {
            write!(s, "(").unwrap();
            let mut is_first = true;
            for (var, coeff) in proc_lc::<Scalar>(lc.as_ref()) {
                if coeff == negone {
                    write!(s, " - ").unwrap();
                } else if !is_first {
                    write!(s, " + ").unwrap();
                }
                is_first = false;

                if coeff != Scalar::one() && coeff != negone {
                    for (i, x) in powers_of_two.iter().enumerate() {
                        if x == &coeff {
                            write!(s, "2^{} . ", i).unwrap();
                            break;
                        }
                    }

                    write!(s, "{:?} . ", coeff).unwrap();
                }

                match var.0.get_unchecked() {
                    Index::Input(i) => {
                        write!(s, "`{}`", &self.inputs[i].1).unwrap();
                    }
                    Index::Aux(i) => {
                        write!(s, "`{}`", &self.aux[i].1).unwrap();
                    }
                }
            }
            if is_first {
                // Nothing was visited, print 0.
                write!(s, "0").unwrap();
            }
            write!(s, ")").unwrap();
        };

        for &(ref a, ref b, ref c, ref name) in &self.constraints {
            writeln!(&mut s).unwrap();

            write!(&mut s, "{}: ", name).unwrap();
            pp(&mut s, a);
            write!(&mut s, " * ").unwrap();
            pp(&mut s, b);
            write!(&mut s, " = ").unwrap();
            pp(&mut s, c);
        }

        writeln!(&mut s).unwrap();

        s
    }

    /// Return a hex-encoded Blake2s digest summarizing the system's shape
    /// (variable/constraint counts plus every normalized constraint) —
    /// useful for detecting accidental circuit changes.
    pub fn hash(&self) -> String {
        let mut h = Blake2sParams::new().hash_length(32).to_state();
        {
            let mut buf = [0u8; 24];

            BigEndian::write_u64(&mut buf[0..8], self.inputs.len() as u64);
            BigEndian::write_u64(&mut buf[8..16], self.aux.len() as u64);
            BigEndian::write_u64(&mut buf[16..24], self.constraints.len() as u64);
            h.update(&buf);
        }

        for constraint in &self.constraints {
            hash_lc::<Scalar>(constraint.0.as_ref(), &mut h);
            hash_lc::<Scalar>(constraint.1.as_ref(), &mut h);
            hash_lc::<Scalar>(constraint.2.as_ref(), &mut h);
        }

        // Hex-encode with `write!` rather than `s += &format!(...)`, which
        // would allocate a fresh String per byte.
        let mut s = String::new();
        for b in h.finalize().as_ref() {
            write!(s, "{:02x}", b).expect("writing to a String cannot fail");
        }

        s
    }

    /// Return the path of the first unsatisfied constraint, if any.
    pub fn which_is_unsatisfied(&self) -> Option<&str> {
        for &(ref a, ref b, ref c, ref path) in &self.constraints {
            let mut a = eval_lc::<Scalar>(a.as_ref(), &self.inputs, &self.aux);
            let b = eval_lc::<Scalar>(b.as_ref(), &self.inputs, &self.aux);
            let c = eval_lc::<Scalar>(c.as_ref(), &self.inputs, &self.aux);

            a.mul_assign(&b);

            if a != c {
                return Some(&*path);
            }
        }

        None
    }

    /// True when every constraint holds under the current assignment.
    pub fn is_satisfied(&self) -> bool {
        self.which_is_unsatisfied().is_none()
    }

    /// Number of constraints enforced so far.
    pub fn num_constraints(&self) -> usize {
        self.constraints.len()
    }

    /// Overwrite the assignment of the variable registered at `path`.
    ///
    /// Panics when `path` does not name a variable.
    pub fn set(&mut self, path: &str, to: Scalar) {
        match self.named_objects.get(path) {
            Some(&NamedObject::Var(ref v)) => match v.get_unchecked() {
                Index::Input(index) => self.inputs[index].0 = to,
                Index::Aux(index) => self.aux[index].0 = to,
            },
            Some(e) => panic!(
                "tried to set path `{}` to value, but `{:?}` already exists there.",
                path, e
            ),
            _ => panic!("no variable exists at path: {}", path),
        }
    }

    /// Check the primary inputs (excluding the implicit `ONE`) against
    /// `expected`.
    pub fn verify(&self, expected: &[Scalar]) -> bool {
        assert_eq!(expected.len() + 1, self.inputs.len());

        for (a, b) in self.inputs.iter().skip(1).zip(expected.iter()) {
            if &a.0 != b {
                return false;
            }
        }

        true
    }

    /// Number of primary inputs, including the implicit `ONE`.
    pub fn num_inputs(&self) -> usize {
        self.inputs.len()
    }

    /// Fetch input `index`, asserting it was annotated with `path`.
    pub fn get_input(&mut self, index: usize, path: &str) -> Scalar {
        let (assignment, name) = self.inputs[index].clone();

        assert_eq!(path, name);

        assignment
    }

    /// Fetch the assignment of the variable registered at `path`.
    ///
    /// Panics when `path` does not name a variable.
    pub fn get(&mut self, path: &str) -> Scalar {
        match self.named_objects.get(path) {
            Some(&NamedObject::Var(ref v)) => match v.get_unchecked() {
                Index::Input(index) => self.inputs[index].0,
                Index::Aux(index) => self.aux[index].0,
            },
            Some(e) => panic!(
                "tried to get value of path `{}`, but `{:?}` exists there (not a variable)",
                path, e
            ),
            _ => panic!("no variable exists at path: {}", path),
        }
    }

    /// Record `to` at `path`, panicking on duplicate paths.
    fn set_named_obj(&mut self, path: String, to: NamedObject) {
        if self.named_objects.contains_key(&path) {
            panic!("tried to create object at existing path: {}", path);
        }

        self.named_objects.insert(path, to);
    }
}
/// Join the current namespace segments and a leaf name into a single
/// `/`-separated path.
///
/// # Panics
///
/// Panics if `this` itself contains a `/`, which would make paths
/// ambiguous.
fn compute_path(ns: &[String], this: String) -> String {
    assert!(!this.contains('/'), "'/' is not allowed in names");

    // Join all segments with '/'; `join` handles the separator placement
    // that the original did manually with a `needs_separation` flag.
    let mut segments: Vec<&str> = ns.iter().map(String::as_str).collect();
    segments.push(&this);
    segments.join("/")
}
impl<Scalar: PrimeField> ConstraintSystem<Scalar> for TestConstraintSystem<Scalar> {
    type Root = Self;

    /// Allocate a private (aux) variable, recording its assignment and
    /// fully-qualified annotation path.
    fn alloc<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<Scalar, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        let index = self.aux.len();
        let path = compute_path(&self.current_namespace, annotation().into());
        self.aux.push((f()?, path.clone()));
        let var = Variable::new_unchecked(Index::Aux(index));
        self.set_named_obj(path, NamedObject::Var(var));

        Ok(var)
    }

    /// Allocate a public input variable, recording its assignment and path.
    fn alloc_input<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<Scalar, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        let index = self.inputs.len();
        let path = compute_path(&self.current_namespace, annotation().into());
        self.inputs.push((f()?, path.clone()));
        let var = Variable::new_unchecked(Index::Input(index));
        self.set_named_obj(path, NamedObject::Var(var));

        Ok(var)
    }

    /// Record an `a * b = c` constraint under its annotation path.
    fn enforce<A, AR, LA, LB, LC>(&mut self, annotation: A, a: LA, b: LB, c: LC)
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
        LA: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
        LB: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
        LC: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
    {
        let path = compute_path(&self.current_namespace, annotation().into());
        let index = self.constraints.len();
        self.set_named_obj(path.clone(), NamedObject::Constraint(index));

        let a = a(LinearCombination::zero());
        let b = b(LinearCombination::zero());
        let c = c(LinearCombination::zero());

        self.constraints.push((a, b, c, path));
    }

    /// Register the namespace path and descend into it.
    fn push_namespace<NR, N>(&mut self, name_fn: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        let name = name_fn().into();
        let path = compute_path(&self.current_namespace, name.clone());
        self.set_named_obj(path, NamedObject::Namespace);
        self.current_namespace.push(name);
    }

    /// Ascend out of the innermost namespace; panics when at the root.
    fn pop_namespace(&mut self) {
        assert!(self.current_namespace.pop().is_some());
    }

    fn get_root(&mut self) -> &mut Self::Root {
        self
    }
}
// End-to-end smoke test of the test constraint system: allocation,
// namespacing, enforcement, tampering, and path-based get/set.
#[test]
fn test_cs() {
    use bls12_381::Scalar;

    let mut cs = TestConstraintSystem::new();
    assert!(cs.is_satisfied());
    assert_eq!(cs.num_constraints(), 0);
    let a = cs
        .namespace(|| "a")
        .alloc(|| "var", || Ok(Scalar::from(10)))
        .unwrap();
    let b = cs
        .namespace(|| "b")
        .alloc(|| "var", || Ok(Scalar::from(4)))
        .unwrap();
    let c = cs.alloc(|| "product", || Ok(Scalar::from(40))).unwrap();

    cs.enforce(|| "mult", |lc| lc + a, |lc| lc + b, |lc| lc + c);
    assert!(cs.is_satisfied());
    assert_eq!(cs.num_constraints(), 1);

    // Changing `a` breaks "mult" but satisfies the "eq" constraint below.
    cs.set("a/var", Scalar::from(4));

    let one = TestConstraintSystem::<Scalar>::one();
    cs.enforce(|| "eq", |lc| lc + a, |lc| lc + one, |lc| lc + b);

    assert!(!cs.is_satisfied());
    assert!(cs.which_is_unsatisfied() == Some("mult"));

    assert!(cs.get("product") == Scalar::from(40));

    // Repairing the product witness restores satisfiability.
    cs.set("product", Scalar::from(16));

    assert!(cs.is_satisfied());

    {
        let mut cs = cs.namespace(|| "test1");
        let mut cs = cs.namespace(|| "test2");
        cs.alloc(|| "hehe", || Ok(Scalar::one())).unwrap();
    }

    assert!(cs.get("test1/test2/hehe") == Scalar::one());
}

View File

@@ -0,0 +1,782 @@
//! Circuit representation of a [`u32`], with helpers for the [`sha256`]
//! gadgets.
//!
//! [`sha256`]: crate::gadgets::sha256
use ff::PrimeField;
use crate::{ConstraintSystem, LinearCombination, SynthesisError};
use super::boolean::{AllocatedBit, Boolean};
use super::multieq::MultiEq;
/// Represents an interpretation of 32 `Boolean` objects as an
/// unsigned integer.
#[derive(Clone)]
pub struct UInt32 {
    // Least significant bit first
    bits: Vec<Boolean>,
    // Concrete witness value; `None` when the value is not known.
    value: Option<u32>,
}
impl UInt32 {
/// Construct a constant `UInt32` from a `u32`
pub fn constant(value: u32) -> Self {
    // Extract the bits least-significant first.
    let bits = (0..32)
        .map(|i| Boolean::constant((value >> i) & 1 == 1))
        .collect();

    UInt32 {
        bits,
        value: Some(value),
    }
}
/// Allocate a `UInt32` in the constraint system, witnessing each bit
/// (least significant first) individually.
pub fn alloc<Scalar, CS>(mut cs: CS, value: Option<u32>) -> Result<Self, SynthesisError>
where
    Scalar: PrimeField,
    CS: ConstraintSystem<Scalar>,
{
    let bits = (0..32)
        .map(|i| {
            // When the value is unknown, every bit is allocated unknown.
            let bit_value = value.map(|v| (v >> i) & 1 == 1);
            Ok(Boolean::from(AllocatedBit::alloc(
                cs.namespace(|| format!("allocated bit {}", i)),
                bit_value,
            )?))
        })
        .collect::<Result<Vec<_>, SynthesisError>>()?;

    Ok(UInt32 { bits, value })
}
/// Consume the `UInt32`, returning its bits most significant first.
pub fn into_bits_be(self) -> Vec<Boolean> {
    self.bits.into_iter().rev().collect()
}
/// Build a `UInt32` from exactly 32 bits given most significant first.
pub fn from_bits_be(bits: &[Boolean]) -> Self {
    assert_eq!(bits.len(), 32);

    // Accumulate the concrete value MSB-first; a single unknown bit makes
    // the whole value unknown.
    let value = bits.iter().fold(Some(0u32), |acc, b| {
        match (acc, b.get_value()) {
            (Some(v), Some(bit)) => Some((v << 1) | bit as u32),
            _ => None,
        }
    });

    UInt32 {
        value,
        // Internal storage is least significant bit first.
        bits: bits.iter().rev().cloned().collect(),
    }
}
/// Turns this `UInt32` into its little-endian byte order representation.
pub fn into_bits(self) -> Vec<Boolean> {
self.bits
}
/// Converts a little-endian byte order representation of bits into a
/// `UInt32`.
pub fn from_bits(bits: &[Boolean]) -> Self {
assert_eq!(bits.len(), 32);
let new_bits = bits.to_vec();
let mut value = Some(0u32);
for b in new_bits.iter().rev() {
if let Some(v) = value.as_mut() {
*v <<= 1
};
match *b {
Boolean::Constant(b) => {
if b {
if let Some(v) = value.as_mut() {
*v |= 1;
}
}
}
Boolean::Is(ref b) => match b.get_value() {
Some(true) => {
if let Some(v) = value.as_mut() {
*v |= 1;
}
}
Some(false) => {}
None => value = None,
},
Boolean::Not(ref b) => match b.get_value() {
Some(false) => {
if let Some(v) = value.as_mut() {
*v |= 1;
}
}
Some(true) => {}
None => value = None,
},
}
}
UInt32 {
value,
bits: new_bits,
}
}
pub fn rotr(&self, by: usize) -> Self {
let by = by % 32;
let new_bits = self
.bits
.iter()
.skip(by)
.chain(self.bits.iter())
.take(32)
.cloned()
.collect();
UInt32 {
bits: new_bits,
value: self.value.map(|v| v.rotate_right(by as u32)),
}
}
pub fn shr(&self, by: usize) -> Self {
let by = by % 32;
let fill = Boolean::constant(false);
let new_bits = self
.bits
.iter() // The bits are least significant first
.skip(by) // Skip the bits that will be lost during the shift
.chain(Some(&fill).into_iter().cycle()) // Rest will be zeros
.take(32) // Only 32 bits needed!
.cloned()
.collect();
UInt32 {
bits: new_bits,
value: self.value.map(|v| v >> by as u32),
}
}
fn triop<Scalar, CS, F, U>(
mut cs: CS,
a: &Self,
b: &Self,
c: &Self,
tri_fn: F,
circuit_fn: U,
) -> Result<Self, SynthesisError>
where
Scalar: PrimeField,
CS: ConstraintSystem<Scalar>,
F: Fn(u32, u32, u32) -> u32,
U: Fn(&mut CS, usize, &Boolean, &Boolean, &Boolean) -> Result<Boolean, SynthesisError>,
{
let new_value = match (a.value, b.value, c.value) {
(Some(a), Some(b), Some(c)) => Some(tri_fn(a, b, c)),
_ => None,
};
let bits = a
.bits
.iter()
.zip(b.bits.iter())
.zip(c.bits.iter())
.enumerate()
.map(|(i, ((a, b), c))| circuit_fn(&mut cs, i, a, b, c))
.collect::<Result<_, _>>()?;
Ok(UInt32 {
bits,
value: new_value,
})
}
/// Compute the `maj` value (a and b) xor (a and c) xor (b and c)
/// during SHA256.
pub fn sha256_maj<Scalar, CS>(
cs: CS,
a: &Self,
b: &Self,
c: &Self,
) -> Result<Self, SynthesisError>
where
Scalar: PrimeField,
CS: ConstraintSystem<Scalar>,
{
Self::triop(
cs,
a,
b,
c,
|a, b, c| (a & b) ^ (a & c) ^ (b & c),
|cs, i, a, b, c| Boolean::sha256_maj(cs.namespace(|| format!("maj {}", i)), a, b, c),
)
}
/// Compute the `ch` value `(a and b) xor ((not a) and c)`
/// during SHA256.
pub fn sha256_ch<Scalar, CS>(
cs: CS,
a: &Self,
b: &Self,
c: &Self,
) -> Result<Self, SynthesisError>
where
Scalar: PrimeField,
CS: ConstraintSystem<Scalar>,
{
Self::triop(
cs,
a,
b,
c,
|a, b, c| (a & b) ^ ((!a) & c),
|cs, i, a, b, c| Boolean::sha256_ch(cs.namespace(|| format!("ch {}", i)), a, b, c),
)
}
/// XOR this `UInt32` with another `UInt32`
pub fn xor<Scalar, CS>(&self, mut cs: CS, other: &Self) -> Result<Self, SynthesisError>
where
Scalar: PrimeField,
CS: ConstraintSystem<Scalar>,
{
let new_value = match (self.value, other.value) {
(Some(a), Some(b)) => Some(a ^ b),
_ => None,
};
let bits = self
.bits
.iter()
.zip(other.bits.iter())
.enumerate()
.map(|(i, (a, b))| Boolean::xor(cs.namespace(|| format!("xor of bit {}", i)), a, b))
.collect::<Result<_, _>>()?;
Ok(UInt32 {
bits,
value: new_value,
})
}
/// Perform modular addition of several `UInt32` objects.
pub fn addmany<Scalar, CS, M>(mut cs: M, operands: &[Self]) -> Result<Self, SynthesisError>
where
Scalar: PrimeField,
CS: ConstraintSystem<Scalar>,
M: ConstraintSystem<Scalar, Root = MultiEq<Scalar, CS>>,
{
// Make some arbitrary bounds for ourselves to avoid overflows
// in the scalar field
assert!(Scalar::NUM_BITS >= 64);
assert!(operands.len() >= 2); // Weird trivial cases that should never happen
assert!(operands.len() <= 10);
// Compute the maximum value of the sum so we allocate enough bits for
// the result
let mut max_value = (operands.len() as u64) * (u64::from(u32::max_value()));
// Keep track of the resulting value
let mut result_value = Some(0u64);
// This is a linear combination that we will enforce to equal the
// output
let mut lc = LinearCombination::zero();
let mut all_constants = true;
// Iterate over the operands
for op in operands {
// Accumulate the value
match op.value {
Some(val) => {
if let Some(v) = result_value.as_mut() {
*v += u64::from(val);
}
}
None => {
// If any of our operands have unknown value, we won't
// know the value of the result
result_value = None;
}
}
// Iterate over each bit of the operand and add the operand to
// the linear combination
let mut coeff = Scalar::one();
for bit in &op.bits {
lc = lc + &bit.lc(CS::one(), coeff);
all_constants &= bit.is_constant();
coeff = coeff.double();
}
}
// The value of the actual result is modulo 2^32
let modular_value = result_value.map(|v| v as u32);
if let (true, Some(result)) = (all_constants, modular_value) {
// We can just return a constant, rather than
// unpacking the result into allocated bits.
return Ok(UInt32::constant(result));
}
// Storage area for the resulting bits
let mut result_bits = vec![];
// Linear combination representing the output,
// for comparison with the sum of the operands
let mut result_lc = LinearCombination::zero();
// Allocate each bit of the result
let mut coeff = Scalar::one();
let mut i = 0;
while max_value != 0 {
// Allocate the bit
let b = AllocatedBit::alloc(
cs.namespace(|| format!("result bit {}", i)),
result_value.map(|v| (v >> i) & 1 == 1),
)?;
// Add this bit to the result combination
result_lc = result_lc + (coeff, b.get_variable());
result_bits.push(b.into());
max_value >>= 1;
i += 1;
coeff = coeff.double();
}
// Enforce equality between the sum and result
cs.get_root().enforce_equal(i, &lc, &result_lc);
// Discard carry bits that we don't care about
result_bits.truncate(32);
Ok(UInt32 {
bits: result_bits,
value: modular_value,
})
}
}
#[cfg(test)]
mod test {
    use super::UInt32;
    use crate::gadgets::boolean::Boolean;
    use crate::gadgets::multieq::MultiEq;
    use crate::gadgets::test::*;
    use crate::ConstraintSystem;
    use bls12_381::Scalar;
    use ff::Field;
    use rand_core::{RngCore, SeedableRng};
    use rand_xorshift::XorShiftRng;

    // Round-trip test: big-endian bit vector -> UInt32 -> big-endian bits.
    #[test]
    fn test_uint32_from_bits_be() {
        let mut rng = XorShiftRng::from_seed([
            0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]);

        for _ in 0..1000 {
            let v = (0..32)
                .map(|_| Boolean::constant(rng.next_u32() % 2 != 0))
                .collect::<Vec<_>>();

            let b = UInt32::from_bits_be(&v);

            // Internal bits are LSB-first and must match the tracked value.
            for (i, bit) in b.bits.iter().enumerate() {
                match *bit {
                    Boolean::Constant(bit) => {
                        assert!(bit == ((b.value.unwrap() >> i) & 1 == 1));
                    }
                    _ => unreachable!(),
                }
            }

            let expected_to_be_same = b.into_bits_be();

            for x in v.iter().zip(expected_to_be_same.iter()) {
                match x {
                    (&Boolean::Constant(true), &Boolean::Constant(true)) => {}
                    (&Boolean::Constant(false), &Boolean::Constant(false)) => {}
                    _ => unreachable!(),
                }
            }
        }
    }

    // Round-trip test: little-endian bit vector -> UInt32 -> bits.
    #[test]
    fn test_uint32_from_bits() {
        let mut rng = XorShiftRng::from_seed([
            0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]);

        for _ in 0..1000 {
            let v = (0..32)
                .map(|_| Boolean::constant(rng.next_u32() % 2 != 0))
                .collect::<Vec<_>>();

            let b = UInt32::from_bits(&v);

            for (i, bit) in b.bits.iter().enumerate() {
                match *bit {
                    Boolean::Constant(bit) => {
                        assert!(bit == ((b.value.unwrap() >> i) & 1 == 1));
                    }
                    _ => unreachable!(),
                }
            }

            let expected_to_be_same = b.into_bits();

            for x in v.iter().zip(expected_to_be_same.iter()) {
                match x {
                    (&Boolean::Constant(true), &Boolean::Constant(true)) => {}
                    (&Boolean::Constant(false), &Boolean::Constant(false)) => {}
                    _ => unreachable!(),
                }
            }
        }
    }

    // XOR gadget: mixes allocated and constant operands and checks the
    // result bit-by-bit against the native computation.
    #[test]
    fn test_uint32_xor() {
        let mut rng = XorShiftRng::from_seed([
            0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]);

        for _ in 0..1000 {
            let mut cs = TestConstraintSystem::<Scalar>::new();

            let a = rng.next_u32();
            let b = rng.next_u32();
            let c = rng.next_u32();

            let mut expected = a ^ b ^ c;

            let a_bit = UInt32::alloc(cs.namespace(|| "a_bit"), Some(a)).unwrap();
            let b_bit = UInt32::constant(b);
            let c_bit = UInt32::alloc(cs.namespace(|| "c_bit"), Some(c)).unwrap();

            let r = a_bit.xor(cs.namespace(|| "first xor"), &b_bit).unwrap();
            let r = r.xor(cs.namespace(|| "second xor"), &c_bit).unwrap();

            assert!(cs.is_satisfied());

            assert!(r.value == Some(expected));

            for b in r.bits.iter() {
                match *b {
                    Boolean::Is(ref b) => {
                        assert!(b.get_value().unwrap() == (expected & 1 == 1));
                    }
                    Boolean::Not(ref b) => {
                        assert!(b.get_value().unwrap() != (expected & 1 == 1));
                    }
                    Boolean::Constant(b) => {
                        assert!(b == (expected & 1 == 1));
                    }
                }

                expected >>= 1;
            }
        }
    }

    // addmany over all-constant operands must short-circuit to a constant
    // (no allocated bits).
    #[test]
    fn test_uint32_addmany_constants() {
        let mut rng = XorShiftRng::from_seed([
            0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]);

        for _ in 0..1000 {
            let mut cs = TestConstraintSystem::<Scalar>::new();

            let a = rng.next_u32();
            let b = rng.next_u32();
            let c = rng.next_u32();

            let a_bit = UInt32::constant(a);
            let b_bit = UInt32::constant(b);
            let c_bit = UInt32::constant(c);

            let mut expected = a.wrapping_add(b).wrapping_add(c);

            let r = {
                let mut cs = MultiEq::new(&mut cs);
                let r =
                    UInt32::addmany(cs.namespace(|| "addition"), &[a_bit, b_bit, c_bit]).unwrap();
                r
            };

            assert!(r.value == Some(expected));

            for b in r.bits.iter() {
                match *b {
                    Boolean::Is(_) => panic!(),
                    Boolean::Not(_) => panic!(),
                    Boolean::Constant(b) => {
                        assert!(b == (expected & 1 == 1));
                    }
                }

                expected >>= 1;
            }
        }
    }

    // addmany over mixed operands: checks the wrapped sum, then flips a
    // result bit to confirm the equality constraint actually binds.
    #[test]
    #[allow(clippy::many_single_char_names)]
    fn test_uint32_addmany() {
        let mut rng = XorShiftRng::from_seed([
            0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]);

        for _ in 0..1000 {
            let mut cs = TestConstraintSystem::<Scalar>::new();

            let a = rng.next_u32();
            let b = rng.next_u32();
            let c = rng.next_u32();
            let d = rng.next_u32();

            let mut expected = (a ^ b).wrapping_add(c).wrapping_add(d);

            let a_bit = UInt32::alloc(cs.namespace(|| "a_bit"), Some(a)).unwrap();
            let b_bit = UInt32::constant(b);
            let c_bit = UInt32::constant(c);
            let d_bit = UInt32::alloc(cs.namespace(|| "d_bit"), Some(d)).unwrap();

            let r = a_bit.xor(cs.namespace(|| "xor"), &b_bit).unwrap();
            let r = {
                let mut cs = MultiEq::new(&mut cs);
                UInt32::addmany(cs.namespace(|| "addition"), &[r, c_bit, d_bit]).unwrap()
            };

            assert!(cs.is_satisfied());

            assert!(r.value == Some(expected));

            for b in r.bits.iter() {
                match *b {
                    Boolean::Is(ref b) => {
                        assert!(b.get_value().unwrap() == (expected & 1 == 1));
                    }
                    Boolean::Not(ref b) => {
                        assert!(b.get_value().unwrap() != (expected & 1 == 1));
                    }
                    Boolean::Constant(_) => unreachable!(),
                }

                expected >>= 1;
            }

            // Flip a bit and see if the addition constraint still works
            if cs.get("addition/result bit 0/boolean").is_zero_vartime() {
                cs.set("addition/result bit 0/boolean", Field::one());
            } else {
                cs.set("addition/result bit 0/boolean", Field::zero());
            }

            assert!(!cs.is_satisfied());
        }
    }

    // rotr on a constant must agree with u32::rotate_right for every
    // rotation amount.
    #[test]
    fn test_uint32_rotr() {
        let mut rng = XorShiftRng::from_seed([
            0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]);

        let mut num = rng.next_u32();

        let a = UInt32::constant(num);

        for i in 0..32 {
            let b = a.rotr(i);
            assert_eq!(a.bits.len(), b.bits.len());

            assert!(b.value.unwrap() == num);

            let mut tmp = num;
            for b in &b.bits {
                match *b {
                    Boolean::Constant(b) => {
                        assert_eq!(b, tmp & 1 == 1);
                    }
                    _ => unreachable!(),
                }

                tmp >>= 1;
            }

            num = num.rotate_right(1);
        }
    }

    // shr must agree with u32::wrapping_shr (which masks the shift amount
    // by 31, matching `by % 32` in the gadget).
    #[test]
    fn test_uint32_shr() {
        let mut rng = XorShiftRng::from_seed([
            0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]);

        for _ in 0..50 {
            for i in 0..60 {
                let num = rng.next_u32();
                let a = UInt32::constant(num).shr(i);
                let b = UInt32::constant(num.wrapping_shr(i as u32));

                assert_eq!(a.value.unwrap(), num.wrapping_shr(i as u32));

                assert_eq!(a.bits.len(), b.bits.len());
                for (a, b) in a.bits.iter().zip(b.bits.iter()) {
                    assert_eq!(a.get_value().unwrap(), b.get_value().unwrap());
                }
            }
        }
    }

    // SHA256 `maj` gadget against the native bitwise formula.
    #[test]
    fn test_uint32_sha256_maj() {
        let mut rng = XorShiftRng::from_seed([
            0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]);

        for _ in 0..1000 {
            let mut cs = TestConstraintSystem::<Scalar>::new();

            let a = rng.next_u32();
            let b = rng.next_u32();
            let c = rng.next_u32();

            let mut expected = (a & b) ^ (a & c) ^ (b & c);

            let a_bit = UInt32::alloc(cs.namespace(|| "a_bit"), Some(a)).unwrap();
            let b_bit = UInt32::constant(b);
            let c_bit = UInt32::alloc(cs.namespace(|| "c_bit"), Some(c)).unwrap();

            let r = UInt32::sha256_maj(&mut cs, &a_bit, &b_bit, &c_bit).unwrap();

            assert!(cs.is_satisfied());

            assert!(r.value == Some(expected));

            for b in r.bits.iter() {
                match b {
                    Boolean::Is(ref b) => {
                        assert!(b.get_value().unwrap() == (expected & 1 == 1));
                    }
                    Boolean::Not(ref b) => {
                        assert!(b.get_value().unwrap() != (expected & 1 == 1));
                    }
                    Boolean::Constant(b) => {
                        assert!(*b == (expected & 1 == 1));
                    }
                }

                expected >>= 1;
            }
        }
    }

    // SHA256 `ch` gadget against the native bitwise formula.
    #[test]
    fn test_uint32_sha256_ch() {
        let mut rng = XorShiftRng::from_seed([
            0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
            0xbc, 0xe5,
        ]);

        for _ in 0..1000 {
            let mut cs = TestConstraintSystem::<Scalar>::new();

            let a = rng.next_u32();
            let b = rng.next_u32();
            let c = rng.next_u32();

            let mut expected = (a & b) ^ ((!a) & c);

            let a_bit = UInt32::alloc(cs.namespace(|| "a_bit"), Some(a)).unwrap();
            let b_bit = UInt32::constant(b);
            let c_bit = UInt32::alloc(cs.namespace(|| "c_bit"), Some(c)).unwrap();

            let r = UInt32::sha256_ch(&mut cs, &a_bit, &b_bit, &c_bit).unwrap();

            assert!(cs.is_satisfied());

            assert!(r.value == Some(expected));

            for b in r.bits.iter() {
                match b {
                    Boolean::Is(ref b) => {
                        assert!(b.get_value().unwrap() == (expected & 1 == 1));
                    }
                    Boolean::Not(ref b) => {
                        assert!(b.get_value().unwrap() != (expected & 1 == 1));
                    }
                    Boolean::Constant(b) => {
                        assert!(*b == (expected & 1 == 1));
                    }
                }

                expected >>= 1;
            }
        }
    }
}

View File

@@ -0,0 +1,502 @@
use rand_core::RngCore;
use std::ops::{AddAssign, MulAssign};
use std::sync::Arc;
use ff::{Field, PrimeField};
use group::{prime::PrimeCurveAffine, Curve, Group, Wnaf, WnafGroup};
use pairing::Engine;
use super::{Parameters, VerifyingKey};
use crate::{Circuit, ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};
use crate::domain::{EvaluationDomain, Scalar};
use crate::multicore::Worker;
/// Generates a random common reference string for
/// a circuit.
///
/// The group generators and the five trapdoor ("toxic waste") scalars are
/// all drawn from `rng`; they exist only for the duration of the call to
/// `generate_parameters`.
pub fn generate_random_parameters<E, C, R>(
    circuit: C,
    mut rng: &mut R,
) -> Result<Parameters<E>, SynthesisError>
where
    E: Engine,
    E::G1: WnafGroup,
    E::G2: WnafGroup,
    C: Circuit<E::Fr>,
    R: RngCore,
{
    // Random generators for both groups.
    let g1 = E::G1::random(&mut rng);
    let g2 = E::G2::random(&mut rng);

    // Sample the trapdoor scalars in a fixed order so that a seeded RNG
    // reproduces the same CRS as before.
    let mut fresh_scalar = || E::Fr::random(&mut rng);
    let alpha = fresh_scalar();
    let beta = fresh_scalar();
    let gamma = fresh_scalar();
    let delta = fresh_scalar();
    let tau = fresh_scalar();

    generate_parameters::<E, C>(circuit, g1, g2, alpha, beta, gamma, delta, tau)
}
/// This is our assembly structure that we'll use to synthesize the
/// circuit into a QAP.
struct KeypairAssembly<Scalar: PrimeField> {
    // Counts of allocated public inputs, auxiliary (witness) variables,
    // and enforced constraints.
    num_inputs: usize,
    num_aux: usize,
    num_constraints: usize,
    // Sparse QAP matrices: for each variable (outer index), the list of
    // (coefficient, constraint index) pairs in the A/B/C matrices,
    // split between input and auxiliary variables.
    at_inputs: Vec<Vec<(Scalar, usize)>>,
    bt_inputs: Vec<Vec<(Scalar, usize)>>,
    ct_inputs: Vec<Vec<(Scalar, usize)>>,
    at_aux: Vec<Vec<(Scalar, usize)>>,
    bt_aux: Vec<Vec<(Scalar, usize)>>,
    ct_aux: Vec<Vec<(Scalar, usize)>>,
}
impl<Scalar: PrimeField> ConstraintSystem<Scalar> for KeypairAssembly<Scalar> {
    type Root = Self;

    fn alloc<F, A, AR>(&mut self, _: A, _: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<Scalar, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        // Key generation works purely over the constraint structure, so
        // the assignment closure is never invoked.
        let index = self.num_aux;
        self.num_aux += 1;

        // Give the fresh auxiliary variable an empty column in each of
        // the three QAP matrices.
        self.at_aux.push(Vec::new());
        self.bt_aux.push(Vec::new());
        self.ct_aux.push(Vec::new());

        Ok(Variable(Index::Aux(index)))
    }

    fn alloc_input<F, A, AR>(&mut self, _: A, _: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<Scalar, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        // As in `alloc`, no assignment is computed during keypair assembly.
        let index = self.num_inputs;
        self.num_inputs += 1;

        self.at_inputs.push(Vec::new());
        self.bt_inputs.push(Vec::new());
        self.ct_inputs.push(Vec::new());

        Ok(Variable(Index::Input(index)))
    }

    fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, a: LA, b: LB, c: LC)
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
        LA: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
        LB: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
        LC: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
    {
        // Scatter the terms of `lc` into the per-variable matrix columns,
        // tagging every coefficient with the row (constraint) it occurs in.
        fn accumulate<Scalar: PrimeField>(
            lc: LinearCombination<Scalar>,
            inputs: &mut [Vec<(Scalar, usize)>],
            aux: &mut [Vec<(Scalar, usize)>],
            row: usize,
        ) {
            for (var, coeff) in lc.0 {
                match var {
                    Variable(Index::Input(id)) => inputs[id].push((coeff, row)),
                    Variable(Index::Aux(id)) => aux[id].push((coeff, row)),
                }
            }
        }

        let row = self.num_constraints;
        accumulate(
            a(LinearCombination::zero()),
            &mut self.at_inputs,
            &mut self.at_aux,
            row,
        );
        accumulate(
            b(LinearCombination::zero()),
            &mut self.bt_inputs,
            &mut self.bt_aux,
            row,
        );
        accumulate(
            c(LinearCombination::zero()),
            &mut self.ct_inputs,
            &mut self.ct_aux,
            row,
        );

        self.num_constraints += 1;
    }

    fn push_namespace<NR, N>(&mut self, _: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        // Namespaces carry no meaning during key generation.
    }

    fn pop_namespace(&mut self) {
        // Namespaces carry no meaning during key generation.
    }

    fn get_root(&mut self) -> &mut Self::Root {
        self
    }
}
/// Create parameters for a circuit, given some toxic waste.
///
/// `g1`/`g2` are group generators; `alpha`, `beta`, `gamma`, `delta` and
/// `tau` are the trapdoor ("toxic waste") scalars. Anyone who learns these
/// scalars can forge proofs, so callers must discard them after this call.
/// Fails with `UnexpectedIdentity` if `gamma` or `delta` is zero, and with
/// `UnconstrainedVariable` if any auxiliary variable is never used in a
/// constraint.
#[allow(clippy::too_many_arguments)]
pub fn generate_parameters<E, C>(
    circuit: C,
    g1: E::G1,
    g2: E::G2,
    alpha: E::Fr,
    beta: E::Fr,
    gamma: E::Fr,
    delta: E::Fr,
    tau: E::Fr,
) -> Result<Parameters<E>, SynthesisError>
where
    E: Engine,
    E::G1: WnafGroup,
    E::G2: WnafGroup,
    C: Circuit<E::Fr>,
{
    let mut assembly = KeypairAssembly {
        num_inputs: 0,
        num_aux: 0,
        num_constraints: 0,
        at_inputs: vec![],
        bt_inputs: vec![],
        ct_inputs: vec![],
        at_aux: vec![],
        bt_aux: vec![],
        ct_aux: vec![],
    };

    // Allocate the "one" input variable
    assembly.alloc_input(|| "", || Ok(E::Fr::one()))?;

    // Synthesize the circuit.
    circuit.synthesize(&mut assembly)?;

    // Input constraints to ensure full density of IC query
    // x * 0 = 0
    for i in 0..assembly.num_inputs {
        assembly.enforce(|| "", |lc| lc + Variable(Index::Input(i)), |lc| lc, |lc| lc);
    }

    // Create bases for blind evaluation of polynomials at tau
    let powers_of_tau = vec![Scalar::<E::Fr>(E::Fr::zero()); assembly.num_constraints];
    let mut powers_of_tau = EvaluationDomain::from_coeffs(powers_of_tau)?;

    // Compute G1 window table
    let mut g1_wnaf = Wnaf::new();
    let g1_wnaf = g1_wnaf.base(g1, {
        // H query
        (powers_of_tau.as_ref().len() - 1)
        // IC/L queries
        + assembly.num_inputs + assembly.num_aux
        // A query
        + assembly.num_inputs + assembly.num_aux
        // B query
        + assembly.num_inputs + assembly.num_aux
    });

    // Compute G2 window table
    let mut g2_wnaf = Wnaf::new();
    let g2_wnaf = g2_wnaf.base(g2, {
        // B query
        assembly.num_inputs + assembly.num_aux
    });

    // gamma and delta must be invertible; a zero trapdoor would make the
    // CRS unusable, so it is rejected as an error.
    let gamma_inverse = {
        let inverse = gamma.invert();
        if bool::from(inverse.is_some()) {
            Ok(inverse.unwrap())
        } else {
            Err(SynthesisError::UnexpectedIdentity)
        }
    }?;
    let delta_inverse = {
        let inverse = delta.invert();
        if bool::from(inverse.is_some()) {
            Ok(inverse.unwrap())
        } else {
            Err(SynthesisError::UnexpectedIdentity)
        }
    }?;

    let worker = Worker::new();

    let mut h = vec![E::G1Affine::identity(); powers_of_tau.as_ref().len() - 1];
    {
        // Compute powers of tau
        {
            let powers_of_tau = powers_of_tau.as_mut();
            worker.scope(powers_of_tau.len(), |scope, chunk| {
                for (i, powers_of_tau) in powers_of_tau.chunks_mut(chunk).enumerate() {
                    scope.spawn(move |_scope| {
                        // Each chunk starts at tau^(i * chunk) and steps by tau.
                        let mut current_tau_power = tau.pow_vartime(&[(i * chunk) as u64]);
                        for p in powers_of_tau {
                            p.0 = current_tau_power;
                            current_tau_power.mul_assign(&tau);
                        }
                    });
                }
            });
        }

        // coeff = t(x) / delta
        let mut coeff = powers_of_tau.z(&tau);
        coeff.mul_assign(&delta_inverse);

        // Compute the H query with multiple threads
        worker.scope(h.len(), |scope, chunk| {
            for (h, p) in h
                .chunks_mut(chunk)
                .zip(powers_of_tau.as_ref().chunks(chunk))
            {
                let mut g1_wnaf = g1_wnaf.shared();
                scope.spawn(move |_scope| {
                    // Set values of the H query to g1^{(tau^i * t(tau)) / delta}
                    let h_proj: Vec<_> = p[..h.len()]
                        .iter()
                        .map(|p| {
                            // Compute final exponent
                            let mut exp = p.0;
                            exp.mul_assign(&coeff);

                            // Exponentiate
                            g1_wnaf.scalar(&exp)
                        })
                        .collect();

                    // Batch normalize
                    E::G1::batch_normalize(&h_proj, h);
                });
            }
        });
    }

    // Use inverse FFT to convert powers of tau to Lagrange coefficients
    powers_of_tau.ifft(&worker);
    let powers_of_tau = powers_of_tau.into_coeffs();

    let mut a = vec![E::G1Affine::identity(); assembly.num_inputs + assembly.num_aux];
    let mut b_g1 = vec![E::G1Affine::identity(); assembly.num_inputs + assembly.num_aux];
    let mut b_g2 = vec![E::G2Affine::identity(); assembly.num_inputs + assembly.num_aux];
    let mut ic = vec![E::G1Affine::identity(); assembly.num_inputs];
    let mut l = vec![E::G1Affine::identity(); assembly.num_aux];

    // Evaluates one half (inputs or aux) of the QAP at tau and fills the
    // corresponding slices of the A/B queries and the IC-or-L query.
    #[allow(clippy::too_many_arguments)]
    fn eval<E: Engine>(
        // wNAF window tables
        g1_wnaf: &Wnaf<usize, &[E::G1], &mut Vec<i64>>,
        g2_wnaf: &Wnaf<usize, &[E::G2], &mut Vec<i64>>,
        // Lagrange coefficients for tau
        powers_of_tau: &[Scalar<E::Fr>],
        // QAP polynomials
        at: &[Vec<(E::Fr, usize)>],
        bt: &[Vec<(E::Fr, usize)>],
        ct: &[Vec<(E::Fr, usize)>],
        // Resulting evaluated QAP polynomials
        a: &mut [E::G1Affine],
        b_g1: &mut [E::G1Affine],
        b_g2: &mut [E::G2Affine],
        ext: &mut [E::G1Affine],
        // Inverse coefficient for ext elements
        inv: &E::Fr,
        // Trapdoors
        alpha: &E::Fr,
        beta: &E::Fr,
        // Worker
        worker: &Worker,
    ) {
        // Sanity check
        assert_eq!(a.len(), at.len());
        assert_eq!(a.len(), bt.len());
        assert_eq!(a.len(), ct.len());
        assert_eq!(a.len(), b_g1.len());
        assert_eq!(a.len(), b_g2.len());
        assert_eq!(a.len(), ext.len());

        // Evaluate polynomials in multiple threads
        worker.scope(a.len(), |scope, chunk| {
            for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a
                .chunks_mut(chunk)
                .zip(b_g1.chunks_mut(chunk))
                .zip(b_g2.chunks_mut(chunk))
                .zip(ext.chunks_mut(chunk))
                .zip(at.chunks(chunk))
                .zip(bt.chunks(chunk))
                .zip(ct.chunks(chunk))
            {
                let mut g1_wnaf = g1_wnaf.shared();
                let mut g2_wnaf = g2_wnaf.shared();

                scope.spawn(move |_scope| {
                    // Work in projective coordinates, then batch-convert
                    // to affine once per chunk.
                    let mut a_proj = vec![E::G1::identity(); a.len()];
                    let mut b_g1_proj = vec![E::G1::identity(); b_g1.len()];
                    let mut b_g2_proj = vec![E::G2::identity(); b_g2.len()];
                    let mut ext_proj = vec![E::G1::identity(); ext.len()];

                    for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a_proj
                        .iter_mut()
                        .zip(b_g1_proj.iter_mut())
                        .zip(b_g2_proj.iter_mut())
                        .zip(ext_proj.iter_mut())
                        .zip(at.iter())
                        .zip(bt.iter())
                        .zip(ct.iter())
                    {
                        // Evaluates a sparse polynomial at tau using the
                        // precomputed (Lagrange-basis) powers.
                        fn eval_at_tau<S: PrimeField>(
                            powers_of_tau: &[Scalar<S>],
                            p: &[(S, usize)],
                        ) -> S {
                            let mut acc = S::zero();

                            for &(ref coeff, index) in p {
                                let mut n = powers_of_tau[index].0;
                                n.mul_assign(coeff);
                                acc.add_assign(&n);
                            }

                            acc
                        }

                        // Evaluate QAP polynomials at tau
                        let mut at = eval_at_tau(powers_of_tau, at);
                        let mut bt = eval_at_tau(powers_of_tau, bt);
                        let ct = eval_at_tau(powers_of_tau, ct);

                        // Compute A query (in G1)
                        if !at.is_zero_vartime() {
                            *a = g1_wnaf.scalar(&at);
                        }

                        // Compute B query (in G1/G2)
                        if !bt.is_zero_vartime() {
                            *b_g1 = g1_wnaf.scalar(&bt);
                            *b_g2 = g2_wnaf.scalar(&bt);
                        }

                        // ext = (beta * at + alpha * bt + ct) * inv
                        at *= beta;
                        bt *= alpha;

                        let mut e = at;
                        e.add_assign(&bt);
                        e.add_assign(&ct);
                        e.mul_assign(inv);

                        *ext = g1_wnaf.scalar(&e);
                    }

                    // Batch normalize
                    E::G1::batch_normalize(&a_proj, a);
                    E::G1::batch_normalize(&b_g1_proj, b_g1);
                    E::G2::batch_normalize(&b_g2_proj, b_g2);
                    E::G1::batch_normalize(&ext_proj, ext);
                });
            }
        });
    }

    // Evaluate for inputs.
    eval::<E>(
        &g1_wnaf,
        &g2_wnaf,
        &powers_of_tau,
        &assembly.at_inputs,
        &assembly.bt_inputs,
        &assembly.ct_inputs,
        &mut a[0..assembly.num_inputs],
        &mut b_g1[0..assembly.num_inputs],
        &mut b_g2[0..assembly.num_inputs],
        &mut ic,
        &gamma_inverse,
        &alpha,
        &beta,
        &worker,
    );

    // Evaluate for auxiliary variables.
    eval::<E>(
        &g1_wnaf,
        &g2_wnaf,
        &powers_of_tau,
        &assembly.at_aux,
        &assembly.bt_aux,
        &assembly.ct_aux,
        &mut a[assembly.num_inputs..],
        &mut b_g1[assembly.num_inputs..],
        &mut b_g2[assembly.num_inputs..],
        &mut l,
        &delta_inverse,
        &alpha,
        &beta,
        &worker,
    );

    // Don't allow any elements be unconstrained, so that
    // the L query is always fully dense.
    for e in l.iter() {
        if e.is_identity().into() {
            return Err(SynthesisError::UnconstrainedVariable);
        }
    }

    let g1 = g1.to_affine();
    let g2 = g2.to_affine();

    let vk = VerifyingKey::<E> {
        alpha_g1: (g1 * alpha).to_affine(),
        beta_g1: (g1 * beta).to_affine(),
        beta_g2: (g2 * beta).to_affine(),
        gamma_g2: (g2 * gamma).to_affine(),
        delta_g1: (g1 * delta).to_affine(),
        delta_g2: (g2 * delta).to_affine(),
        ic,
    };

    Ok(Parameters {
        vk,
        h: Arc::new(h),
        l: Arc::new(l),

        // Filter points at infinity away from A/B queries
        a: Arc::new(
            a.into_iter()
                .filter(|e| bool::from(!e.is_identity()))
                .collect(),
        ),
        b_g1: Arc::new(
            b_g1.into_iter()
                .filter(|e| bool::from(!e.is_identity()))
                .collect(),
        ),
        b_g2: Arc::new(
            b_g2.into_iter()
                .filter(|e| bool::from(!e.is_identity()))
                .collect(),
        ),
    })
}

569
third_party/bellman/src/groth16/mod.rs vendored Normal file
View File

@@ -0,0 +1,569 @@
//! The [Groth16] proving system.
//!
//! [Groth16]: https://eprint.iacr.org/2016/260
use group::{prime::PrimeCurveAffine, GroupEncoding, UncompressedEncoding};
use pairing::{Engine, MultiMillerLoop};
use crate::SynthesisError;
use crate::multiexp::SourceBuilder;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use std::io::{self, Read, Write};
use std::sync::Arc;
#[cfg(test)]
mod tests;
mod generator;
mod prover;
mod verifier;
pub use self::generator::*;
pub use self::prover::*;
pub use self::verifier::*;
/// A Groth16 proof: the three group elements produced by the prover.
#[derive(Clone, Debug)]
pub struct Proof<E: Engine> {
    pub a: E::G1Affine,
    pub b: E::G2Affine,
    pub c: E::G1Affine,
}
impl<E: Engine> PartialEq for Proof<E> {
    fn eq(&self, other: &Self) -> bool {
        // Component-wise comparison of the three proof elements.
        (&self.a, &self.b, &self.c) == (&other.a, &other.b, &other.c)
    }
}
impl<E: Engine> Proof<E> {
    /// Serializes this proof as the compressed encodings of `a`, `b`, `c`
    /// in that order.
    pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
        writer.write_all(self.a.to_bytes().as_ref())?;
        writer.write_all(self.b.to_bytes().as_ref())?;
        writer.write_all(self.c.to_bytes().as_ref())?;

        Ok(())
    }

    /// Deserializes a proof written by [`Proof::write`], rejecting
    /// malformed encodings and points at infinity.
    pub fn read<R: Read>(mut reader: R) -> io::Result<Self> {
        fn invalid(msg: &'static str) -> io::Error {
            io::Error::new(io::ErrorKind::InvalidData, msg)
        }

        let read_g1 = |reader: &mut R| -> io::Result<E::G1Affine> {
            let mut g1_repr = <E::G1Affine as GroupEncoding>::Repr::default();
            reader.read_exact(g1_repr.as_mut())?;

            // Curve/encoding validation happens inside `from_bytes`.
            let point: E::G1Affine = Option::from(E::G1Affine::from_bytes(&g1_repr))
                .ok_or_else(|| invalid("invalid G1"))?;

            if bool::from(point.is_identity()) {
                return Err(invalid("point at infinity"));
            }

            Ok(point)
        };

        let read_g2 = |reader: &mut R| -> io::Result<E::G2Affine> {
            let mut g2_repr = <E::G2Affine as GroupEncoding>::Repr::default();
            reader.read_exact(g2_repr.as_mut())?;

            let point: E::G2Affine = Option::from(E::G2Affine::from_bytes(&g2_repr))
                .ok_or_else(|| invalid("invalid G2"))?;

            if bool::from(point.is_identity()) {
                return Err(invalid("point at infinity"));
            }

            Ok(point)
        };

        // Fields are read in the same order `write` emits them.
        Ok(Proof {
            a: read_g1(&mut reader)?,
            b: read_g2(&mut reader)?,
            c: read_g1(&mut reader)?,
        })
    }
}
/// The Groth16 verifying key: the subset of the CRS needed to check proofs.
#[derive(Clone)]
pub struct VerifyingKey<E: Engine> {
    // alpha in g1 for verifying and for creating A/C elements of
    // proof. Never the point at infinity.
    pub alpha_g1: E::G1Affine,
    // beta in g1 and g2 for verifying and for creating B/C elements
    // of proof. Never the point at infinity.
    pub beta_g1: E::G1Affine,
    pub beta_g2: E::G2Affine,
    // gamma in g2 for verifying. Never the point at infinity.
    pub gamma_g2: E::G2Affine,
    // delta in g1/g2 for verifying and proving, essentially the magic
    // trapdoor that forces the prover to evaluate the C element of the
    // proof with only components from the CRS. Never the point at
    // infinity.
    pub delta_g1: E::G1Affine,
    pub delta_g2: E::G2Affine,
    // Elements of the form (beta * u_i(tau) + alpha v_i(tau) + w_i(tau)) / gamma
    // for all public inputs. Because all public inputs have a dummy constraint,
    // this is the same size as the number of inputs, and never contains points
    // at infinity.
    pub ic: Vec<E::G1Affine>,
}
impl<E: Engine> PartialEq for VerifyingKey<E> {
    fn eq(&self, other: &Self) -> bool {
        // Field-by-field comparison expressed as a single tuple equality.
        (
            &self.alpha_g1,
            &self.beta_g1,
            &self.beta_g2,
            &self.gamma_g2,
            &self.delta_g1,
            &self.delta_g2,
            &self.ic,
        ) == (
            &other.alpha_g1,
            &other.beta_g1,
            &other.beta_g2,
            &other.gamma_g2,
            &other.delta_g1,
            &other.delta_g2,
            &other.ic,
        )
    }
}
impl<E: Engine> VerifyingKey<E> {
    /// Writes the key as uncompressed points, with a big-endian `u32`
    /// count prefixing the IC elements.
    pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
        writer.write_all(self.alpha_g1.to_uncompressed().as_ref())?;
        writer.write_all(self.beta_g1.to_uncompressed().as_ref())?;
        writer.write_all(self.beta_g2.to_uncompressed().as_ref())?;
        writer.write_all(self.gamma_g2.to_uncompressed().as_ref())?;
        writer.write_all(self.delta_g1.to_uncompressed().as_ref())?;
        writer.write_all(self.delta_g2.to_uncompressed().as_ref())?;
        writer.write_u32::<BigEndian>(self.ic.len() as u32)?;
        for ic in &self.ic {
            writer.write_all(ic.to_uncompressed().as_ref())?;
        }

        Ok(())
    }

    /// Reads a key written by [`VerifyingKey::write`]. Encodings are
    /// validated; additionally, IC elements (and only those) are rejected
    /// if they are the point at infinity.
    pub fn read<R: Read>(mut reader: R) -> io::Result<Self> {
        fn invalid(msg: &'static str) -> io::Error {
            io::Error::new(io::ErrorKind::InvalidData, msg)
        }

        let read_g1 = |reader: &mut R| -> io::Result<E::G1Affine> {
            let mut g1_repr = <E::G1Affine as UncompressedEncoding>::Uncompressed::default();
            reader.read_exact(g1_repr.as_mut())?;

            Option::from(E::G1Affine::from_uncompressed(&g1_repr))
                .ok_or_else(|| invalid("invalid G1"))
        };

        let read_g2 = |reader: &mut R| -> io::Result<E::G2Affine> {
            let mut g2_repr = <E::G2Affine as UncompressedEncoding>::Uncompressed::default();
            reader.read_exact(g2_repr.as_mut())?;

            Option::from(E::G2Affine::from_uncompressed(&g2_repr))
                .ok_or_else(|| invalid("invalid G2"))
        };

        let alpha_g1 = read_g1(&mut reader)?;
        let beta_g1 = read_g1(&mut reader)?;
        let beta_g2 = read_g2(&mut reader)?;
        let gamma_g2 = read_g2(&mut reader)?;
        let delta_g1 = read_g1(&mut reader)?;
        let delta_g2 = read_g2(&mut reader)?;

        let ic_len = reader.read_u32::<BigEndian>()? as usize;

        let mut ic = vec![];
        for _ in 0..ic_len {
            let point = read_g1(&mut reader)?;
            if bool::from(point.is_identity()) {
                return Err(invalid("point at infinity"));
            }
            ic.push(point);
        }

        Ok(VerifyingKey {
            alpha_g1,
            beta_g1,
            beta_g2,
            gamma_g2,
            delta_g1,
            delta_g2,
            ic,
        })
    }
}
/// The full Groth16 common reference string (proving parameters plus
/// the embedded verifying key).
#[derive(Clone)]
pub struct Parameters<E: Engine> {
    pub vk: VerifyingKey<E>,
    // Elements of the form ((tau^i * t(tau)) / delta) for i between 0 and
    // m-2 inclusive. Never contains points at infinity.
    pub h: Arc<Vec<E::G1Affine>>,
    // Elements of the form (beta * u_i(tau) + alpha v_i(tau) + w_i(tau)) / delta
    // for all auxiliary inputs. Variables can never be unconstrained, so this
    // never contains points at infinity.
    pub l: Arc<Vec<E::G1Affine>>,
    // QAP "A" polynomials evaluated at tau in the Lagrange basis. Never contains
    // points at infinity: polynomials that evaluate to zero are omitted from
    // the CRS and the prover can deterministically skip their evaluation.
    pub a: Arc<Vec<E::G1Affine>>,
    // QAP "B" polynomials evaluated at tau in the Lagrange basis. Needed in
    // G1 and G2 for C/B queries, respectively. Never contains points at
    // infinity for the same reason as the "A" polynomials.
    pub b_g1: Arc<Vec<E::G1Affine>>,
    pub b_g2: Arc<Vec<E::G2Affine>>,
}
impl<E: Engine> PartialEq for Parameters<E> {
    fn eq(&self, other: &Self) -> bool {
        // Tuple equality compares the `Arc` contents by value (through
        // `Vec`'s `PartialEq`), not by pointer.
        (&self.vk, &self.h, &self.l, &self.a, &self.b_g1, &self.b_g2)
            == (
                &other.vk,
                &other.h,
                &other.l,
                &other.a,
                &other.b_g1,
                &other.b_g2,
            )
    }
}
impl<E: Engine> Parameters<E> {
    /// Serializes the parameters: the verifying key first, then each query
    /// vector as a big-endian `u32` length prefix followed by the
    /// uncompressed encodings of its points.
    pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
        // Writes one length-prefixed vector of uncompressed group elements.
        // Generic over the group so the same helper serves G1 and G2.
        fn write_points<W: Write, G: UncompressedEncoding>(
            writer: &mut W,
            points: &[G],
        ) -> io::Result<()> {
            writer.write_u32::<BigEndian>(points.len() as u32)?;
            for g in points {
                writer.write_all(g.to_uncompressed().as_ref())?;
            }
            Ok(())
        }
        self.vk.write(&mut writer)?;
        write_points(&mut writer, &self.h)?;
        write_points(&mut writer, &self.l)?;
        write_points(&mut writer, &self.a)?;
        write_points(&mut writer, &self.b_g1)?;
        write_points(&mut writer, &self.b_g2)?;
        Ok(())
    }
    /// Deserializes parameters written by [`Parameters::write`].
    ///
    /// When `checked` is true every point is validated by
    /// `from_uncompressed` (curve/subgroup checks); unchecked reads are
    /// faster but must only be used on trusted input. Points at infinity are
    /// rejected in both modes, since valid parameters never contain them.
    pub fn read<R: Read>(mut reader: R, checked: bool) -> io::Result<Self> {
        // Reads a single uncompressed G1 point, rejecting invalid encodings
        // and the point at infinity.
        let read_g1 = |reader: &mut R| -> io::Result<E::G1Affine> {
            let mut repr = <E::G1Affine as UncompressedEncoding>::Uncompressed::default();
            reader.read_exact(repr.as_mut())?;
            let affine = if checked {
                E::G1Affine::from_uncompressed(&repr)
            } else {
                E::G1Affine::from_uncompressed_unchecked(&repr)
            };
            let affine = if affine.is_some().into() {
                Ok(affine.unwrap())
            } else {
                Err(io::Error::new(io::ErrorKind::InvalidData, "invalid G1"))
            };
            affine.and_then(|e| {
                if e.is_identity().into() {
                    Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "point at infinity",
                    ))
                } else {
                    Ok(e)
                }
            })
        };
        // Same as `read_g1`, for G2.
        let read_g2 = |reader: &mut R| -> io::Result<E::G2Affine> {
            let mut repr = <E::G2Affine as UncompressedEncoding>::Uncompressed::default();
            reader.read_exact(repr.as_mut())?;
            let affine = if checked {
                E::G2Affine::from_uncompressed(&repr)
            } else {
                E::G2Affine::from_uncompressed_unchecked(&repr)
            };
            let affine = if affine.is_some().into() {
                Ok(affine.unwrap())
            } else {
                Err(io::Error::new(io::ErrorKind::InvalidData, "invalid G2"))
            };
            affine.and_then(|e| {
                if e.is_identity().into() {
                    Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "point at infinity",
                    ))
                } else {
                    Ok(e)
                }
            })
        };
        // Reads one length-prefixed vector of G1 points. The capacity hint
        // avoids repeated reallocation on large parameter sets; the length
        // comes from the (trusted-setup) input, as upstream assumes.
        let read_g1_vec = |reader: &mut R| -> io::Result<Vec<E::G1Affine>> {
            let len = reader.read_u32::<BigEndian>()? as usize;
            let mut v = Vec::with_capacity(len);
            for _ in 0..len {
                v.push(read_g1(reader)?);
            }
            Ok(v)
        };
        let vk = VerifyingKey::<E>::read(&mut reader)?;
        let h = read_g1_vec(&mut reader)?;
        let l = read_g1_vec(&mut reader)?;
        let a = read_g1_vec(&mut reader)?;
        let b_g1 = read_g1_vec(&mut reader)?;
        let len = reader.read_u32::<BigEndian>()? as usize;
        let mut b_g2 = Vec::with_capacity(len);
        for _ in 0..len {
            b_g2.push(read_g2(&mut reader)?);
        }
        Ok(Parameters {
            vk,
            h: Arc::new(h),
            l: Arc::new(l),
            a: Arc::new(a),
            b_g1: Arc::new(b_g1),
            b_g2: Arc::new(b_g2),
        })
    }
}
/// A [`VerifyingKey`] preprocessed for faster verification: the
/// alpha/beta pairing is precomputed and gamma/delta are negated and put
/// in prepared form for the multi Miller loop.
pub struct PreparedVerifyingKey<E: MultiMillerLoop> {
    /// Pairing result of alpha*beta
    alpha_g1_beta_g2: E::Gt,
    /// -gamma in G2
    neg_gamma_g2: E::G2Prepared,
    /// -delta in G2
    neg_delta_g2: E::G2Prepared,
    /// Copy of IC from `VerifyingKey`.
    ic: Vec<E::G1Affine>,
}
/// Abstraction over where the prover obtains its parameters (in memory,
/// from disk, etc.). The `num_*` arguments are size/density hints a source
/// may use to decide how much data to materialize.
pub trait ParameterSource<E: Engine> {
    /// Multiexp source builder for G1 queries.
    type G1Builder: SourceBuilder<E::G1Affine>;
    /// Multiexp source builder for G2 queries.
    type G2Builder: SourceBuilder<E::G2Affine>;
    /// Returns the verifying key; `num_ic` is the number of IC elements.
    fn get_vk(&mut self, num_ic: usize) -> Result<VerifyingKey<E>, SynthesisError>;
    /// Returns the H query.
    fn get_h(&mut self, num_h: usize) -> Result<Self::G1Builder, SynthesisError>;
    /// Returns the L query (auxiliary-variable terms).
    fn get_l(&mut self, num_l: usize) -> Result<Self::G1Builder, SynthesisError>;
    /// Returns the A query split into (inputs, aux) sources.
    fn get_a(
        &mut self,
        num_inputs: usize,
        num_aux: usize,
    ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>;
    /// Returns the B query over G1, split into (inputs, aux) sources.
    fn get_b_g1(
        &mut self,
        num_inputs: usize,
        num_aux: usize,
    ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>;
    /// Returns the B query over G2, split into (inputs, aux) sources.
    fn get_b_g2(
        &mut self,
        num_inputs: usize,
        num_aux: usize,
    ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError>;
}
impl<'a, E: Engine> ParameterSource<E> for &'a Parameters<E> {
    type G1Builder = (Arc<Vec<E::G1Affine>>, usize);
    type G2Builder = (Arc<Vec<E::G2Affine>>, usize);
    // In-memory parameters ignore the density hints: the full query vectors
    // already exist, so every getter hands out a cheap `Arc` clone together
    // with a starting offset into the vector.
    fn get_vk(&mut self, _: usize) -> Result<VerifyingKey<E>, SynthesisError> {
        Ok(self.vk.clone())
    }
    fn get_h(&mut self, _: usize) -> Result<Self::G1Builder, SynthesisError> {
        Ok((Arc::clone(&self.h), 0))
    }
    fn get_l(&mut self, _: usize) -> Result<Self::G1Builder, SynthesisError> {
        Ok((Arc::clone(&self.l), 0))
    }
    fn get_a(
        &mut self,
        num_inputs: usize,
        _: usize,
    ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError> {
        // Input terms start at offset 0; aux terms directly follow them.
        let inputs = (Arc::clone(&self.a), 0);
        let aux = (Arc::clone(&self.a), num_inputs);
        Ok((inputs, aux))
    }
    fn get_b_g1(
        &mut self,
        num_inputs: usize,
        _: usize,
    ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError> {
        let inputs = (Arc::clone(&self.b_g1), 0);
        let aux = (Arc::clone(&self.b_g1), num_inputs);
        Ok((inputs, aux))
    }
    fn get_b_g2(
        &mut self,
        num_inputs: usize,
        _: usize,
    ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError> {
        let inputs = (Arc::clone(&self.b_g2), 0);
        let aux = (Arc::clone(&self.b_g2), num_inputs);
        Ok((inputs, aux))
    }
}
#[cfg(test)]
mod test_with_bls12_381 {
    use super::*;
    use crate::{Circuit, ConstraintSystem, SynthesisError};
    use bls12_381::{Bls12, Scalar};
    use ff::{Field, PrimeField};
    use rand::thread_rng;
    use std::ops::MulAssign;

    /// Round-trips parameters and proofs through their binary encodings and
    /// checks that proofs still verify afterwards.
    #[test]
    fn serialization() {
        // Toy circuit: prove knowledge of a and b such that a * b = c,
        // where c is the only public input.
        struct MySillyCircuit<Scalar: PrimeField> {
            a: Option<Scalar>,
            b: Option<Scalar>,
        }
        impl<Scalar: PrimeField> Circuit<Scalar> for MySillyCircuit<Scalar> {
            fn synthesize<CS: ConstraintSystem<Scalar>>(
                self,
                cs: &mut CS,
            ) -> Result<(), SynthesisError> {
                let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?;
                let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?;
                let c = cs.alloc_input(
                    || "c",
                    || {
                        let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?;
                        let b = self.b.ok_or(SynthesisError::AssignmentMissing)?;
                        a.mul_assign(&b);
                        Ok(a)
                    },
                )?;
                cs.enforce(|| "a*b=c", |lc| lc + a, |lc| lc + b, |lc| lc + c);
                Ok(())
            }
        }
        let mut rng = thread_rng();
        let params = generate_random_parameters::<Bls12, _, _>(
            MySillyCircuit { a: None, b: None },
            &mut rng,
        )
        .unwrap();
        {
            let mut v = vec![];
            params.write(&mut v).unwrap();
            // Known serialized size for this circuit's parameters.
            assert_eq!(v.len(), 2136);
            // Both checked and unchecked deserialization must round-trip.
            let de_params = Parameters::read(&v[..], true).unwrap();
            assert!(params == de_params);
            let de_params = Parameters::read(&v[..], false).unwrap();
            assert!(params == de_params);
        }
        let pvk = prepare_verifying_key::<Bls12>(&params.vk);
        for _ in 0..100 {
            let a = Scalar::random(&mut rng);
            let b = Scalar::random(&mut rng);
            let mut c = a;
            c.mul_assign(&b);
            let proof = create_random_proof(
                MySillyCircuit {
                    a: Some(a),
                    b: Some(b),
                },
                &params,
                &mut rng,
            )
            .unwrap();
            let mut v = vec![];
            proof.write(&mut v).unwrap();
            // Known serialized proof size on BLS12-381.
            assert_eq!(v.len(), 192);
            let de_proof = Proof::read(&v[..]).unwrap();
            assert!(proof == de_proof);
            // The correct public input verifies; a wrong one must not.
            assert!(verify_proof(&pvk, &proof, &[c]).is_ok());
            assert!(verify_proof(&pvk, &proof, &[a]).is_err());
        }
    }
}

View File

@@ -0,0 +1,352 @@
use rand_core::RngCore;
use std::ops::{AddAssign, MulAssign};
use std::sync::Arc;
use ff::{Field, PrimeField, PrimeFieldBits};
use group::{prime::PrimeCurveAffine, Curve};
use pairing::Engine;
use super::{ParameterSource, Proof};
use crate::{Circuit, ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};
use crate::domain::{EvaluationDomain, Scalar};
use crate::multiexp::{multiexp, DensityTracker, FullDensity};
use crate::multicore::Worker;
/// Evaluates a linear combination against the current variable assignments,
/// recording touched variables in the optional density trackers.
fn eval<S: PrimeField>(
    lc: &LinearCombination<S>,
    mut input_density: Option<&mut DensityTracker>,
    mut aux_density: Option<&mut DensityTracker>,
    input_assignment: &[S],
    aux_assignment: &[S],
) -> S {
    let mut acc = S::zero();
    for &(index, coeff) in lc.0.iter() {
        // Terms with a zero coefficient contribute nothing and must not be
        // counted towards query density.
        if coeff.is_zero_vartime() {
            continue;
        }
        let mut term = match index {
            Variable(Index::Input(i)) => {
                let value = input_assignment[i];
                if let Some(d) = input_density.as_mut() {
                    d.inc(i);
                }
                value
            }
            Variable(Index::Aux(i)) => {
                let value = aux_assignment[i];
                if let Some(d) = aux_density.as_mut() {
                    d.inc(i);
                }
                value
            }
        };
        // Skip the multiplication for the common coefficient of one.
        if coeff != S::one() {
            term *= coeff;
        }
        acc += term;
    }
    acc
}
/// State accumulated while synthesizing a circuit for proving: query
/// densities, per-constraint A/B/C evaluations, and the variable
/// assignments themselves.
struct ProvingAssignment<S: PrimeField> {
    // Density of queries
    a_aux_density: DensityTracker,
    b_input_density: DensityTracker,
    b_aux_density: DensityTracker,
    // Evaluations of A, B, C polynomials
    a: Vec<Scalar<S>>,
    b: Vec<Scalar<S>>,
    c: Vec<Scalar<S>>,
    // Assignments of variables
    input_assignment: Vec<S>,
    aux_assignment: Vec<S>,
}
impl<S: PrimeField> ConstraintSystem<S> for ProvingAssignment<S> {
    type Root = Self;

    /// Allocates a private (auxiliary) variable, storing its assignment and
    /// reserving density slots for it in the A and B aux trackers.
    fn alloc<F, A, AR>(&mut self, _: A, f: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<S, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        self.aux_assignment.push(f()?);
        self.a_aux_density.add_element();
        self.b_aux_density.add_element();
        Ok(Variable(Index::Aux(self.aux_assignment.len() - 1)))
    }

    /// Allocates a public input variable. Inputs get no A density slot
    /// because they are made fully dense in the A query (see `enforce`).
    fn alloc_input<F, A, AR>(&mut self, _: A, f: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<S, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        self.input_assignment.push(f()?);
        self.b_input_density.add_element();
        Ok(Variable(Index::Input(self.input_assignment.len() - 1)))
    }

    /// Records one R1CS constraint a * b = c by evaluating its three linear
    /// combinations against the current assignments.
    fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, a: LA, b: LB, c: LC)
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
        LA: FnOnce(LinearCombination<S>) -> LinearCombination<S>,
        LB: FnOnce(LinearCombination<S>) -> LinearCombination<S>,
        LC: FnOnce(LinearCombination<S>) -> LinearCombination<S>,
    {
        let a = a(LinearCombination::zero());
        let b = b(LinearCombination::zero());
        let c = c(LinearCombination::zero());
        self.a.push(Scalar(eval(
            &a,
            // Inputs have full density in the A query
            // because there are constraints of the
            // form x * 0 = 0 for each input.
            None,
            Some(&mut self.a_aux_density),
            &self.input_assignment,
            &self.aux_assignment,
        )));
        self.b.push(Scalar(eval(
            &b,
            Some(&mut self.b_input_density),
            Some(&mut self.b_aux_density),
            &self.input_assignment,
            &self.aux_assignment,
        )));
        self.c.push(Scalar(eval(
            &c,
            // There is no C polynomial query,
            // though there is an (beta)A + (alpha)B + C
            // query for all aux variables.
            // However, that query has full density.
            None,
            None,
            &self.input_assignment,
            &self.aux_assignment,
        )));
    }

    fn push_namespace<NR, N>(&mut self, _: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn pop_namespace(&mut self) {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn get_root(&mut self) -> &mut Self::Root {
        self
    }
}
/// Creates a Groth16 proof, sampling the two blinding factors from `rng`
/// and delegating to the deterministic [`create_proof`].
pub fn create_random_proof<E, C, R, P: ParameterSource<E>>(
    circuit: C,
    params: P,
    mut rng: &mut R,
) -> Result<Proof<E>, SynthesisError>
where
    E: Engine,
    E::Fr: PrimeFieldBits,
    C: Circuit<E::Fr>,
    R: RngCore,
{
    let (r, s) = (E::Fr::random(&mut rng), E::Fr::random(&mut rng));
    create_proof::<E, C, P>(circuit, params, r, s)
}
#[allow(clippy::many_single_char_names)]
/// Creates a Groth16 proof with explicit blinding factors `r` and `s`.
///
/// Most callers should use `create_random_proof`, which samples `r` and `s`
/// randomly; fixed values are useful for deterministic tests.
pub fn create_proof<E, C, P: ParameterSource<E>>(
    circuit: C,
    mut params: P,
    r: E::Fr,
    s: E::Fr,
) -> Result<Proof<E>, SynthesisError>
where
    E: Engine,
    E::Fr: PrimeFieldBits,
    C: Circuit<E::Fr>,
{
    // Synthesize the circuit, collecting assignments, per-constraint A/B/C
    // evaluations, and query densities.
    let mut prover = ProvingAssignment {
        a_aux_density: DensityTracker::new(),
        b_input_density: DensityTracker::new(),
        b_aux_density: DensityTracker::new(),
        a: vec![],
        b: vec![],
        c: vec![],
        input_assignment: vec![],
        aux_assignment: vec![],
    };
    // The first public input is always the constant one.
    prover.alloc_input(|| "", || Ok(E::Fr::one()))?;
    circuit.synthesize(&mut prover)?;
    // Add an x * 0 = 0 constraint per input so inputs are fully dense in
    // the A query.
    for i in 0..prover.input_assignment.len() {
        prover.enforce(|| "", |lc| lc + Variable(Index::Input(i)), |lc| lc, |lc| lc);
    }
    let worker = Worker::new();
    let vk = params.get_vk(prover.input_assignment.len())?;
    // Compute the coefficients of h(x) = (a(x)b(x) - c(x)) / t(x) via coset
    // FFTs, then commit to them with a multiexp against the H query.
    let h = {
        let mut a = EvaluationDomain::from_coeffs(prover.a)?;
        let mut b = EvaluationDomain::from_coeffs(prover.b)?;
        let mut c = EvaluationDomain::from_coeffs(prover.c)?;
        a.ifft(&worker);
        a.coset_fft(&worker);
        b.ifft(&worker);
        b.coset_fft(&worker);
        c.ifft(&worker);
        c.coset_fft(&worker);
        a.mul_assign(&worker, &b);
        drop(b);
        a.sub_assign(&worker, &c);
        drop(c);
        a.divide_by_z_on_coset(&worker);
        a.icoset_fft(&worker);
        let mut a = a.into_coeffs();
        let a_len = a.len() - 1;
        a.truncate(a_len);
        // TODO: parallelize if it's even helpful
        let a = Arc::new(a.into_iter().map(|s| s.0.into()).collect::<Vec<_>>());
        multiexp(&worker, params.get_h(a.len())?, FullDensity, a)
    };
    // TODO: parallelize if it's even helpful
    let input_assignment = Arc::new(
        prover
            .input_assignment
            .into_iter()
            .map(|s| s.into())
            .collect::<Vec<_>>(),
    );
    let aux_assignment = Arc::new(
        prover
            .aux_assignment
            .into_iter()
            .map(|s| s.into())
            .collect::<Vec<_>>(),
    );
    // L query multiexp over the auxiliary assignments (full density).
    let l = multiexp(
        &worker,
        params.get_l(aux_assignment.len())?,
        FullDensity,
        aux_assignment.clone(),
    );
    // A query: inputs are fully dense; aux terms use the recorded density.
    let a_aux_density_total = prover.a_aux_density.get_total_density();
    let (a_inputs_source, a_aux_source) =
        params.get_a(input_assignment.len(), a_aux_density_total)?;
    let a_inputs = multiexp(
        &worker,
        a_inputs_source,
        FullDensity,
        input_assignment.clone(),
    );
    let a_aux = multiexp(
        &worker,
        a_aux_source,
        Arc::new(prover.a_aux_density),
        aux_assignment.clone(),
    );
    // B query in both G1 and G2, each split into input and aux parts.
    let b_input_density = Arc::new(prover.b_input_density);
    let b_input_density_total = b_input_density.get_total_density();
    let b_aux_density = Arc::new(prover.b_aux_density);
    let b_aux_density_total = b_aux_density.get_total_density();
    let (b_g1_inputs_source, b_g1_aux_source) =
        params.get_b_g1(b_input_density_total, b_aux_density_total)?;
    let b_g1_inputs = multiexp(
        &worker,
        b_g1_inputs_source,
        b_input_density.clone(),
        input_assignment.clone(),
    );
    let b_g1_aux = multiexp(
        &worker,
        b_g1_aux_source,
        b_aux_density.clone(),
        aux_assignment.clone(),
    );
    let (b_g2_inputs_source, b_g2_aux_source) =
        params.get_b_g2(b_input_density_total, b_aux_density_total)?;
    let b_g2_inputs = multiexp(
        &worker,
        b_g2_inputs_source,
        b_input_density,
        input_assignment,
    );
    let b_g2_aux = multiexp(&worker, b_g2_aux_source, b_aux_density, aux_assignment);
    if bool::from(vk.delta_g1.is_identity() | vk.delta_g2.is_identity()) {
        // If this element is zero, someone is trying to perform a
        // subversion-CRS attack.
        return Err(SynthesisError::UnexpectedIdentity);
    }
    // Assemble the proof elements A, B, C from the multiexp results and the
    // blinding factors.
    let mut g_a = vk.delta_g1 * r;
    AddAssign::<&E::G1Affine>::add_assign(&mut g_a, &vk.alpha_g1);
    let mut g_b = vk.delta_g2 * s;
    AddAssign::<&E::G2Affine>::add_assign(&mut g_b, &vk.beta_g2);
    let mut g_c;
    {
        let mut rs = r;
        rs.mul_assign(&s);
        g_c = vk.delta_g1 * rs;
        AddAssign::<&E::G1>::add_assign(&mut g_c, &(vk.alpha_g1 * s));
        AddAssign::<&E::G1>::add_assign(&mut g_c, &(vk.beta_g1 * r));
    }
    let mut a_answer = a_inputs.wait()?;
    AddAssign::<&E::G1>::add_assign(&mut a_answer, &a_aux.wait()?);
    AddAssign::<&E::G1>::add_assign(&mut g_a, &a_answer);
    MulAssign::<E::Fr>::mul_assign(&mut a_answer, s);
    AddAssign::<&E::G1>::add_assign(&mut g_c, &a_answer);
    let mut b1_answer: E::G1 = b_g1_inputs.wait()?;
    AddAssign::<&E::G1>::add_assign(&mut b1_answer, &b_g1_aux.wait()?);
    let mut b2_answer = b_g2_inputs.wait()?;
    AddAssign::<&E::G2>::add_assign(&mut b2_answer, &b_g2_aux.wait()?);
    AddAssign::<&E::G2>::add_assign(&mut g_b, &b2_answer);
    MulAssign::<E::Fr>::mul_assign(&mut b1_answer, r);
    AddAssign::<&E::G1>::add_assign(&mut g_c, &b1_answer);
    AddAssign::<&E::G1>::add_assign(&mut g_c, &h.wait()?);
    AddAssign::<&E::G1>::add_assign(&mut g_c, &l.wait()?);
    Ok(Proof {
        a: g_a.to_affine(),
        b: g_b.to_affine(),
        c: g_c.to_affine(),
    })
}

View File

@@ -0,0 +1,495 @@
use ff::{Field, FieldBits, PrimeField, PrimeFieldBits};
use group::{
prime::{PrimeCurve, PrimeCurveAffine, PrimeGroup},
Curve, Group, GroupEncoding, UncompressedEncoding, WnafGroup,
};
use pairing::{Engine, MillerLoopResult, MultiMillerLoop, PairingCurveAffine};
use rand_core::RngCore;
use std::fmt;
use std::iter::Sum;
use std::num::Wrapping;
use std::ops::{Add, AddAssign, BitAnd, Mul, MulAssign, Neg, Shr, Sub, SubAssign};
use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
/// Modulus of the toy scalar field: r = 64513 = 63 * 2^10 + 1 (a 16-bit
/// prime with 2-adicity 10).
const MODULUS_R: Wrapping<u32> = Wrapping(64513);
/// Element of the toy prime field F_64513, stored as a reduced `u32`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Fr(Wrapping<u32>);
impl Default for Fr {
    fn default() -> Self {
        <Fr as Field>::zero()
    }
}
impl ConstantTimeEq for Fr {
    fn ct_eq(&self, other: &Fr) -> Choice {
        // Delegate to the constant-time comparison on the raw u32.
        (self.0).0.ct_eq(&(other.0).0)
    }
}
impl fmt::Display for Fr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        write!(f, "{}", (self.0).0)
    }
}
impl From<u64> for Fr {
    fn from(v: u64) -> Fr {
        // Reduce modulo r; the result always fits in a u32.
        Fr(Wrapping((v % MODULUS_R.0 as u64) as u32))
    }
}
impl ConditionallySelectable for Fr {
    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        Fr(Wrapping(u32::conditional_select(
            &(a.0).0,
            &(b.0).0,
            choice,
        )))
    }
}
impl Sum for Fr {
    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
        iter.fold(Self::zero(), ::std::ops::Add::add)
    }
}
impl<'r> Sum<&'r Fr> for Fr {
    fn sum<I: Iterator<Item = &'r Fr>>(iter: I) -> Self {
        iter.fold(Self::zero(), ::std::ops::Add::add)
    }
}
impl Neg for Fr {
    type Output = Self;
    fn neg(self) -> Self {
        // -0 = 0; any other element maps to r - x.
        if <Fr as Field>::is_zero_vartime(&self) {
            self
        } else {
            Fr(MODULUS_R - self.0)
        }
    }
}
impl<'r> Add<&'r Fr> for Fr {
    type Output = Self;
    fn add(self, other: &Self) -> Self {
        // Both operands are reduced, so the sum fits in a u32 before the
        // final reduction.
        Fr((self.0 + other.0) % MODULUS_R)
    }
}
impl Add for Fr {
    type Output = Self;
    #[allow(clippy::op_ref)]
    fn add(self, other: Self) -> Self {
        self + &other
    }
}
impl<'r> AddAssign<&'r Fr> for Fr {
    fn add_assign(&mut self, other: &Self) {
        *self = *self + other;
    }
}
impl AddAssign for Fr {
    fn add_assign(&mut self, other: Self) {
        *self = *self + &other;
    }
}
impl<'r> Sub<&'r Fr> for Fr {
    type Output = Self;
    fn sub(self, other: &Self) -> Self {
        // Add the modulus first so the subtraction cannot underflow.
        Fr(((MODULUS_R + self.0) - other.0) % MODULUS_R)
    }
}
impl Sub for Fr {
    type Output = Self;
    #[allow(clippy::op_ref)]
    fn sub(self, other: Self) -> Self {
        self - &other
    }
}
impl<'r> SubAssign<&'r Fr> for Fr {
    fn sub_assign(&mut self, other: &Self) {
        *self = *self - other;
    }
}
impl SubAssign for Fr {
    fn sub_assign(&mut self, other: Self) {
        *self = *self - &other;
    }
}
impl<'r> Mul<&'r Fr> for Fr {
    type Output = Self;
    fn mul(self, other: &Self) -> Self {
        // Largest product is 64512^2 < 2^32, so u32 arithmetic suffices.
        Fr((self.0 * other.0) % MODULUS_R)
    }
}
impl Mul for Fr {
    type Output = Self;
    #[allow(clippy::op_ref)]
    fn mul(self, other: Self) -> Self {
        self * &other
    }
}
impl<'r> MulAssign<&'r Fr> for Fr {
    fn mul_assign(&mut self, other: &Self) {
        *self = *self * other;
    }
}
impl MulAssign for Fr {
    fn mul_assign(&mut self, other: Self) {
        *self = *self * &other;
    }
}
impl BitAnd<u64> for Fr {
    type Output = u64;
    fn bitand(self, rhs: u64) -> u64 {
        // Expose the raw representative for bit tests.
        u64::from((self.0).0) & rhs
    }
}
impl Shr<u32> for Fr {
    type Output = Fr;
    fn shr(self, rhs: u32) -> Fr {
        Fr(Wrapping((self.0).0 >> rhs))
    }
}
impl Field for Fr {
    fn random(mut rng: impl RngCore) -> Self {
        // One u32 draw reduced mod r; slightly biased, but fine for tests.
        Fr(Wrapping(rng.next_u32()) % MODULUS_R)
    }
    fn zero() -> Self {
        Fr(Wrapping(0))
    }
    fn one() -> Self {
        Fr(Wrapping(1))
    }
    fn is_zero(&self) -> Choice {
        (self.0).0.ct_eq(&0)
    }
    fn square(&self) -> Self {
        Fr((self.0 * self.0) % MODULUS_R)
    }
    fn double(&self) -> Self {
        Fr((self.0 << 1) % MODULUS_R)
    }
    fn invert(&self) -> CtOption<Self> {
        // Fermat's little theorem: x^(r - 2) = x^-1 for nonzero x.
        CtOption::new(
            self.pow_vartime(&[(MODULUS_R.0 as u64) - 2]),
            !<Fr as Field>::is_zero(self),
        )
    }
    #[allow(clippy::many_single_char_names)]
    fn sqrt(&self) -> CtOption<Self> {
        // Tonelli-Shank's algorithm for q mod 16 = 1
        // https://eprint.iacr.org/2012/685.pdf (page 12, algorithm 5)
        // Here r - 1 = t * 2^S with t = 63 and S = 10.
        let mut c = Fr::root_of_unity();
        // r = self^((t + 1) // 2)
        let mut r = self.pow_vartime([32u64]);
        // t = self^t
        let mut t = self.pow_vartime([63u64]);
        let mut m = Fr::S;
        while t != <Fr as Field>::one() {
            // Find the least i such that t^(2^i) = 1.
            let mut i = 1;
            {
                let mut t2i = t.square();
                loop {
                    if t2i == <Fr as Field>::one() {
                        break;
                    }
                    t2i = t2i.square();
                    i += 1;
                }
            }
            for _ in 0..(m - i - 1) {
                c = c.square();
            }
            MulAssign::mul_assign(&mut r, &c);
            c = c.square();
            MulAssign::mul_assign(&mut t, &c);
            m = i;
        }
        // r is a valid square root iff r^2 == self.
        CtOption::new(r, (r * r).ct_eq(self))
    }
}
/// Little-endian byte representation of [`Fr`] (8 bytes).
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct FrRepr([u8; 8]);
impl From<Fr> for FrRepr {
    fn from(v: Fr) -> FrRepr {
        FrRepr::from(&v)
    }
}
impl<'a> From<&'a Fr> for FrRepr {
    fn from(v: &'a Fr) -> FrRepr {
        // Widen the reduced u32 to u64 and store little-endian.
        FrRepr(((v.0).0 as u64).to_le_bytes())
    }
}
impl AsMut<[u8]> for FrRepr {
    fn as_mut(&mut self) -> &mut [u8] {
        &mut self.0[..]
    }
}
impl AsRef<[u8]> for FrRepr {
    fn as_ref(&self) -> &[u8] {
        &self.0[..]
    }
}
impl Default for FrRepr {
    fn default() -> FrRepr {
        FrRepr([0; 8])
    }
}
impl PrimeField for Fr {
    type Repr = FrRepr;
    // r = 64513 needs 16 bits; CAPACITY is NUM_BITS - 1.
    const NUM_BITS: u32 = 16;
    const CAPACITY: u32 = 15;
    // r - 1 = 63 * 2^10, so the 2-adicity is 10.
    const S: u32 = 10;
    fn from_repr(repr: FrRepr) -> CtOption<Self> {
        let v = u64::from_le_bytes(repr.0);
        // Only canonical (fully reduced) representations are accepted.
        let is_some = Choice::from(if v >= (MODULUS_R.0 as u64) { 0 } else { 1 });
        CtOption::new(Fr(Wrapping(v as u32)), is_some)
    }
    fn to_repr(&self) -> FrRepr {
        FrRepr::from(*self)
    }
    fn is_odd(&self) -> Choice {
        Choice::from(((self.0).0 % 2) as u8)
    }
    fn multiplicative_generator() -> Fr {
        Fr(Wrapping(5))
    }
    fn root_of_unity() -> Fr {
        // A primitive 2^10-th root of unity (presumably the generator raised
        // to the odd part of r - 1, i.e. 5^63 mod r — TODO confirm).
        Fr(Wrapping(57751))
    }
}
impl PrimeFieldBits for Fr {
    type ReprBits = u64;
    fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
        FieldBits::new((self.0).0 as u64)
    }
    fn char_le_bits() -> FieldBits<Self::ReprBits> {
        // The field characteristic r, as little-endian bits.
        FieldBits::new(MODULUS_R.0 as u64)
    }
}
/// A mock pairing engine where every group is the tiny field `Fr` itself
/// and "pairing" is plain field multiplication; used for deterministic
/// unit tests.
#[derive(Clone, Debug)]
pub struct DummyEngine;
impl Engine for DummyEngine {
    type Fr = Fr;
    type G1 = Fr;
    type G1Affine = Fr;
    type G2 = Fr;
    type G2Affine = Fr;
    // TODO: This should be F_645131 or something. Doesn't matter for now.
    type Gt = Fr;
    fn pairing(p: &Self::G1Affine, q: &Self::G2Affine) -> Self::Gt {
        Self::multi_miller_loop(&[(p, &(*q))]).final_exponentiation()
    }
}
impl MultiMillerLoop for DummyEngine {
    type G2Prepared = Fr;
    // TODO: This should be F_645131 or something. Doesn't matter for now.
    type Result = Fr;
    /// The mock "Miller loop" is just the dot product of the term pairs in
    /// the scalar field.
    fn multi_miller_loop(terms: &[(&Self::G1Affine, &Self::G2Prepared)]) -> Self::Result {
        terms.iter().map(|&(g1, g2)| *g1 * *g2).sum()
    }
}
impl MillerLoopResult for Fr {
    type Gt = Fr;
    /// Perform final exponentiation of the result of a miller loop.
    /// In the dummy engine this is the identity map.
    fn final_exponentiation(&self) -> Self::Gt {
        *self
    }
}
// The mock "group" is the additive group of the field itself.
impl Group for Fr {
    type Scalar = Fr;
    fn random(rng: impl RngCore) -> Self {
        <Fr as Field>::random(rng)
    }
    fn identity() -> Self {
        <Fr as Field>::zero()
    }
    fn generator() -> Self {
        <Fr as Field>::one()
    }
    fn is_identity(&self) -> Choice {
        <Fr as Field>::is_zero(self)
    }
    fn double(&self) -> Self {
        <Fr as Field>::double(self)
    }
}
impl PrimeGroup for Fr {}
// Affine and projective forms coincide for the mock group.
impl Curve for Fr {
    type AffineRepr = Fr;
    fn to_affine(&self) -> Fr {
        *self
    }
}
impl WnafGroup for Fr {
    fn recommended_wnaf_for_num_scalars(_: usize) -> usize {
        // Arbitrary small window; performance is irrelevant in tests.
        3
    }
}
impl PrimeCurve for Fr {
    type Affine = Fr;
}
/// Placeholder point encoding; the dummy engine never serializes group
/// elements, so the byte accessors are intentionally unimplemented.
#[derive(Copy, Clone, Default)]
pub struct FakePoint;
impl AsMut<[u8]> for FakePoint {
    fn as_mut(&mut self) -> &mut [u8] {
        unimplemented!()
    }
}
impl AsRef<[u8]> for FakePoint {
    fn as_ref(&self) -> &[u8] {
        unimplemented!()
    }
}
impl PrimeCurveAffine for Fr {
    type Curve = Fr;
    type Scalar = Fr;
    fn identity() -> Self {
        <Fr as Field>::zero()
    }
    fn generator() -> Self {
        <Fr as Field>::one()
    }
    fn is_identity(&self) -> Choice {
        <Fr as Field>::is_zero(self)
    }
    fn to_curve(&self) -> Self::Curve {
        *self
    }
}
// Serialization is unsupported for the dummy engine.
impl GroupEncoding for Fr {
    type Repr = FakePoint;
    fn from_bytes(_bytes: &Self::Repr) -> CtOption<Self> {
        unimplemented!()
    }
    fn from_bytes_unchecked(_bytes: &Self::Repr) -> CtOption<Self> {
        unimplemented!()
    }
    fn to_bytes(&self) -> Self::Repr {
        unimplemented!()
    }
}
// Uncompressed serialization is likewise unsupported for the dummy engine.
impl UncompressedEncoding for Fr {
    type Uncompressed = FakePoint;
    fn from_uncompressed(_bytes: &Self::Uncompressed) -> CtOption<Self> {
        unimplemented!()
    }
    fn from_uncompressed_unchecked(_bytes: &Self::Uncompressed) -> CtOption<Self> {
        unimplemented!()
    }
    fn to_uncompressed(&self) -> Self::Uncompressed {
        unimplemented!()
    }
}
impl PairingCurveAffine for Fr {
    type Pair = Fr;
    type PairingResult = Fr;
    fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult {
        // Pairing in the dummy engine is plain field multiplication.
        self.mul(*other)
    }
}

View File

@@ -0,0 +1,448 @@
use ff::{Field, PrimeField};
mod dummy_engine;
use self::dummy_engine::*;
use std::marker::PhantomData;
use std::ops::{AddAssign, MulAssign, SubAssign};
use crate::{Circuit, ConstraintSystem, SynthesisError};
use super::{create_proof, generate_parameters, prepare_verifying_key, verify_proof};
/// Test circuit proving knowledge of booleans `a` and `b` whose XOR equals
/// the public input.
struct XorDemo<Scalar: PrimeField> {
    a: Option<bool>,
    b: Option<bool>,
    // Ties the otherwise-unused `Scalar` parameter to the struct.
    _marker: PhantomData<Scalar>,
}
impl<Scalar: PrimeField> Circuit<Scalar> for XorDemo<Scalar> {
    /// Synthesizes booleanity constraints on `a` and `b` plus one XOR
    /// constraint, with c = a XOR b exposed as a public input.
    fn synthesize<CS: ConstraintSystem<Scalar>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
        // Converts an optional bool witness into a 0/1 field element, or
        // reports the assignment as missing.
        let to_field = |v: Option<bool>| {
            v.map(|bit| if bit { Scalar::one() } else { Scalar::zero() })
                .ok_or(SynthesisError::AssignmentMissing)
        };
        let a_var = cs.alloc(|| "a", || to_field(self.a))?;
        // (1 - a) * a = 0 forces a into {0, 1}.
        cs.enforce(
            || "a_boolean_constraint",
            |lc| lc + CS::one() - a_var,
            |lc| lc + a_var,
            |lc| lc,
        );
        let b_var = cs.alloc(|| "b", || to_field(self.b))?;
        // (1 - b) * b = 0 forces b into {0, 1}.
        cs.enforce(
            || "b_boolean_constraint",
            |lc| lc + CS::one() - b_var,
            |lc| lc + b_var,
            |lc| lc,
        );
        // The public input c; missing if either witness is missing.
        let c_var = cs.alloc_input(
            || "c",
            || to_field(self.a.zip(self.b).map(|(a, b)| a ^ b)),
        )?;
        // 2a * b = a + b - c, i.e. c = a + b - 2ab = a XOR b for boolean a, b.
        cs.enforce(
            || "c_xor_constraint",
            |lc| lc + a_var + a_var,
            |lc| lc + b_var,
            |lc| lc + a_var + b_var - c_var,
        );
        Ok(())
    }
}
/// End-to-end check of parameter generation and proving for `XorDemo` over
/// the dummy engine, with fixed "toxic waste" so every intermediate value
/// is reproducible and can be asserted exactly.
#[test]
fn test_xordemo() {
    let g1 = Fr::one();
    let g2 = Fr::one();
    let alpha = Fr::from(48577);
    let beta = Fr::from(22580);
    let gamma = Fr::from(53332);
    let delta = Fr::from(5481);
    let tau = Fr::from(3673);
    let params = {
        let c = XorDemo {
            a: None,
            b: None,
            _marker: PhantomData,
        };
        generate_parameters::<DummyEngine, _>(c, g1, g2, alpha, beta, gamma, delta, tau).unwrap()
    };
    // This will synthesize the constraint system:
    //
    // public inputs: a_0 = 1, a_1 = c
    // aux inputs: a_2 = a, a_3 = b
    // constraints:
    // (a_0 - a_2) * (a_2) = 0
    // (a_0 - a_3) * (a_3) = 0
    // (a_2 + a_2) * (a_3) = (a_2 + a_3 - a_1)
    // (a_0) * 0 = 0
    // (a_1) * 0 = 0
    // The evaluation domain is 8. The H query should
    // have 7 elements (it's a quotient polynomial)
    assert_eq!(7, params.h.len());
    let mut root_of_unity = Fr::root_of_unity();
    // We expect this to be a 2^10 root of unity
    assert_eq!(Fr::one(), root_of_unity.pow_vartime(&[1u64 << 10]));
    // Let's turn it into a 2^3 root of unity.
    root_of_unity = root_of_unity.pow_vartime(&[1u64 << 7]);
    assert_eq!(Fr::one(), root_of_unity.pow_vartime(&[1u64 << 3]));
    assert_eq!(Fr::from(20201), root_of_unity);
    // Let's compute all the points in our evaluation domain.
    let mut points = Vec::with_capacity(8);
    for i in 0u64..8 {
        points.push(root_of_unity.pow_vartime(&[i]));
    }
    // Let's compute t(tau) = (tau - p_0)(tau - p_1)...
    // = tau^8 - 1
    let mut t_at_tau = tau.pow_vartime(&[8u64]);
    t_at_tau.sub_assign(&Fr::one());
    {
        let mut tmp = Fr::one();
        for p in &points {
            let mut term = tau;
            term.sub_assign(p);
            tmp.mul_assign(&term);
        }
        assert_eq!(tmp, t_at_tau);
    }
    // We expect our H query to be 7 elements of the form...
    // {tau^i t(tau) / delta}
    let delta_inverse = delta.invert().unwrap();
    let gamma_inverse = gamma.invert().unwrap();
    {
        let mut coeff = delta_inverse;
        coeff.mul_assign(&t_at_tau);
        let mut cur = Fr::one();
        for h in params.h.iter() {
            let mut tmp = cur;
            tmp.mul_assign(&coeff);
            assert_eq!(*h, tmp);
            cur.mul_assign(&tau);
        }
    }
    // The density of the IC query is 2 (2 inputs)
    assert_eq!(2, params.vk.ic.len());
    // The density of the L query is 2 (2 aux variables)
    assert_eq!(2, params.l.len());
    // The density of the A query is 4 (each variable is in at least one A term)
    assert_eq!(4, params.a.len());
    // The density of the B query is 2 (two variables are in at least one B term)
    assert_eq!(2, params.b_g1.len());
    assert_eq!(2, params.b_g2.len());
    /*
    Lagrange interpolation polynomials in our evaluation domain:
    ,-------------------------------. ,-------------------------------. ,-------------------------------.
    | A TERM | | B TERM | | C TERM |
    `-------------------------------. `-------------------------------' `-------------------------------'
    | a_0 | a_1 | a_2 | a_3 | | a_0 | a_1 | a_2 | a_3 | | a_0 | a_1 | a_2 | a_3 |
    | 1 | 0 | 64512 | 0 | | 0 | 0 | 1 | 0 | | 0 | 0 | 0 | 0 |
    | 1 | 0 | 0 | 64512 | | 0 | 0 | 0 | 1 | | 0 | 0 | 0 | 0 |
    | 0 | 0 | 2 | 0 | | 0 | 0 | 0 | 1 | | 0 | 64512 | 1 | 1 |
    | 1 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
    | 0 | 1 | 0 | 0 | | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
    `-------'-------'-------'-------' `-------'-------'-------'-------' `-------'-------'-------'-------'
    Example for u_0:
    sage: r = 64513
    sage: Fr = GF(r)
    sage: omega = (Fr(5)^63)^(2^7)
    sage: tau = Fr(3673)
    sage: R.<x> = PolynomialRing(Fr, 'x')
    sage: def eval(tau, c0, c1, c2, c3, c4):
    ....:     p = R.lagrange_polynomial([(omega^0, c0), (omega^1, c1), (omega^2, c2), (omega^3, c3), (omega^4, c4), (omega^5, 0), (omega^6, 0), (omega^7, 0)])
    ....:     return p.substitute(tau)
    sage: eval(tau, 1, 1, 0, 1, 0)
    59158
    */
    // u_i, v_i, w_i are the QAP polynomials evaluated at tau (see the sage
    // transcript above).
    let u_i = [59158, 48317, 21767, 10402]
        .iter()
        .map(|e| Fr::from(*e))
        .collect::<Vec<Fr>>();
    let v_i = [0, 0, 60619, 30791]
        .iter()
        .map(|e| Fr::from(*e))
        .collect::<Vec<Fr>>();
    let w_i = [0, 23320, 41193, 41193]
        .iter()
        .map(|e| Fr::from(*e))
        .collect::<Vec<Fr>>();
    for (u, a) in u_i.iter().zip(&params.a[..]) {
        assert_eq!(u, a);
    }
    for (v, b) in v_i
        .iter()
        .filter(|&&e| e != Fr::zero())
        .zip(&params.b_g1[..])
    {
        assert_eq!(v, b);
    }
    for (v, b) in v_i
        .iter()
        .filter(|&&e| e != Fr::zero())
        .zip(&params.b_g2[..])
    {
        assert_eq!(v, b);
    }
    for i in 0..4 {
        let mut tmp1 = beta;
        tmp1.mul_assign(&u_i[i]);
        let mut tmp2 = alpha;
        tmp2.mul_assign(&v_i[i]);
        tmp1.add_assign(&tmp2);
        tmp1.add_assign(&w_i[i]);
        if i < 2 {
            // Check the correctness of the IC query elements
            tmp1.mul_assign(&gamma_inverse);
            assert_eq!(tmp1, params.vk.ic[i]);
        } else {
            // Check the correctness of the L query elements
            tmp1.mul_assign(&delta_inverse);
            assert_eq!(tmp1, params.l[i - 2]);
        }
    }
    // Check consistency of the other elements
    assert_eq!(alpha, params.vk.alpha_g1);
    assert_eq!(beta, params.vk.beta_g1);
    assert_eq!(beta, params.vk.beta_g2);
    assert_eq!(gamma, params.vk.gamma_g2);
    assert_eq!(delta, params.vk.delta_g1);
    assert_eq!(delta, params.vk.delta_g2);
    let pvk = prepare_verifying_key(&params.vk);
    // Fixed blinding factors so the proof elements are reproducible.
    let r = Fr::from(27134);
    let s = Fr::from(17146);
    let proof = {
        let c = XorDemo {
            a: Some(true),
            b: Some(false),
            _marker: PhantomData,
        };
        create_proof(c, &params, r, s).unwrap()
    };
    // A(x) =
    // a_0 * (44865*x^7 + 56449*x^6 + 44865*x^5 + 8064*x^4 + 3520*x^3 + 56449*x^2 + 3520*x + 40321) +
    // a_1 * (8064*x^7 + 56449*x^6 + 8064*x^5 + 56449*x^4 + 8064*x^3 + 56449*x^2 + 8064*x + 56449) +
    // a_2 * (16983*x^7 + 24192*x^6 + 63658*x^5 + 56449*x^4 + 16983*x^3 + 24192*x^2 + 63658*x + 56449) +
    // a_3 * (5539*x^7 + 27797*x^6 + 6045*x^5 + 56449*x^4 + 58974*x^3 + 36716*x^2 + 58468*x + 8064) +
    {
        // proof A = alpha + A(tau) + delta * r
        let mut expected_a = delta;
        expected_a.mul_assign(&r);
        expected_a.add_assign(&alpha);
        expected_a.add_assign(&u_i[0]); // a_0 = 1
        expected_a.add_assign(&u_i[1]); // a_1 = 1
        expected_a.add_assign(&u_i[2]); // a_2 = 1
        // a_3 = 0
        assert_eq!(proof.a, expected_a);
    }
    // B(x) =
    // a_0 * (0) +
    // a_1 * (0) +
    // a_2 * (56449*x^7 + 56449*x^6 + 56449*x^5 + 56449*x^4 + 56449*x^3 + 56449*x^2 + 56449*x + 56449) +
    // a_3 * (31177*x^7 + 44780*x^6 + 21752*x^5 + 42255*x^3 + 35861*x^2 + 33842*x + 48385)
    {
        // proof B = beta + B(tau) + delta * s
        let mut expected_b = delta;
        expected_b.mul_assign(&s);
        expected_b.add_assign(&beta);
        expected_b.add_assign(&v_i[0]); // a_0 = 1
        expected_b.add_assign(&v_i[1]); // a_1 = 1
        expected_b.add_assign(&v_i[2]); // a_2 = 1
        // a_3 = 0
        assert_eq!(proof.b, expected_b);
    }
    // C(x) =
    // a_0 * (0) +
    // a_1 * (27797*x^7 + 56449*x^6 + 36716*x^5 + 8064*x^4 + 27797*x^3 + 56449*x^2 + 36716*x + 8064) +
    // a_2 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449) +
    // a_3 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449)
    //
    // If A * B = C at each point in the domain, then the following polynomial...
    // P(x) = A(x) * B(x) - C(x)
    // = 49752*x^14 + 13914*x^13 + 29243*x^12 + 27227*x^11 + 62362*x^10 + 35703*x^9 + 4032*x^8 + 14761*x^6 + 50599*x^5 + 35270*x^4 + 37286*x^3 + 2151*x^2 + 28810*x + 60481
    //
    // ... should be divisible by t(x), producing the quotient polynomial:
    // h(x) = P(x) / t(x)
    // = 49752*x^6 + 13914*x^5 + 29243*x^4 + 27227*x^3 + 62362*x^2 + 35703*x + 4032
    {
        let mut expected_c = Fr::zero();
        // A * s
        let mut tmp = proof.a;
        tmp.mul_assign(&s);
        expected_c.add_assign(&tmp);
        // B * r
        let mut tmp = proof.b;
        tmp.mul_assign(&r);
        expected_c.add_assign(&tmp);
        // delta * r * s
        let mut tmp = delta;
        tmp.mul_assign(&r);
        tmp.mul_assign(&s);
        expected_c.sub_assign(&tmp);
        // L query answer
        // a_2 = 1, a_3 = 0
        expected_c.add_assign(&params.l[0]);
        // H query answer
        for (i, coeff) in [5040, 11763, 10755, 63633, 128, 9747, 8739]
            .iter()
            .enumerate()
        {
            let coeff = Fr::from(*coeff);
            let mut tmp = params.h[i];
            tmp.mul_assign(&coeff);
            expected_c.add_assign(&tmp);
        }
        assert_eq!(expected_c, proof.c);
    }
    assert!(verify_proof(&pvk, &proof, &[Fr::one()]).is_ok());
}
/// Test circuit enforcing `a * b = c` that deliberately includes a term with
/// a zero coefficient in the B linear combination (regression coverage for
/// zero-coefficient handling in parameter generation / proving).
struct MultWithZeroCoeffs<F> {
    // Witness values; `None` when synthesizing without an assignment.
    a: Option<F>,
    b: Option<F>,
    c: Option<F>,
    /// Whether to attach the zero coefficient to the "1" variable, or a different variable.
    one_var: bool,
}
impl<F: ff::PrimeField> Circuit<F> for &MultWithZeroCoeffs<F> {
    /// Allocates `a`, `b`, `c` and enforces `a * b = c`, with a zero-coefficient
    /// term spliced into the B side of the constraint.
    fn synthesize<CS: ConstraintSystem<F>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
        let a = cs.alloc(|| "a", || Ok(self.a.unwrap()))?;
        let b = cs.alloc(|| "b", || Ok(self.b.unwrap()))?;
        let c = cs.alloc(|| "c", || Ok(self.c.unwrap()))?;
        // The zero coefficient is attached either to the constant-one input
        // variable or to the auxiliary variable `a`, depending on `one_var`.
        let zero_target = if self.one_var { CS::one() } else { a };
        cs.enforce(
            || "cs",
            |z| z + a,
            // notice the zero coefficient on the B term
            |z| z + (F::from(0), zero_target) + b,
            |z| z + c,
        );
        Ok(())
    }
}
/// Builds a proof for the zero-coefficient circuit with fixed toxic waste and
/// randomness, and checks that it verifies.
fn zero_coeff_test(one_var: bool) {
    // Circuit proving 5 * 6 = 30.
    let circuit = MultWithZeroCoeffs {
        a: Some(Fr::from(5)),
        b: Some(Fr::from(6)),
        c: Some(Fr::from(30)),
        one_var,
    };
    // Deterministic "trapdoor" values so the test is reproducible.
    let g1 = Fr::one();
    let g2 = Fr::one();
    let alpha = Fr::from(48577);
    let beta = Fr::from(22580);
    let gamma = Fr::from(53332);
    let delta = Fr::from(5481);
    let tau = Fr::from(3673);
    let params =
        generate_parameters::<DummyEngine, _>(&circuit, g1, g2, alpha, beta, gamma, delta, tau)
            .unwrap();
    // Fixed proving randomness.
    let r = Fr::from(27134);
    let s = Fr::from(17146);
    let proof = create_proof(&circuit, &params, r, s).unwrap();
    let pvk = prepare_verifying_key(&params.vk);
    // The circuit exposes no public inputs.
    verify_proof(&pvk, &proof, &[]).unwrap();
}
#[test]
fn zero_coeff_one_var() {
    // Zero coefficient attached to the constant "1" input variable.
    zero_coeff_test(true);
}
#[test]
fn zero_coeff_non_one_var() {
    // Zero coefficient attached to an ordinary auxiliary variable.
    zero_coeff_test(false);
}

View File

@@ -0,0 +1,58 @@
use group::{prime::PrimeCurveAffine, Curve};
use pairing::{MillerLoopResult, MultiMillerLoop};
use std::ops::{AddAssign, Neg};
use super::{PreparedVerifyingKey, Proof, VerifyingKey};
use crate::VerificationError;
pub mod batch;
/// Precomputes the pairing `e(alpha, beta)` and the negated gamma/delta
/// elements so `verify_proof` can check a proof with a single multi-Miller
/// loop and one final exponentiation.
pub fn prepare_verifying_key<E: MultiMillerLoop>(vk: &VerifyingKey<E>) -> PreparedVerifyingKey<E> {
    PreparedVerifyingKey {
        alpha_g1_beta_g2: E::pairing(&vk.alpha_g1, &vk.beta_g2),
        // Pre-negated so the verifier can move these terms to the left-hand
        // side of the verification equation.
        neg_gamma_g2: vk.gamma_g2.neg().into(),
        neg_delta_g2: vk.delta_g2.neg().into(),
        ic: vk.ic.clone(),
    }
}
/// Verifies a single Groth16 proof against the prepared verifying key and the
/// given public inputs. Returns `InvalidVerifyingKey` if the input count does
/// not match the key, and `InvalidProof` if the pairing check fails.
pub fn verify_proof<'a, E: MultiMillerLoop>(
    pvk: &'a PreparedVerifyingKey<E>,
    proof: &Proof<E>,
    public_inputs: &[E::Fr],
) -> Result<(), VerificationError> {
    // `ic` carries one base point per public input plus the constant term.
    if public_inputs.len() + 1 != pvk.ic.len() {
        return Err(VerificationError::InvalidVerifyingKey);
    }
    // acc = ic[0] + sum_i public_inputs[i] * ic[i + 1]
    let mut acc = pvk.ic[0].to_curve();
    for (input, base) in public_inputs.iter().zip(pvk.ic.iter().skip(1)) {
        AddAssign::<&E::G1>::add_assign(&mut acc, &(*base * input));
    }
    // The original verification equation is:
    // A * B = alpha * beta + inputs * gamma + C * delta
    // ... however, we rearrange it so that it is:
    // A * B - inputs * gamma - C * delta = alpha * beta
    // or equivalently:
    // A * B + inputs * (-gamma) + C * (-delta) = alpha * beta
    // which allows us to do a single final exponentiation.
    let lhs = E::multi_miller_loop(&[
        (&proof.a, &proof.b.into()),
        (&acc.to_affine(), &pvk.neg_gamma_g2),
        (&proof.c, &pvk.neg_delta_g2),
    ])
    .final_exponentiation();
    if lhs == pvk.alpha_g1_beta_g2 {
        Ok(())
    } else {
        Err(VerificationError::InvalidProof)
    }
}

View File

@@ -0,0 +1,289 @@
//! Performs batch Groth16 proof verification.
//!
//! Batch verification asks whether *all* proofs in some set are valid,
//! rather than asking whether *each* of them is valid. This allows sharing
//! computations among all proof verifications, performing less work overall
//! at the cost of higher latency (the entire batch must complete), complexity of
//! caller code (which must assemble a batch of proofs across work-items),
//! and loss of the ability to easily pinpoint failing proofs.
//!
//! This batch verification implementation is non-adaptive, in the sense that it
//! assumes that all the proofs in the batch are verifiable by the same
//! `VerifyingKey`. The reason is that if you have different proof statements,
//! you need to specify which statement you are proving, which means that you
//! need to refer to or lookup a particular `VerifyingKey`. In practice, with
//! large enough batches, it's manageable and not much worse performance-wise to
//! keep batches of each statement type, vs one large adaptive batch.
use std::ops::AddAssign;
use ff::Field;
use group::{Curve, Group};
use pairing::{MillerLoopResult, MultiMillerLoop};
use rand_core::{CryptoRng, RngCore};
#[cfg(feature = "multicore")]
use rand_core::OsRng;
#[cfg(feature = "multicore")]
use rayon::{iter::ParallelIterator, prelude::ParallelSlice};
use crate::{
groth16::{PreparedVerifyingKey, Proof, VerifyingKey},
VerificationError,
};
/// A batch verification item.
///
/// This struct exists to allow batch processing to be decoupled from the
/// lifetime of the message. This is useful when using the batch verification
/// API in an async context.
#[derive(Clone, Debug)]
pub struct Item<E: MultiMillerLoop> {
    // The Groth16 proof to be checked.
    proof: Proof<E>,
    // Public inputs for this proof (excluding the implicit leading 1).
    inputs: Vec<E::Fr>,
}
impl<E: MultiMillerLoop> From<(&Proof<E>, &[E::Fr])> for Item<E> {
    /// Clones the borrowed proof and inputs, then delegates to the
    /// owned-tuple conversion.
    fn from((proof, inputs): (&Proof<E>, &[E::Fr])) -> Self {
        Self::from((proof.clone(), inputs.to_owned()))
    }
}
/// Builds an `Item` directly from an owned proof and input vector.
impl<E: MultiMillerLoop> From<(Proof<E>, Vec<E::Fr>)> for Item<E> {
    fn from((proof, inputs): (Proof<E>, Vec<E::Fr>)) -> Self {
        Self { proof, inputs }
    }
}
impl<E: MultiMillerLoop> Item<E> {
    /// Perform non-batched verification of this `Item`.
    ///
    /// This is useful (in combination with `Item::clone`) for implementing
    /// fallback logic when batch verification fails.
    pub fn verify_single(self, pvk: &PreparedVerifyingKey<E>) -> Result<(), VerificationError> {
        // Delegates to the single-proof verifier in the parent module.
        super::verify_proof(pvk, &self.proof, &self.inputs)
    }
}
/// A batch verification context.
///
/// In practice, you would create a batch verifier for each proof statement
/// requiring the same `VerifyingKey`.
#[derive(Debug)]
pub struct Verifier<E: MultiMillerLoop> {
    // Items accumulated via `queue`; consumed by `verify` / `verify_multicore`.
    items: Vec<Item<E>>,
}
// Need to impl Default by hand to avoid a derived E: Default bound
impl<E: MultiMillerLoop> Default for Verifier<E> {
    fn default() -> Self {
        // Start with an empty batch.
        Self { items: Vec::new() }
    }
}
impl<E: MultiMillerLoop> Verifier<E>
where
    E::G1: AddAssign<E::G1>,
{
    /// Construct a new batch verifier.
    pub fn new() -> Self {
        Self::default()
    }
    /// Queue a (proof, inputs) tuple for verification.
    pub fn queue<I: Into<Item<E>>>(&mut self, item: I) {
        self.items.push(item.into())
    }
    /// Perform batch verification with a particular `VerifyingKey`, returning
    /// `Ok(())` if all proofs were verified and `VerificationError` otherwise.
    #[allow(non_snake_case)]
    pub fn verify<R: RngCore + CryptoRng>(
        self,
        mut rng: R,
        vk: &VerifyingKey<E>,
    ) -> Result<(), VerificationError> {
        // Every queued item must supply exactly `ic.len() - 1` public inputs
        // (the extra `ic` entry is the constant term).
        if self
            .items
            .iter()
            .any(|Item { inputs, .. }| inputs.len() + 1 != vk.ic.len())
        {
            return Err(VerificationError::InvalidVerifyingKey);
        }
        // Pairs for the final multi-Miller loop: one per queued proof, plus
        // three shared terms (delta, gamma, alpha*beta) appended afterwards.
        let mut ml_terms = Vec::<(E::G1Affine, E::G2Prepared)>::new();
        // Per-input accumulated random weights (gamma term).
        let mut acc_Gammas = vec![E::Fr::zero(); vk.ic.len()];
        // z-weighted sum of the proofs' C points (delta term).
        let mut acc_Delta = E::G1::identity();
        // Sum of all z values (alpha*beta term).
        let mut acc_Y = E::Fr::zero();
        for Item { proof, inputs } in self.items.into_iter() {
            // The spec is explicit that z != 0. Field::random is defined to
            // return a uniformly-random field element (which may be 0), so we
            // loop until it's not, avoiding needing an assert or throwing an
            // error through no fault of the batch items. This will likely never
            // actually loop, but handles the edge case.
            let z = loop {
                let z = E::Fr::random(&mut rng);
                if !z.is_zero_vartime() {
                    break z;
                }
            };
            ml_terms.push(((proof.a * z).into(), (-proof.b).into()));
            acc_Gammas[0] += &z; // a_0 is implicitly set to 1
            for (a_i, acc_Gamma_i) in Iterator::zip(inputs.iter(), acc_Gammas.iter_mut().skip(1)) {
                *acc_Gamma_i += &(z * a_i);
            }
            acc_Delta += proof.c * z;
            acc_Y += &z;
        }
        ml_terms.push((acc_Delta.to_affine(), E::G2Prepared::from(vk.delta_g2)));
        // Psi = sum_i acc_Gamma_i * ic_i: the batched public-input commitment.
        let Psi = vk
            .ic
            .iter()
            .zip(acc_Gammas.iter())
            .map(|(&Psi_i, acc_Gamma_i)| Psi_i * acc_Gamma_i)
            .sum();
        ml_terms.push((E::G1Affine::from(Psi), E::G2Prepared::from(vk.gamma_g2)));
        // Covers the [acc_Y]⋅e(alpha_g1, beta_g2) component
        //
        // The multiplication by acc_Y is expensive -- it involves
        // exponentiating by acc_Y because the result of the pairing is an
        // element of a multiplicative subgroup of a large extension field.
        // Instead, we add
        //   ([acc_Y]⋅alpha_g1, beta_g2)
        // to our Miller loop terms because
        //   [acc_Y]⋅e(alpha_g1, beta_g2) = e([acc_Y]⋅alpha_g1, beta_g2)
        ml_terms.push((
            E::G1Affine::from(vk.alpha_g1 * acc_Y),
            E::G2Prepared::from(vk.beta_g2),
        ));
        // Rebind as the reference pairs `multi_miller_loop` expects.
        let ml_terms = ml_terms.iter().map(|(a, b)| (a, b)).collect::<Vec<_>>();
        // The whole batch passes iff the combined product is the identity.
        if E::multi_miller_loop(&ml_terms[..]).final_exponentiation() == E::Gt::identity() {
            Ok(())
        } else {
            Err(VerificationError::InvalidProof)
        }
    }
    /// Perform batch verification with a particular `VerifyingKey`, returning
    /// `Ok(())` if all proofs were verified and `VerificationError` otherwise.
    ///
    /// This performs the bulk of internal arithmetic over the global rayon
    /// threadpool.
    #[cfg(feature = "multicore")]
    #[allow(non_snake_case)]
    pub fn verify_multicore(self, vk: &VerifyingKey<E>) -> Result<(), VerificationError> {
        if self
            .items
            .iter()
            .any(|Item { inputs, .. }| inputs.len() + 1 != vk.ic.len())
        {
            return Err(VerificationError::InvalidVerifyingKey);
        }
        // Per-chunk partial sums, merged pairwise by the rayon reduce below.
        struct Accumulator<E: MultiMillerLoop> {
            gammas: Vec<E::Fr>,
            delta: E::G1,
            y: E::Fr,
            ml_result: Option<E::Result>,
        }
        impl<E: MultiMillerLoop> Accumulator<E> {
            fn new(ic_len: usize) -> Self {
                Accumulator {
                    gammas: vec![E::Fr::zero(); ic_len],
                    delta: E::G1::identity(),
                    y: E::Fr::zero(),
                    ml_result: None,
                }
            }
        }
        let ic_len = vk.ic.len();
        let acc = self
            .items
            // This chunk size was obtained heuristically.
            .par_chunks(8)
            .map(|items| {
                let mut acc = Accumulator::<E>::new(ic_len);
                let mut ml_terms: Vec<(E::G1Affine, E::G2Prepared)> = vec![];
                // One nonzero random z per chunk; item i within the chunk is
                // weighted by z^(i+1) via the running `cur_z` below.
                let z = loop {
                    let z = E::Fr::random(&mut OsRng);
                    if !z.is_zero_vartime() {
                        break z;
                    }
                };
                let mut cur_z = z;
                for Item { proof, inputs } in items {
                    acc.gammas[0] += &cur_z;
                    for (a_i, acc_gamma_i) in
                        Iterator::zip(inputs.iter(), acc.gammas.iter_mut().skip(1))
                    {
                        *acc_gamma_i += &(cur_z * a_i);
                    }
                    acc.delta += proof.c * cur_z;
                    acc.y += &cur_z;
                    ml_terms.push(((proof.a * cur_z).into(), (-proof.b).into()));
                    cur_z *= z;
                }
                let ml_terms = ml_terms.iter().map(|(a, b)| (a, b)).collect::<Vec<_>>();
                acc.ml_result = Some(E::multi_miller_loop(&ml_terms[..]));
                acc
            })
            .reduce(
                || Accumulator::<E>::new(ic_len),
                |mut a, b| {
                    for (a, b) in a.gammas.iter_mut().zip(b.gammas.into_iter()) {
                        *a += b;
                    }
                    a.delta += b.delta;
                    a.y += b.y;
                    a.ml_result = match (a.ml_result, b.ml_result) {
                        (Some(a), Some(b)) => Some(a + b),
                        (Some(a), None) | (None, Some(a)) => Some(a),
                        (None, None) => None,
                    };
                    a
                },
            );
        match acc.ml_result {
            // No items were queued, so the batch is trivially valid.
            None => Ok(()),
            Some(mut ml_result) => {
                // TODO: could use a multiexp (Bos-Coster maybe?)
                let psi = vk
                    .ic
                    .iter()
                    .zip(acc.gammas.into_iter())
                    .map(|(&psi_i, acc_gamma_i)| psi_i * acc_gamma_i)
                    .sum();
                // Fold in the shared delta, gamma and alpha*beta terms.
                ml_result += E::multi_miller_loop(&[
                    (&acc.delta.to_affine(), &E::G2Prepared::from(vk.delta_g2)),
                    (&E::G1Affine::from(psi), &E::G2Prepared::from(vk.gamma_g2)),
                    (
                        &E::G1Affine::from(vk.alpha_g1 * acc.y),
                        &E::G2Prepared::from(vk.beta_g2),
                    ),
                ]);
                if ml_result.final_exponentiation() == E::Gt::identity() {
                    Ok(())
                } else {
                    Err(VerificationError::InvalidProof)
                }
            }
        }
    }
}

605
third_party/bellman/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,605 @@
//! `bellman` is a crate for building zk-SNARK circuits. It provides circuit
//! traits and primitive structures, as well as basic gadget implementations
//! such as booleans and number abstractions.
//!
//! # Example circuit
//!
//! Say we want to write a circuit that proves we know the preimage to some hash
//! computed using SHA-256d (calling SHA-256 twice). The preimage must have a
//! fixed length known in advance (because the circuit parameters will depend on
//! it), but can otherwise have any value. We take the following strategy:
//!
//! - Witness each bit of the preimage.
//! - Compute `hash = SHA-256d(preimage)` inside the circuit.
//! - Expose `hash` as a public input using multiscalar packing.
//!
//! ```
//! use bellman::{
//! gadgets::{
//! boolean::{AllocatedBit, Boolean},
//! multipack,
//! sha256::sha256,
//! },
//! groth16, Circuit, ConstraintSystem, SynthesisError,
//! };
//! use bls12_381::Bls12;
//! use ff::PrimeField;
//! use pairing::Engine;
//! use rand::rngs::OsRng;
//! use sha2::{Digest, Sha256};
//!
//! /// Our own SHA-256d gadget. Input and output are in little-endian bit order.
//! fn sha256d<Scalar: PrimeField, CS: ConstraintSystem<Scalar>>(
//! mut cs: CS,
//! data: &[Boolean],
//! ) -> Result<Vec<Boolean>, SynthesisError> {
//! // Flip endianness of each input byte
//! let input: Vec<_> = data
//! .chunks(8)
//! .map(|c| c.iter().rev())
//! .flatten()
//! .cloned()
//! .collect();
//!
//! let mid = sha256(cs.namespace(|| "SHA-256(input)"), &input)?;
//! let res = sha256(cs.namespace(|| "SHA-256(mid)"), &mid)?;
//!
//! // Flip endianness of each output byte
//! Ok(res
//! .chunks(8)
//! .map(|c| c.iter().rev())
//! .flatten()
//! .cloned()
//! .collect())
//! }
//!
//! struct MyCircuit {
//! /// The input to SHA-256d we are proving that we know. Set to `None` when we
//! /// are verifying a proof (and do not have the witness data).
//! preimage: Option<[u8; 80]>,
//! }
//!
//! impl<Scalar: PrimeField> Circuit<Scalar> for MyCircuit {
//! fn synthesize<CS: ConstraintSystem<Scalar>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
//! // Compute the values for the bits of the preimage. If we are verifying a proof,
//! // we still need to create the same constraints, so we return an equivalent-size
//! // Vec of None (indicating that the value of each bit is unknown).
//! let bit_values = if let Some(preimage) = self.preimage {
//! preimage
//! .into_iter()
//! .map(|byte| (0..8).map(move |i| (byte >> i) & 1u8 == 1u8))
//! .flatten()
//! .map(|b| Some(b))
//! .collect()
//! } else {
//! vec![None; 80 * 8]
//! };
//! assert_eq!(bit_values.len(), 80 * 8);
//!
//! // Witness the bits of the preimage.
//! let preimage_bits = bit_values
//! .into_iter()
//! .enumerate()
//! // Allocate each bit.
//! .map(|(i, b)| {
//! AllocatedBit::alloc(cs.namespace(|| format!("preimage bit {}", i)), b)
//! })
//! // Convert the AllocatedBits into Booleans (required for the sha256 gadget).
//! .map(|b| b.map(Boolean::from))
//! .collect::<Result<Vec<_>, _>>()?;
//!
//! // Compute hash = SHA-256d(preimage).
//! let hash = sha256d(cs.namespace(|| "SHA-256d(preimage)"), &preimage_bits)?;
//!
//! // Expose the vector of 32 boolean variables as compact public inputs.
//! multipack::pack_into_inputs(cs.namespace(|| "pack hash"), &hash)
//! }
//! }
//!
//! // Create parameters for our circuit. In a production deployment these would
//! // be generated securely using a multiparty computation.
//! let params = {
//! let c = MyCircuit { preimage: None };
//! groth16::generate_random_parameters::<Bls12, _, _>(c, &mut OsRng).unwrap()
//! };
//!
//! // Prepare the verification key (for proof verification).
//! let pvk = groth16::prepare_verifying_key(&params.vk);
//!
//! // Pick a preimage and compute its hash.
//! let preimage = [42; 80];
//! let hash = Sha256::digest(&Sha256::digest(&preimage));
//!
//! // Create an instance of our circuit (with the preimage as a witness).
//! let c = MyCircuit {
//! preimage: Some(preimage),
//! };
//!
//! // Create a Groth16 proof with our parameters.
//! let proof = groth16::create_random_proof(c, &params, &mut OsRng).unwrap();
//!
//! // Pack the hash as inputs for proof verification.
//! let hash_bits = multipack::bytes_to_bits_le(&hash);
//! let inputs = multipack::compute_multipacking(&hash_bits);
//!
//! // Check the proof!
//! assert!(groth16::verify_proof(&pvk, &proof, &inputs).is_ok());
//! ```
//!
//! # Roadmap
//!
//! `bellman` is being refactored into a generic proving library. Currently it
//! is pairing-specific, and different types of proving systems need to be
//! implemented as sub-modules. After the refactor, `bellman` will be generic
//! using the [`ff`] and [`group`] crates, while specific proving systems will
//! be separate crates that pull in the dependencies they require.
// Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)]
pub mod domain;
pub mod gadgets;
#[cfg(feature = "groth16")]
pub mod groth16;
pub mod mirage;
pub mod multicore;
pub mod multiexp;
pub mod random;
use ff::PrimeField;
use std::error::Error;
use std::fmt;
use std::io;
use std::marker::PhantomData;
use std::ops::{Add, Sub};
/// Computations are expressed in terms of arithmetic circuits, in particular
/// rank-1 quadratic constraint systems. The `Circuit` trait represents a
/// circuit that can be synthesized. The `synthesize` method is called during
/// CRS generation and during proving.
pub trait Circuit<Scalar: PrimeField> {
    /// Synthesize the circuit into a rank-1 quadratic constraint system.
    ///
    /// Implementations allocate variables and enforce constraints on `cs`;
    /// the same constraints must be generated whether or not witness values
    /// are available.
    fn synthesize<CS: ConstraintSystem<Scalar>>(self, cs: &mut CS) -> Result<(), SynthesisError>;
}
/// Represents a variable in our constraint system.
///
/// The wrapped [`Index`] records whether it is a public input or a private
/// auxiliary variable.
#[derive(Copy, Clone, Debug)]
pub struct Variable(Index);
impl Variable {
    /// This constructs a variable with an arbitrary index.
    /// Circuit implementations are not recommended to use this.
    pub fn new_unchecked(idx: Index) -> Variable {
        Variable(idx)
    }
    /// This returns the index underlying the variable.
    /// Circuit implementations are not recommended to use this.
    pub fn get_unchecked(&self) -> Index {
        self.0
    }
}
/// Represents the index of either an input variable or
/// auxiliary variable.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Index {
    /// Index into the public-input (instance) variables.
    Input(usize),
    /// Index into the private auxiliary (witness) variables.
    Aux(usize),
}
/// This represents a linear combination of some variables, with coefficients
/// in the scalar field of a pairing-friendly elliptic curve group.
///
/// Terms are stored as (variable, coefficient) pairs in insertion order;
/// duplicate variables are not merged.
#[derive(Clone)]
pub struct LinearCombination<Scalar: PrimeField>(Vec<(Variable, Scalar)>);
impl<Scalar: PrimeField> AsRef<[(Variable, Scalar)]> for LinearCombination<Scalar> {
    /// Expose the underlying (variable, coefficient) terms as a slice.
    fn as_ref(&self) -> &[(Variable, Scalar)] {
        &self.0
    }
}
impl<Scalar: PrimeField> LinearCombination<Scalar> {
    /// The empty linear combination, which evaluates to zero.
    pub fn zero() -> LinearCombination<Scalar> {
        LinearCombination(Vec::new())
    }
}
impl<Scalar: PrimeField> Add<(Scalar, Variable)> for LinearCombination<Scalar> {
type Output = LinearCombination<Scalar>;
fn add(mut self, (coeff, var): (Scalar, Variable)) -> LinearCombination<Scalar> {
self.0.push((var, coeff));
self
}
}
impl<Scalar: PrimeField> Sub<(Scalar, Variable)> for LinearCombination<Scalar> {
    type Output = LinearCombination<Scalar>;
    /// Appends the term `-coeff * var`, implemented by adding the negated
    /// coefficient.
    #[allow(clippy::suspicious_arithmetic_impl)]
    fn sub(self, (coeff, var): (Scalar, Variable)) -> LinearCombination<Scalar> {
        let negated = coeff.neg();
        self + (negated, var)
    }
}
impl<Scalar: PrimeField> Add<Variable> for LinearCombination<Scalar> {
    type Output = LinearCombination<Scalar>;
    /// Appends the variable with an implicit coefficient of one.
    fn add(self, other: Variable) -> LinearCombination<Scalar> {
        self + (Scalar::one(), other)
    }
}
impl<Scalar: PrimeField> Sub<Variable> for LinearCombination<Scalar> {
    type Output = LinearCombination<Scalar>;
    /// Appends the variable with an implicit coefficient of minus one.
    fn sub(self, other: Variable) -> LinearCombination<Scalar> {
        self - (Scalar::one(), other)
    }
}
impl<'a, Scalar: PrimeField> Add<&'a LinearCombination<Scalar>> for LinearCombination<Scalar> {
    type Output = LinearCombination<Scalar>;
    /// Appends every term of `other`, preserving their order.
    fn add(mut self, other: &'a LinearCombination<Scalar>) -> LinearCombination<Scalar> {
        self.0.extend_from_slice(&other.0);
        self
    }
}
impl<'a, Scalar: PrimeField> Sub<&'a LinearCombination<Scalar>> for LinearCombination<Scalar> {
    type Output = LinearCombination<Scalar>;
    /// Appends every term of `other` with its coefficient negated.
    fn sub(mut self, other: &'a LinearCombination<Scalar>) -> LinearCombination<Scalar> {
        for &(var, coeff) in &other.0 {
            self.0.push((var, coeff.neg()));
        }
        self
    }
}
impl<'a, Scalar: PrimeField> Add<(Scalar, &'a LinearCombination<Scalar>)>
    for LinearCombination<Scalar>
{
    type Output = LinearCombination<Scalar>;
    /// Appends every term of `other`, scaling each coefficient by `coeff`.
    fn add(
        mut self,
        (coeff, other): (Scalar, &'a LinearCombination<Scalar>),
    ) -> LinearCombination<Scalar> {
        for &(var, term_coeff) in &other.0 {
            let mut scaled = term_coeff;
            scaled.mul_assign(&coeff);
            self.0.push((var, scaled));
        }
        self
    }
}
impl<'a, Scalar: PrimeField> Sub<(Scalar, &'a LinearCombination<Scalar>)>
    for LinearCombination<Scalar>
{
    type Output = LinearCombination<Scalar>;
    /// Appends every term of `other`, scaling each coefficient by `coeff`
    /// and negating it.
    fn sub(
        mut self,
        (coeff, other): (Scalar, &'a LinearCombination<Scalar>),
    ) -> LinearCombination<Scalar> {
        for &(var, term_coeff) in &other.0 {
            let mut scaled = term_coeff;
            scaled.mul_assign(&coeff);
            self.0.push((var, scaled.neg()));
        }
        self
    }
}
/// This is an error that could occur during circuit synthesis contexts,
/// such as CRS generation or proving.
#[derive(Debug)]
pub enum SynthesisError {
    /// During synthesis, we lacked knowledge of a variable assignment.
    AssignmentMissing,
    /// During synthesis, we divided by zero.
    DivisionByZero,
    /// During synthesis, we constructed an unsatisfiable constraint system.
    Unsatisfiable,
    /// During synthesis, our polynomials ended up being too high of degree
    PolynomialDegreeTooLarge,
    /// During proof generation, we encountered an identity in the CRS
    UnexpectedIdentity,
    /// During proof generation, we encountered an I/O error with the CRS
    IoError(io::Error),
    /// During CRS generation, we observed an unconstrained auxiliary variable
    UnconstrainedVariable,
    /// During synthesis, we ask for a random coin during the wrong phase...
    LateRandomCoin,
    /// During synthesis, tries to get coin value that isn't a coin
    NotACoin,
}
impl From<io::Error> for SynthesisError {
    /// Wrap an I/O error encountered while reading or writing the CRS.
    fn from(e: io::Error) -> SynthesisError {
        SynthesisError::IoError(e)
    }
}
// Debug + Display are implemented above/below, so the default Error impl suffices.
impl Error for SynthesisError {}
impl fmt::Display for SynthesisError {
    /// Writes a human-readable description; the I/O variant includes the
    /// underlying error's message.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        match self {
            SynthesisError::IoError(e) => {
                write!(f, "I/O error: ")?;
                e.fmt(f)
            }
            SynthesisError::AssignmentMissing => {
                write!(f, "an assignment for a variable could not be computed")
            }
            SynthesisError::DivisionByZero => write!(f, "division by zero"),
            SynthesisError::Unsatisfiable => write!(f, "unsatisfiable constraint system"),
            SynthesisError::PolynomialDegreeTooLarge => {
                write!(f, "polynomial degree is too large")
            }
            SynthesisError::UnexpectedIdentity => {
                write!(f, "encountered an identity element in the CRS")
            }
            SynthesisError::UnconstrainedVariable => {
                write!(f, "auxiliary variable was unconstrained")
            }
            SynthesisError::LateRandomCoin => write!(f, "late random coin"),
            SynthesisError::NotACoin => write!(f, "not a coin"),
        }
    }
}
/// An error during verification.
#[derive(Debug, Clone)]
pub enum VerificationError {
    /// Verification was attempted with a malformed verifying key.
    InvalidVerifyingKey,
    /// Proof verification failed.
    InvalidProof,
}
// Debug + Display are implemented for VerificationError, so the default Error impl suffices.
impl Error for VerificationError {}
impl fmt::Display for VerificationError {
    /// Writes a human-readable description of the verification failure.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        match self {
            VerificationError::InvalidVerifyingKey => write!(f, "malformed verifying key"),
            VerificationError::InvalidProof => write!(f, "proof verification failed"),
        }
    }
}
/// Represents a constraint system which can have new variables
/// allocated and constrains between them formed.
pub trait ConstraintSystem<Scalar: PrimeField>: Sized {
    /// Represents the type of the "root" of this constraint system
    /// so that nested namespaces can minimize indirection.
    type Root: ConstraintSystem<Scalar>;
    /// Return the "one" input variable
    fn one() -> Variable {
        // Input index 0 is reserved for the constant 1.
        Variable::new_unchecked(Index::Input(0))
    }
    /// Allocate a private variable in the constraint system. The provided function is used to
    /// determine the assignment of the variable. The given `annotation` function is invoked
    /// in testing contexts in order to derive a unique name for this variable in the current
    /// namespace.
    fn alloc<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<Scalar, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>;
    /// Allocate a public variable in the constraint system. The provided function is used to
    /// determine the assignment of the variable.
    fn alloc_input<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<Scalar, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>;
    /// Enforce that `A` * `B` = `C`. The `annotation` function is invoked in testing contexts
    /// in order to derive a unique name for the constraint in the current namespace.
    fn enforce<A, AR, LA, LB, LC>(&mut self, annotation: A, a: LA, b: LB, c: LC)
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
        LA: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
        LB: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
        LC: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>;
    /// Create a new (sub)namespace and enter into it. Not intended
    /// for downstream use; use `namespace` instead.
    fn push_namespace<NR, N>(&mut self, name_fn: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR;
    /// Exit out of the existing namespace. Not intended for
    /// downstream use; use `namespace` instead.
    fn pop_namespace(&mut self);
    /// Gets the "root" constraint system, bypassing the namespacing.
    /// Not intended for downstream use; use `namespace` instead.
    fn get_root(&mut self) -> &mut Self::Root;
    /// Begin a namespace for this constraint system.
    ///
    /// The returned guard pops the namespace when dropped.
    fn namespace<NR, N>(&mut self, name_fn: N) -> Namespace<'_, Scalar, Self::Root>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.get_root().push_namespace(name_fn);
        Namespace(self.get_root(), PhantomData)
    }
}
/// This is a "namespaced" constraint system which borrows a constraint system (pushing
/// a namespace context) and, when dropped, pops out of the namespace context.
pub struct Namespace<'a, Scalar: PrimeField, CS: ConstraintSystem<Scalar>>(
    &'a mut CS,
    // Ties the otherwise-unused `Scalar` type parameter to the struct.
    PhantomData<Scalar>,
);
// A namespace forwards all allocation and constraint calls to the wrapped
// constraint system; only the (root-managed) namespace operations differ.
impl<'cs, Scalar: PrimeField, CS: ConstraintSystem<Scalar>> ConstraintSystem<Scalar>
    for Namespace<'cs, Scalar, CS>
{
    type Root = CS::Root;
    fn one() -> Variable {
        CS::one()
    }
    fn alloc<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<Scalar, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        self.0.alloc(annotation, f)
    }
    fn alloc_input<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<Scalar, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        self.0.alloc_input(annotation, f)
    }
    fn enforce<A, AR, LA, LB, LC>(&mut self, annotation: A, a: LA, b: LB, c: LC)
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
        LA: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
        LB: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
        LC: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
    {
        self.0.enforce(annotation, a, b, c)
    }
    // Downstream users who use `namespace` will never interact with these
    // functions and they will never be invoked because the namespace is
    // never a root constraint system.
    fn push_namespace<NR, N>(&mut self, _: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        panic!("only the root's push_namespace should be called");
    }
    fn pop_namespace(&mut self) {
        panic!("only the root's pop_namespace should be called");
    }
    fn get_root(&mut self) -> &mut Self::Root {
        self.0.get_root()
    }
}
impl<'a, Scalar: PrimeField, CS: ConstraintSystem<Scalar>> Drop for Namespace<'a, Scalar, CS> {
    fn drop(&mut self) {
        // Leaving scope pops the namespace that `ConstraintSystem::namespace` pushed.
        self.get_root().pop_namespace()
    }
}
/// Convenience implementation of ConstraintSystem<Scalar> for mutable references to
/// constraint systems.
///
/// Every method simply dereferences and forwards to the underlying system.
impl<'cs, Scalar: PrimeField, CS: ConstraintSystem<Scalar>> ConstraintSystem<Scalar>
    for &'cs mut CS
{
    type Root = CS::Root;
    fn one() -> Variable {
        CS::one()
    }
    fn alloc<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<Scalar, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        (**self).alloc(annotation, f)
    }
    fn alloc_input<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<Scalar, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        (**self).alloc_input(annotation, f)
    }
    fn enforce<A, AR, LA, LB, LC>(&mut self, annotation: A, a: LA, b: LB, c: LC)
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
        LA: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
        LB: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
        LC: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
    {
        (**self).enforce(annotation, a, b, c)
    }
    fn push_namespace<NR, N>(&mut self, name_fn: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        (**self).push_namespace(name_fn)
    }
    fn pop_namespace(&mut self) {
        (**self).pop_namespace()
    }
    fn get_root(&mut self) -> &mut Self::Root {
        (**self).get_root()
    }
}
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn verification_error_string() {
        let err = VerificationError::InvalidProof;
        // Make sure it correctly returns something (i.e. it's not an endless loop)
        assert!(!err.to_string().is_empty());
    }
    #[test]
    fn synthesis_error_string() {
        let err = SynthesisError::PolynomialDegreeTooLarge;
        // Make sure it correctly returns something (i.e. it's not an endless loop)
        assert!(!err.to_string().is_empty());
        // The I/O variant should surface the wrapped error's message.
        let err = SynthesisError::IoError(io::Error::new(io::ErrorKind::Other, "other"));
        assert!(
            err.to_string().contains("other"),
            "\"{}\" does not contain the underlying error",
            err
        );
    }
}

View File

@@ -0,0 +1,609 @@
use rand_core::RngCore;
use std::ops::{AddAssign, MulAssign};
use std::sync::Arc;
use ff::{Field, PrimeField};
use group::{prime::PrimeCurveAffine, Curve, Group, Wnaf, WnafGroup};
use pairing::Engine;
use super::{Parameters, VerifyingKey};
use crate::random::RandomCircuit;
use crate::random::RandomConstraintSystem;
use crate::{Circuit, ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};
use crate::domain::{EvaluationDomain, Scalar};
use crate::multicore::Worker;
/// Generates a random common reference string for
/// a circuit.
///
/// Samples the generators and all trapdoor elements from `rng`, then defers
/// to `generate_parameters`. The draw order is fixed and part of the
/// function's observable behavior for a given RNG.
pub fn generate_random_parameters<E, C, R>(
    circuit: C,
    mut rng: &mut R,
) -> Result<Parameters<E>, SynthesisError>
where
    E: Engine,
    E::G1: WnafGroup,
    E::G2: WnafGroup,
    C: RandomCircuit<E::Fr>,
    R: RngCore,
{
    let g1 = E::G1::random(&mut rng);
    let g2 = E::G2::random(&mut rng);
    let alpha = E::Fr::random(&mut rng);
    let beta = E::Fr::random(&mut rng);
    let gamma = E::Fr::random(&mut rng);
    let delta = E::Fr::random(&mut rng);
    // NOTE(review): `deltap` appears to be an extra trapdoor used by the
    // random-coin ("mirage") variant — confirm against `generate_parameters`.
    let deltap = E::Fr::random(&mut rng);
    let tau = E::Fr::random(&mut rng);
    generate_parameters::<E, C>(circuit, g1, g2, alpha, beta, gamma, delta, deltap, tau)
}
/// This is our assembly structure that we'll use to synthesize the
/// circuit into a QAP.
struct KeypairAssembly<Scalar: PrimeField> {
    // Number of public input variables allocated so far.
    num_inputs: usize,
    // Number of auxiliary (witness) variables allocated so far.
    num_aux: usize,
    // Number of constraints enforced so far.
    num_constraints: usize,
    // Count of aux variables allocated before any random coin existed
    // (see `alloc`).
    num_nonrandom: usize,
    // Per-input columns of the A/B/C matrices as (coefficient, constraint
    // row) pairs.
    at_inputs: Vec<Vec<(Scalar, usize)>>,
    bt_inputs: Vec<Vec<(Scalar, usize)>>,
    ct_inputs: Vec<Vec<(Scalar, usize)>>,
    // Per-aux columns of the A/B/C matrices.
    at_aux: Vec<Vec<(Scalar, usize)>>,
    bt_aux: Vec<Vec<(Scalar, usize)>>,
    ct_aux: Vec<Vec<(Scalar, usize)>>,
    // TODO: Do we care about whether or not the prover adds non-random
    // aux values after the coins? Probably not?
    // Indices of aux variables that are random coins — presumably populated
    // by the coin-allocation path (not visible here); verify.
    coin_inds: Vec<usize>,
    // Set once any allocation or constraint happens after a coin exists.
    coins_done: bool,
}
impl<Scalar: PrimeField> ConstraintSystem<Scalar> for KeypairAssembly<Scalar> {
type Root = Self;
fn alloc<F, A, AR>(&mut self, _: A, _: F) -> Result<Variable, SynthesisError>
where
    F: FnOnce() -> Result<Scalar, SynthesisError>,
    A: FnOnce() -> AR,
    AR: Into<String>,
{
    // There is no assignment, so we don't even invoke the
    // function for obtaining one.
    let index = self.num_aux;
    self.num_aux += 1;
    // Fresh empty columns in the A, B, C matrices for the new variable.
    self.at_aux.push(vec![]);
    self.bt_aux.push(vec![]);
    self.ct_aux.push(vec![]);
    if self.coin_inds.is_empty() {
        // Still before the coin phase: count this aux var as non-random.
        self.num_nonrandom += 1;
    } else {
        // An aux var allocated after a coin ends the coin phase.
        self.coins_done = true;
    }
    Ok(Variable(Index::Aux(index)))
}
fn alloc_input<F, A, AR>(&mut self, a: A, _: F) -> Result<Variable, SynthesisError>
where
F: FnOnce() -> Result<Scalar, SynthesisError>,
A: FnOnce() -> AR,
AR: Into<String>,
{
// There is no assignment, so we don't even invoke the
// function for obtaining one.
//
println!("allocating input: {}", a().into());
let index = self.num_inputs;
self.num_inputs += 1;
self.at_inputs.push(vec![]);
self.bt_inputs.push(vec![]);
self.ct_inputs.push(vec![]);
if !self.coin_inds.is_empty() {
self.coins_done = true;
}
Ok(Variable(Index::Input(index)))
}
fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, a: LA, b: LB, c: LC)
where
A: FnOnce() -> AR,
AR: Into<String>,
LA: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
LB: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
LC: FnOnce(LinearCombination<Scalar>) -> LinearCombination<Scalar>,
{
fn eval<Scalar: PrimeField>(
l: LinearCombination<Scalar>,
inputs: &mut [Vec<(Scalar, usize)>],
aux: &mut [Vec<(Scalar, usize)>],
this_constraint: usize,
) {
for (index, coeff) in l.0 {
match index {
Variable(Index::Input(id)) => inputs[id].push((coeff, this_constraint)),
Variable(Index::Aux(id)) => aux[id].push((coeff, this_constraint)),
}
}
}
eval(
a(LinearCombination::zero()),
&mut self.at_inputs,
&mut self.at_aux,
self.num_constraints,
);
eval(
b(LinearCombination::zero()),
&mut self.bt_inputs,
&mut self.bt_aux,
self.num_constraints,
);
eval(
c(LinearCombination::zero()),
&mut self.ct_inputs,
&mut self.ct_aux,
self.num_constraints,
);
if !self.coin_inds.is_empty() {
self.coins_done = true;
}
self.num_constraints += 1;
}
fn push_namespace<NR, N>(&mut self, _: N)
where
NR: Into<String>,
N: FnOnce() -> NR,
{
// Do nothing; we don't care about namespaces in this context.
}
fn pop_namespace(&mut self) {
// Do nothing; we don't care about namespaces in this context.
}
fn get_root(&mut self) -> &mut Self::Root {
self
}
}
impl<Scalar: PrimeField> RandomConstraintSystem<Scalar> for KeypairAssembly<Scalar> {
    /// Allocate a verifier-random coin as a public input.
    ///
    /// During keypair generation there is no assignment, so the
    /// returned scalar is a placeholder (`one`). Coins must form one
    /// contiguous block at the start of synthesis; a coin requested
    /// after any other allocation/constraint yields `LateRandomCoin`.
    fn alloc_random_coin<A, AR>(
        &mut self,
        _annotation: A,
    ) -> Result<(Variable, Scalar), SynthesisError>
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        if self.coins_done {
            return Err(SynthesisError::LateRandomCoin);
        }
        let index = self.num_inputs;
        self.coin_inds.push(index);
        self.num_inputs += 1;
        self.at_inputs.push(vec![]);
        self.bt_inputs.push(vec![]);
        self.ct_inputs.push(vec![]);
        Ok((Variable(Index::Input(index)), Scalar::one()))
    }
}
/// Create parameters for a circuit, given some toxic waste.
///
/// `tau` is the point at which the QAP polynomials are evaluated in the
/// exponent; `alpha`, `beta`, `gamma`, `delta` and `deltap` (delta')
/// are the remaining trapdoors. All of them must be discarded after
/// generation. `gamma` blinds the IC (public-input) query, `deltap`
/// the J (non-random aux) query and `delta` the L (random aux) and H
/// queries.
#[allow(clippy::too_many_arguments)]
pub fn generate_parameters<E, C>(
    circuit: C,
    g1: E::G1,
    g2: E::G2,
    alpha: E::Fr,
    beta: E::Fr,
    gamma: E::Fr,
    delta: E::Fr,
    deltap: E::Fr,
    tau: E::Fr,
) -> Result<Parameters<E>, SynthesisError>
where
    E: Engine,
    E::G1: WnafGroup,
    E::G2: WnafGroup,
    C: RandomCircuit<E::Fr>,
{
    let mut assembly = KeypairAssembly {
        num_inputs: 0,
        num_aux: 0,
        num_constraints: 0,
        num_nonrandom: 0,
        at_inputs: vec![],
        bt_inputs: vec![],
        ct_inputs: vec![],
        at_aux: vec![],
        bt_aux: vec![],
        ct_aux: vec![],
        coin_inds: vec![],
        coins_done: false,
    };
    // Allocate the "one" input variable
    assembly.alloc_input(|| "", || Ok(E::Fr::one()))?;
    // Synthesize the circuit.
    circuit.synthesize(&mut assembly)?;
    // Input constraints to ensure full density of IC query
    // x * 0 = 0
    for i in 0..assembly.num_inputs {
        assembly.enforce(|| "", |lc| lc + Variable(Index::Input(i)), |lc| lc, |lc| lc);
    }
    // Create bases for blind evaluation of polynomials at tau
    let powers_of_tau = vec![Scalar::<E::Fr>(E::Fr::zero()); assembly.num_constraints];
    let mut powers_of_tau = EvaluationDomain::from_coeffs(powers_of_tau)?;
    // Compute G1 window table
    let mut g1_wnaf = Wnaf::new();
    let g1_wnaf = g1_wnaf.base(g1, {
        // H query
        (powers_of_tau.as_ref().len() - 1)
        // IC/L queries
        + assembly.num_inputs + assembly.num_aux
        // A query
        + assembly.num_inputs + assembly.num_aux
        // B query
        + assembly.num_inputs + assembly.num_aux
    });
    // Compute G2 window table
    let mut g2_wnaf = Wnaf::new();
    let g2_wnaf = g2_wnaf.base(g2, {
        // B query
        assembly.num_inputs + assembly.num_aux
    });
    // Invert a trapdoor, rejecting zero (which would make the CRS
    // degenerate). Shared by gamma, delta and delta'.
    let invert_nonzero = |f: &E::Fr| -> Result<E::Fr, SynthesisError> {
        let inverse = f.invert();
        if bool::from(inverse.is_some()) {
            Ok(inverse.unwrap())
        } else {
            Err(SynthesisError::UnexpectedIdentity)
        }
    };
    let gamma_inverse = invert_nonzero(&gamma)?;
    let delta_inverse = invert_nonzero(&delta)?;
    let deltap_inverse = invert_nonzero(&deltap)?;
    let worker = Worker::new();
    let mut h = vec![E::G1Affine::identity(); powers_of_tau.as_ref().len() - 1];
    {
        // Compute powers of tau
        {
            let powers_of_tau = powers_of_tau.as_mut();
            worker.scope(powers_of_tau.len(), |scope, chunk| {
                for (i, powers_of_tau) in powers_of_tau.chunks_mut(chunk).enumerate() {
                    scope.spawn(move |_scope| {
                        // Each chunk starts at tau^(i * chunk) and steps by tau.
                        let mut current_tau_power = tau.pow_vartime(&[(i * chunk) as u64]);
                        for p in powers_of_tau {
                            p.0 = current_tau_power;
                            current_tau_power.mul_assign(&tau);
                        }
                    });
                }
            });
        }
        // coeff = t(x) / delta
        let mut coeff = powers_of_tau.z(&tau);
        coeff.mul_assign(&delta_inverse);
        // Compute the H query with multiple threads
        worker.scope(h.len(), |scope, chunk| {
            for (h, p) in h
                .chunks_mut(chunk)
                .zip(powers_of_tau.as_ref().chunks(chunk))
            {
                let mut g1_wnaf = g1_wnaf.shared();
                scope.spawn(move |_scope| {
                    // Set values of the H query to g1^{(tau^i * t(tau)) / delta}
                    let h_proj: Vec<_> = p[..h.len()]
                        .iter()
                        .map(|p| {
                            // Compute final exponent
                            let mut exp = p.0;
                            exp.mul_assign(&coeff);
                            // Exponentiate
                            g1_wnaf.scalar(&exp)
                        })
                        .collect();
                    // Batch normalize
                    E::G1::batch_normalize(&h_proj, h);
                });
            }
        });
    }
    // Use inverse FFT to convert powers of tau to Lagrange coefficients
    powers_of_tau.ifft(&worker);
    let powers_of_tau = powers_of_tau.into_coeffs();
    let mut a = vec![E::G1Affine::identity(); assembly.num_inputs + assembly.num_aux];
    let mut b_g1 = vec![E::G1Affine::identity(); assembly.num_inputs + assembly.num_aux];
    let mut b_g2 = vec![E::G2Affine::identity(); assembly.num_inputs + assembly.num_aux];
    let mut ic = vec![E::G1Affine::identity(); assembly.num_inputs];
    // l only contains non-random indices
    // j has all random ones
    let mut j = vec![E::G1Affine::identity(); assembly.num_nonrandom];
    let mut l = vec![E::G1Affine::identity(); assembly.num_aux - assembly.num_nonrandom];
    // Evaluate one slice of the QAP at tau in the exponent, filling the
    // A/B queries and one "ext" query (IC, J or L depending on `inv`).
    #[allow(clippy::too_many_arguments)]
    fn eval<E: Engine>(
        // wNAF window tables
        g1_wnaf: &Wnaf<usize, &[E::G1], &mut Vec<i64>>,
        g2_wnaf: &Wnaf<usize, &[E::G2], &mut Vec<i64>>,
        // Lagrange coefficients for tau
        powers_of_tau: &[Scalar<E::Fr>],
        // QAP polynomials
        at: &[Vec<(E::Fr, usize)>],
        bt: &[Vec<(E::Fr, usize)>],
        ct: &[Vec<(E::Fr, usize)>],
        // Resulting evaluated QAP polynomials
        a: &mut [E::G1Affine],
        b_g1: &mut [E::G1Affine],
        b_g2: &mut [E::G2Affine],
        ext: &mut [E::G1Affine],
        // Inverse coefficient for ext elements
        inv: &E::Fr,
        // Trapdoors
        alpha: &E::Fr,
        beta: &E::Fr,
        // Worker
        worker: &Worker,
    ) {
        // Sanity check
        assert_eq!(a.len(), at.len());
        assert_eq!(a.len(), bt.len());
        assert_eq!(a.len(), ct.len());
        assert_eq!(a.len(), b_g1.len());
        assert_eq!(a.len(), b_g2.len());
        assert_eq!(a.len(), ext.len());
        // Evaluate polynomials in multiple threads
        worker.scope(a.len(), |scope, chunk| {
            for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a
                .chunks_mut(chunk)
                .zip(b_g1.chunks_mut(chunk))
                .zip(b_g2.chunks_mut(chunk))
                .zip(ext.chunks_mut(chunk))
                .zip(at.chunks(chunk))
                .zip(bt.chunks(chunk))
                .zip(ct.chunks(chunk))
            {
                let mut g1_wnaf = g1_wnaf.shared();
                let mut g2_wnaf = g2_wnaf.shared();
                scope.spawn(move |_scope| {
                    let mut a_proj = vec![E::G1::identity(); a.len()];
                    let mut b_g1_proj = vec![E::G1::identity(); b_g1.len()];
                    let mut b_g2_proj = vec![E::G2::identity(); b_g2.len()];
                    let mut ext_proj = vec![E::G1::identity(); ext.len()];
                    for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a_proj
                        .iter_mut()
                        .zip(b_g1_proj.iter_mut())
                        .zip(b_g2_proj.iter_mut())
                        .zip(ext_proj.iter_mut())
                        .zip(at.iter())
                        .zip(bt.iter())
                        .zip(ct.iter())
                    {
                        // Evaluate one sparse polynomial at tau via its
                        // Lagrange coefficients.
                        fn eval_at_tau<S: PrimeField>(
                            powers_of_tau: &[Scalar<S>],
                            p: &[(S, usize)],
                        ) -> S {
                            let mut acc = S::zero();
                            for &(ref coeff, index) in p {
                                let mut n = powers_of_tau[index].0;
                                n.mul_assign(coeff);
                                acc.add_assign(&n);
                            }
                            acc
                        }
                        // Evaluate QAP polynomials at tau
                        let mut at = eval_at_tau(powers_of_tau, at);
                        let mut bt = eval_at_tau(powers_of_tau, bt);
                        let ct = eval_at_tau(powers_of_tau, ct);
                        // Compute A query (in G1)
                        if !at.is_zero_vartime() {
                            *a = g1_wnaf.scalar(&at);
                        }
                        // Compute B query (in G1/G2)
                        if !bt.is_zero_vartime() {
                            *b_g1 = g1_wnaf.scalar(&bt);
                            *b_g2 = g2_wnaf.scalar(&bt);
                        }
                        // ext = (beta*u(tau) + alpha*v(tau) + w(tau)) * inv
                        at *= beta;
                        bt *= alpha;
                        let mut e = at;
                        e.add_assign(&bt);
                        e.add_assign(&ct);
                        e.mul_assign(inv);
                        *ext = g1_wnaf.scalar(&e);
                    }
                    // Batch normalize
                    E::G1::batch_normalize(&a_proj, a);
                    E::G1::batch_normalize(&b_g1_proj, b_g1);
                    E::G2::batch_normalize(&b_g2_proj, b_g2);
                    E::G1::batch_normalize(&ext_proj, ext);
                });
            }
        });
    }
    // Evaluate for inputs.
    eval::<E>(
        &g1_wnaf,
        &g2_wnaf,
        &powers_of_tau,
        &assembly.at_inputs,
        &assembly.bt_inputs,
        &assembly.ct_inputs,
        &mut a[0..assembly.num_inputs],
        &mut b_g1[0..assembly.num_inputs],
        &mut b_g2[0..assembly.num_inputs],
        &mut ic,
        &gamma_inverse,
        &alpha,
        &beta,
        &worker,
    );
    let nonrandom_a_end = assembly.num_inputs + assembly.num_nonrandom;
    // Evaluate for non-random auxiliary variables (J query, blinded by
    // delta').
    eval::<E>(
        &g1_wnaf,
        &g2_wnaf,
        &powers_of_tau,
        &assembly.at_aux[..assembly.num_nonrandom],
        &assembly.bt_aux[..assembly.num_nonrandom],
        &assembly.ct_aux[..assembly.num_nonrandom],
        &mut a[assembly.num_inputs..nonrandom_a_end],
        &mut b_g1[assembly.num_inputs..nonrandom_a_end],
        &mut b_g2[assembly.num_inputs..nonrandom_a_end],
        &mut j,
        &deltap_inverse,
        &alpha,
        &beta,
        &worker,
    );
    // Evaluate for random auxiliary variables (L query, blinded by
    // delta).
    eval::<E>(
        &g1_wnaf,
        &g2_wnaf,
        &powers_of_tau,
        &assembly.at_aux[assembly.num_nonrandom..],
        &assembly.bt_aux[assembly.num_nonrandom..],
        &assembly.ct_aux[assembly.num_nonrandom..],
        &mut a[nonrandom_a_end..],
        &mut b_g1[nonrandom_a_end..],
        &mut b_g2[nonrandom_a_end..],
        &mut l,
        &delta_inverse,
        &alpha,
        &beta,
        &worker,
    );
    // Don't allow any elements be unconstrained, so that
    // the L query is always fully dense.
    for e in l.iter() {
        if e.is_identity().into() {
            return Err(SynthesisError::UnconstrainedVariable);
        }
    }
    for e in j.iter() {
        if e.is_identity().into() {
            return Err(SynthesisError::UnconstrainedVariable);
        }
    }
    let g1 = g1.to_affine();
    let g2 = g2.to_affine();
    let vk = VerifyingKey::<E> {
        alpha_g1: (g1 * alpha).to_affine(),
        beta_g1: (g1 * beta).to_affine(),
        beta_g2: (g2 * beta).to_affine(),
        gamma_g2: (g2 * gamma).to_affine(),
        delta_g1: (g1 * delta).to_affine(),
        delta_g2: (g2 * delta).to_affine(),
        deltap_g1: (g1 * deltap).to_affine(),
        deltap_g2: (g2 * deltap).to_affine(),
        ic,
        num_coins: assembly.coin_inds.len(),
    };
    Ok(Parameters {
        vk,
        h: Arc::new(h),
        l: Arc::new(l),
        j: Arc::new(j),
        // Filter points at infinity away from A/B queries
        a: Arc::new(
            a.into_iter()
                .filter(|e| bool::from(!e.is_identity()))
                .collect(),
        ),
        b_g1: Arc::new(
            b_g1.into_iter()
                .filter(|e| bool::from(!e.is_identity()))
                .collect(),
        ),
        b_g2: Arc::new(
            b_g2.into_iter()
                .filter(|e| bool::from(!e.is_identity()))
                .collect(),
        ),
    })
}

643
third_party/bellman/src/mirage/mod.rs vendored Normal file
View File

@@ -0,0 +1,643 @@
//! The [Groth16] proving system.
//!
//! [Groth16]: https://eprint.iacr.org/2016/260
use group::{prime::PrimeCurveAffine, GroupEncoding, UncompressedEncoding};
use pairing::{Engine, MultiMillerLoop};
use crate::SynthesisError;
use crate::multiexp::SourceBuilder;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use std::io::{self, Read, Write};
use std::sync::Arc;
#[cfg(test)]
mod tests;
mod generator;
mod prover;
mod verifier;
pub use self::generator::*;
pub use self::prover::*;
pub use self::verifier::*;
/// A Mirage proof: the three Groth16 group elements plus an extra G1
/// element `d`, which the prover derives from the non-random auxiliary
/// assignment and which also seeds the Fiat–Shamir coins.
#[derive(Clone, Debug)]
pub struct Proof<E: Engine> {
    pub a: E::G1Affine,
    pub b: E::G2Affine,
    pub c: E::G1Affine,
    pub d: E::G1Affine,
    // Coins that were used in this proof...
    //pub coins: Vec<E::Fr>,
}
impl<E: Engine> PartialEq for Proof<E> {
    /// Proofs are equal iff all four group elements match.
    fn eq(&self, other: &Self) -> bool {
        (&self.a, &self.b, &self.c, &self.d) == (&other.a, &other.b, &other.c, &other.d)
        //&& self.coins == other.coins
    }
}
impl<E: Engine> Proof<E> {
    /// Serialize the proof as the compressed encodings of `a`, `b`,
    /// `c`, `d`, in that order. Coins are not part of the wire format.
    pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
        writer.write_all(self.a.to_bytes().as_ref())?;
        writer.write_all(self.b.to_bytes().as_ref())?;
        writer.write_all(self.c.to_bytes().as_ref())?;
        writer.write_all(self.d.to_bytes().as_ref())?;
        //writer.write_u32::<BigEndian>(self.coins.len() as u32)?;
        //use ff::PrimeField;
        //for coin in &self.coins {
        //    println!("size of coin: {:?}", coin.to_repr().as_ref().len());
        //    writer.write_all(coin.to_repr().as_ref())?;
        //}
        Ok(())
    }

    /// Deserialize a proof written by [`Proof::write`].
    ///
    /// Each element is decoded from its compressed encoding; an invalid
    /// encoding or a point at infinity yields `InvalidData`.
    pub fn read<R: Read>(mut reader: R) -> io::Result<Self> {
        // Decode one compressed G1 point, rejecting invalid encodings
        // and the identity.
        let read_g1 = |reader: &mut R| -> io::Result<E::G1Affine> {
            let mut g1_repr = <E::G1Affine as GroupEncoding>::Repr::default();
            reader.read_exact(g1_repr.as_mut())?;
            let affine = E::G1Affine::from_bytes(&g1_repr);
            let affine = if affine.is_some().into() {
                Ok(affine.unwrap())
            } else {
                Err(io::Error::new(io::ErrorKind::InvalidData, "invalid G1"))
            };
            affine.and_then(|e| {
                if e.is_identity().into() {
                    Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "point at infinity",
                    ))
                } else {
                    Ok(e)
                }
            })
        };
        // Same as `read_g1`, for compressed G2 points.
        let read_g2 = |reader: &mut R| -> io::Result<E::G2Affine> {
            let mut g2_repr = <E::G2Affine as GroupEncoding>::Repr::default();
            reader.read_exact(g2_repr.as_mut())?;
            let affine = E::G2Affine::from_bytes(&g2_repr);
            let affine = if affine.is_some().into() {
                Ok(affine.unwrap())
            } else {
                Err(io::Error::new(io::ErrorKind::InvalidData, "invalid G2"))
            };
            affine.and_then(|e| {
                if e.is_identity().into() {
                    Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "point at infinity",
                    ))
                } else {
                    Ok(e)
                }
            })
        };
        let a = read_g1(&mut reader)?;
        let b = read_g2(&mut reader)?;
        let c = read_g1(&mut reader)?;
        let d = read_g1(&mut reader)?;
        //let coin_len = reader.read_u32::<BigEndian>()?;
        //let mut coins = vec![];
        //use ff::PrimeField;
        //for _ in 0..coin_len {
        //    let mut coin_repr = <E::Fr as PrimeField>::Repr::default();
        //    reader.read_exact(coin_repr.as_mut())?;
        //    let coin = <E::Fr as PrimeField>::from_repr(coin_repr);
        //    if coin.is_some().unwrap_u8() == 1 {
        //        coins.push(coin.unwrap());
        //    } else {
        //        return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid coin"));
        //    }
        //}
        Ok(Proof { a, b, c, d })
    }
}
/// Mirage verifying key: the Groth16 key extended with delta' elements
/// for the non-random auxiliary (J) query, and the number of coins.
#[derive(Clone)]
pub struct VerifyingKey<E: Engine> {
    // alpha in g1 for verifying and for creating A/C elements of
    // proof. Never the point at infinity.
    pub alpha_g1: E::G1Affine,
    // beta in g1 and g2 for verifying and for creating B/C elements
    // of proof. Never the point at infinity.
    pub beta_g1: E::G1Affine,
    pub beta_g2: E::G2Affine,
    // gamma in g2 for verifying. Never the point at infinity.
    pub gamma_g2: E::G2Affine,
    // delta in g1/g2 for verifying and proving, essentially the magic
    // trapdoor that forces the prover to evaluate the C element of the
    // proof with only components from the CRS. Never the point at
    // infinity.
    pub delta_g1: E::G1Affine,
    pub delta_g2: E::G2Affine,
    // delta' in g1/g2: the analogous trapdoor for the non-random
    // auxiliary (J) query backing the proof's `d` element.
    pub deltap_g1: E::G1Affine,
    pub deltap_g2: E::G2Affine,
    // Elements of the form (beta * u_i(tau) + alpha v_i(tau) + w_i(tau)) / gamma
    // for all public inputs. Because all public inputs have a dummy constraint,
    // this is the same size as the number of inputs, and never contains points
    // at infinity.
    pub ic: Vec<E::G1Affine>,
    // Number of random coins the circuit allocates; the verifier must
    // recompute exactly this many.
    pub num_coins: usize,
}
impl<E: Engine> PartialEq for VerifyingKey<E> {
    /// Keys are equal iff every group element, the full IC vector and
    /// the coin count all match.
    fn eq(&self, other: &Self) -> bool {
        let fixed_eq = (&self.alpha_g1, &self.beta_g1, &self.beta_g2, &self.gamma_g2)
            == (&other.alpha_g1, &other.beta_g1, &other.beta_g2, &other.gamma_g2);
        let trapdoor_eq = (&self.delta_g1, &self.delta_g2, &self.deltap_g1, &self.deltap_g2)
            == (&other.delta_g1, &other.delta_g2, &other.deltap_g1, &other.deltap_g2);
        fixed_eq && trapdoor_eq && self.ic == other.ic && self.num_coins == other.num_coins
    }
}
impl<E: Engine> VerifyingKey<E> {
    /// Serialize the key in uncompressed form: the eight fixed group
    /// elements, a u32(BE)-length-prefixed IC vector, then the coin
    /// count as u32(BE).
    pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
        writer.write_all(self.alpha_g1.to_uncompressed().as_ref())?;
        writer.write_all(self.beta_g1.to_uncompressed().as_ref())?;
        writer.write_all(self.beta_g2.to_uncompressed().as_ref())?;
        writer.write_all(self.gamma_g2.to_uncompressed().as_ref())?;
        writer.write_all(self.delta_g1.to_uncompressed().as_ref())?;
        writer.write_all(self.delta_g2.to_uncompressed().as_ref())?;
        writer.write_all(self.deltap_g1.to_uncompressed().as_ref())?;
        writer.write_all(self.deltap_g2.to_uncompressed().as_ref())?;
        writer.write_u32::<BigEndian>(self.ic.len() as u32)?;
        for ic in &self.ic {
            writer.write_all(ic.to_uncompressed().as_ref())?;
        }
        writer.write_u32::<BigEndian>(self.num_coins as u32)?;
        Ok(())
    }

    /// Deserialize a key written by [`VerifyingKey::write`].
    ///
    /// Uses the validating `from_uncompressed` decode; IC elements
    /// additionally must not be the point at infinity.
    pub fn read<R: Read>(mut reader: R) -> io::Result<Self> {
        // Decode one uncompressed G1 point, rejecting invalid encodings.
        let read_g1 = |reader: &mut R| -> io::Result<E::G1Affine> {
            let mut g1_repr = <E::G1Affine as UncompressedEncoding>::Uncompressed::default();
            reader.read_exact(g1_repr.as_mut())?;
            let affine = E::G1Affine::from_uncompressed(&g1_repr);
            if affine.is_some().into() {
                Ok(affine.unwrap())
            } else {
                Err(io::Error::new(io::ErrorKind::InvalidData, "invalid G1"))
            }
        };
        // Same as `read_g1`, for uncompressed G2 points.
        let read_g2 = |reader: &mut R| -> io::Result<E::G2Affine> {
            let mut g2_repr = <E::G2Affine as UncompressedEncoding>::Uncompressed::default();
            reader.read_exact(g2_repr.as_mut())?;
            let affine = E::G2Affine::from_uncompressed(&g2_repr);
            if affine.is_some().into() {
                Ok(affine.unwrap())
            } else {
                Err(io::Error::new(io::ErrorKind::InvalidData, "invalid G2"))
            }
        };
        let alpha_g1 = read_g1(&mut reader)?;
        let beta_g1 = read_g1(&mut reader)?;
        let beta_g2 = read_g2(&mut reader)?;
        let gamma_g2 = read_g2(&mut reader)?;
        let delta_g1 = read_g1(&mut reader)?;
        let delta_g2 = read_g2(&mut reader)?;
        let deltap_g1 = read_g1(&mut reader)?;
        let deltap_g2 = read_g2(&mut reader)?;
        let ic_len = reader.read_u32::<BigEndian>()? as usize;
        let mut ic = vec![];
        for _ in 0..ic_len {
            // IC elements are never the identity (every input has a
            // dummy constraint), so reject it here.
            let g1 = read_g1(&mut reader).and_then(|e| {
                if e.is_identity().into() {
                    Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "point at infinity",
                    ))
                } else {
                    Ok(e)
                }
            })?;
            ic.push(g1);
        }
        let num_coins = reader.read_u32::<BigEndian>()? as usize;
        Ok(VerifyingKey {
            alpha_g1,
            beta_g1,
            beta_g2,
            gamma_g2,
            delta_g1,
            delta_g2,
            deltap_g1,
            deltap_g2,
            ic,
            num_coins,
        })
    }
}
/// The full proving parameters: the verifying key plus the prover's
/// H, L, J, A and B query bases.
#[derive(Clone)]
pub struct Parameters<E: Engine> {
    pub vk: VerifyingKey<E>,
    // Elements of the form ((tau^i * t(tau)) / delta) for i between 0 and
    // m-2 inclusive. Never contains points at infinity.
    pub h: Arc<Vec<E::G1Affine>>,
    // Elements of the form (beta * u_i(tau) + alpha v_i(tau) + w_i(tau)) / delta
    // for all random auxiliary inputs. Variables can never be unconstrained, so this
    // never contains points at infinity.
    pub l: Arc<Vec<E::G1Affine>>,
    // Elements of the form (beta * u_i(tau) + alpha v_i(tau) + w_i(tau)) / delta'
    // for all non-random inputs. Variables can never be unconstrained, so this
    // never contains points at infinity.
    pub j: Arc<Vec<E::G1Affine>>,
    // QAP "A" polynomials evaluated at tau in the Lagrange basis. Never contains
    // points at infinity: polynomials that evaluate to zero are omitted from
    // the CRS and the prover can deterministically skip their evaluation.
    pub a: Arc<Vec<E::G1Affine>>,
    // QAP "B" polynomials evaluated at tau in the Lagrange basis. Needed in
    // G1 and G2 for C/B queries, respectively. Never contains points at
    // infinity for the same reason as the "A" polynomials.
    pub b_g1: Arc<Vec<E::G1Affine>>,
    pub b_g2: Arc<Vec<E::G2Affine>>,
}
impl<E: Engine> PartialEq for Parameters<E> {
    /// Parameters are equal iff the verifying key and every query
    /// vector match element-for-element.
    fn eq(&self, other: &Self) -> bool {
        if self.vk != other.vk {
            return false;
        }
        self.h == other.h
            && self.l == other.l
            && self.j == other.j
            && self.a == other.a
            && self.b_g1 == other.b_g1
            && self.b_g2 == other.b_g2
    }
}
impl<E: Engine> Parameters<E> {
    /// Serialize the parameters: the verifying key, then each query
    /// vector (h, l, j, a, b_g1, b_g2) as a u32(BE) length followed by
    /// uncompressed group elements.
    pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
        self.vk.write(&mut writer)?;
        writer.write_u32::<BigEndian>(self.h.len() as u32)?;
        for g in &self.h[..] {
            writer.write_all(g.to_uncompressed().as_ref())?;
        }
        writer.write_u32::<BigEndian>(self.l.len() as u32)?;
        for g in &self.l[..] {
            writer.write_all(g.to_uncompressed().as_ref())?;
        }
        writer.write_u32::<BigEndian>(self.j.len() as u32)?;
        for j in &self.j[..] {
            writer.write_all(j.to_uncompressed().as_ref())?;
        }
        writer.write_u32::<BigEndian>(self.a.len() as u32)?;
        for g in &self.a[..] {
            writer.write_all(g.to_uncompressed().as_ref())?;
        }
        writer.write_u32::<BigEndian>(self.b_g1.len() as u32)?;
        for g in &self.b_g1[..] {
            writer.write_all(g.to_uncompressed().as_ref())?;
        }
        writer.write_u32::<BigEndian>(self.b_g2.len() as u32)?;
        for g in &self.b_g2[..] {
            writer.write_all(g.to_uncompressed().as_ref())?;
        }
        Ok(())
    }

    /// Deserialize parameters written by [`Parameters::write`].
    ///
    /// When `checked` is true, group elements are decoded with full
    /// curve/subgroup validation; when false, the unchecked decode is
    /// used (faster, but only safe for trusted inputs). Points at
    /// infinity are rejected in either mode.
    pub fn read<R: Read>(mut reader: R, checked: bool) -> io::Result<Self> {
        // Decode one uncompressed G1 point, honoring `checked`.
        let read_g1 = |reader: &mut R| -> io::Result<E::G1Affine> {
            let mut repr = <E::G1Affine as UncompressedEncoding>::Uncompressed::default();
            reader.read_exact(repr.as_mut())?;
            let affine = if checked {
                E::G1Affine::from_uncompressed(&repr)
            } else {
                E::G1Affine::from_uncompressed_unchecked(&repr)
            };
            let affine = if affine.is_some().into() {
                Ok(affine.unwrap())
            } else {
                Err(io::Error::new(io::ErrorKind::InvalidData, "invalid G1"))
            };
            affine.and_then(|e| {
                if e.is_identity().into() {
                    Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "point at infinity",
                    ))
                } else {
                    Ok(e)
                }
            })
        };
        // Same as `read_g1`, for uncompressed G2 points.
        let read_g2 = |reader: &mut R| -> io::Result<E::G2Affine> {
            let mut repr = <E::G2Affine as UncompressedEncoding>::Uncompressed::default();
            reader.read_exact(repr.as_mut())?;
            let affine = if checked {
                E::G2Affine::from_uncompressed(&repr)
            } else {
                E::G2Affine::from_uncompressed_unchecked(&repr)
            };
            let affine = if affine.is_some().into() {
                Ok(affine.unwrap())
            } else {
                Err(io::Error::new(io::ErrorKind::InvalidData, "invalid G2"))
            };
            affine.and_then(|e| {
                if e.is_identity().into() {
                    Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "point at infinity",
                    ))
                } else {
                    Ok(e)
                }
            })
        };
        let vk = VerifyingKey::<E>::read(&mut reader)?;
        let mut h = vec![];
        let mut l = vec![];
        let mut j = vec![];
        let mut a = vec![];
        let mut b_g1 = vec![];
        let mut b_g2 = vec![];
        // Each query vector is length-prefixed; read in write order.
        {
            let len = reader.read_u32::<BigEndian>()? as usize;
            for _ in 0..len {
                h.push(read_g1(&mut reader)?);
            }
        }
        {
            let len = reader.read_u32::<BigEndian>()? as usize;
            for _ in 0..len {
                l.push(read_g1(&mut reader)?);
            }
        }
        {
            let len = reader.read_u32::<BigEndian>()? as usize;
            for _ in 0..len {
                j.push(read_g1(&mut reader)?);
            }
        }
        {
            let len = reader.read_u32::<BigEndian>()? as usize;
            for _ in 0..len {
                a.push(read_g1(&mut reader)?);
            }
        }
        {
            let len = reader.read_u32::<BigEndian>()? as usize;
            for _ in 0..len {
                b_g1.push(read_g1(&mut reader)?);
            }
        }
        {
            let len = reader.read_u32::<BigEndian>()? as usize;
            for _ in 0..len {
                b_g2.push(read_g2(&mut reader)?);
            }
        }
        Ok(Parameters {
            vk,
            h: Arc::new(h),
            l: Arc::new(l),
            j: Arc::new(j),
            a: Arc::new(a),
            b_g1: Arc::new(b_g1),
            b_g2: Arc::new(b_g2),
        })
    }
}
/// A verifying key with pairing inputs precomputed for repeated
/// verification.
pub struct PreparedVerifyingKey<E: MultiMillerLoop> {
    /// Pairing result of alpha*beta
    alpha_g1_beta_g2: E::Gt,
    /// -gamma in G2
    neg_gamma_g2: E::G2Prepared,
    /// -delta in G2
    neg_delta_g2: E::G2Prepared,
    /// -deltap in G2
    neg_deltap_g2: E::G2Prepared,
    /// Copy of IC from `VerifyingKey`.
    ic: Vec<E::G1Affine>,
    /// Number of random coins the circuit expects.
    num_coins: usize,
}
/// Abstract source of CRS elements for the prover, so parameters can be
/// served from memory or another backing store. The `num_*` arguments
/// are hints for how many elements the prover will consume.
pub trait ParameterSource<E: Engine> {
    type G1Builder: SourceBuilder<E::G1Affine>;
    type G2Builder: SourceBuilder<E::G2Affine>;
    /// The verifying key.
    fn get_vk(&mut self, num_ic: usize) -> Result<VerifyingKey<E>, SynthesisError>;
    /// The H query (tau^i * t(tau) / delta).
    fn get_h(&mut self, num_h: usize) -> Result<Self::G1Builder, SynthesisError>;
    /// The L query (random aux, divided by delta).
    fn get_l(&mut self, num_l: usize) -> Result<Self::G1Builder, SynthesisError>;
    /// The J query (non-random aux, divided by delta').
    fn get_j(&mut self, num_j: usize) -> Result<Self::G1Builder, SynthesisError>;
    /// The A query, split into (input, aux) builders.
    fn get_a(
        &mut self,
        num_inputs: usize,
        num_aux: usize,
    ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>;
    /// The B query in G1, split into (input, aux) builders.
    fn get_b_g1(
        &mut self,
        num_inputs: usize,
        num_aux: usize,
    ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>;
    /// The B query in G2, split into (input, aux) builders.
    fn get_b_g2(
        &mut self,
        num_inputs: usize,
        num_aux: usize,
    ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError>;
}
/// In-memory parameter source: builders are (shared vector, start
/// offset) pairs, so handing out a query is a cheap `Arc` clone.
impl<'a, E: Engine> ParameterSource<E> for &'a Parameters<E> {
    type G1Builder = (Arc<Vec<E::G1Affine>>, usize);
    type G2Builder = (Arc<Vec<E::G2Affine>>, usize);

    fn get_vk(&mut self, _: usize) -> Result<VerifyingKey<E>, SynthesisError> {
        Ok(self.vk.clone())
    }

    fn get_h(&mut self, _: usize) -> Result<Self::G1Builder, SynthesisError> {
        Ok((Arc::clone(&self.h), 0))
    }

    fn get_l(&mut self, _: usize) -> Result<Self::G1Builder, SynthesisError> {
        Ok((Arc::clone(&self.l), 0))
    }

    fn get_j(&mut self, _: usize) -> Result<Self::G1Builder, SynthesisError> {
        Ok((Arc::clone(&self.j), 0))
    }

    fn get_a(
        &mut self,
        num_inputs: usize,
        _: usize,
    ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError> {
        // Inputs start at offset 0; aux elements follow the inputs.
        let inputs = (Arc::clone(&self.a), 0);
        let aux = (Arc::clone(&self.a), num_inputs);
        Ok((inputs, aux))
    }

    fn get_b_g1(
        &mut self,
        num_inputs: usize,
        _: usize,
    ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError> {
        let inputs = (Arc::clone(&self.b_g1), 0);
        let aux = (Arc::clone(&self.b_g1), num_inputs);
        Ok((inputs, aux))
    }

    fn get_b_g2(
        &mut self,
        num_inputs: usize,
        _: usize,
    ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError> {
        let inputs = (Arc::clone(&self.b_g2), 0);
        let aux = (Arc::clone(&self.b_g2), num_inputs);
        Ok((inputs, aux))
    }
}
#[cfg(test)]
/// Round-trip serialization tests over BLS12-381: generate parameters
/// for a tiny circuit, check the exact on-wire sizes, and verify that
/// proofs survive write/read. NOTE(review): the asserted byte counts
/// are specific to BLS12-381 encodings — confirm if the curve changes.
mod test_with_bls12_381 {
    use super::*;
    use crate::random::{RandomCircuit, RandomConstraintSystem};
    use crate::{Circuit, ConstraintSystem, SynthesisError};
    use bls12_381::{Bls12, Scalar};
    use ff::{Field, PrimeField};
    use rand::thread_rng;
    use std::ops::MulAssign;
    #[test]
    fn serialization() {
        // Minimal circuit: private a, b with public input c = a * b.
        struct MySillyCircuit<Scalar: PrimeField> {
            a: Option<Scalar>,
            b: Option<Scalar>,
        }
        impl<Scalar: PrimeField> RandomCircuit<Scalar> for MySillyCircuit<Scalar> {
            fn synthesize<CS: RandomConstraintSystem<Scalar>>(
                self,
                cs: &mut CS,
            ) -> Result<(), SynthesisError> {
                let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?;
                let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?;
                let c = cs.alloc_input(
                    || "c",
                    || {
                        let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?;
                        let b = self.b.ok_or(SynthesisError::AssignmentMissing)?;
                        a.mul_assign(&b);
                        Ok(a)
                    },
                )?;
                cs.enforce(|| "a*b=c", |lc| lc + a, |lc| lc + b, |lc| lc + c);
                Ok(())
            }
        }
        let mut rng = thread_rng();
        // Generation uses `None` assignments: only the circuit's shape
        // matters for the CRS.
        let params = generate_random_parameters::<Bls12, _, _>(
            MySillyCircuit { a: None, b: None },
            &mut rng,
        )
        .unwrap();
        {
            // Parameters must round-trip in both checked and unchecked
            // deserialization modes.
            let mut v = vec![];
            params.write(&mut v).unwrap();
            assert_eq!(v.len(), 2432);
            let de_params = Parameters::read(&v[..], true).unwrap();
            assert!(params == de_params);
            let de_params = Parameters::read(&v[..], false).unwrap();
            assert!(params == de_params);
        }
        let pvk = prepare_verifying_key::<Bls12>(&params.vk);
        for _ in 0..100 {
            let a = Scalar::random(&mut rng);
            let b = Scalar::random(&mut rng);
            let mut c = a;
            c.mul_assign(&b);
            let proof = create_random_proof(
                MySillyCircuit {
                    a: Some(a),
                    b: Some(b),
                },
                &params,
                &mut rng,
            )
            .unwrap();
            let mut v = vec![];
            proof.write(&mut v).unwrap();
            // 48 * 3 + 96 = 240
            assert_eq!(v.len(), 240);
            let de_proof = Proof::read(&v[..]).unwrap();
            assert!(proof == de_proof);
            // The proof verifies against the true product and fails
            // against a wrong public input.
            assert!(verify_proof(&pvk, &proof, &[c]).is_ok());
            assert!(verify_proof(&pvk, &proof, &[a]).is_err());
        }
    }
}

552
third_party/bellman/src/mirage/prover.rs vendored Normal file
View File

@@ -0,0 +1,552 @@
use rand_core::RngCore;
use std::ops::{AddAssign, MulAssign};
use std::sync::Arc;
use ff::{Field, PrimeField, PrimeFieldBits};
use group::{prime::PrimeCurveAffine, Curve};
use pairing::Engine;
use rand_chacha::ChaChaRng;
use sha2::{Digest, Sha256};
use super::{ParameterSource, Proof};
use crate::random::RandomCircuit;
use crate::random::RandomConstraintSystem;
use crate::{Circuit, ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};
use crate::domain::{EvaluationDomain, Scalar};
use crate::multiexp::{multiexp, DensityTracker, FullDensity};
use crate::multicore::Worker;
/// Evaluate a linear combination against the current assignment,
/// recording which variables were touched in the optional density
/// trackers.
fn eval<S: PrimeField>(
    lc: &LinearCombination<S>,
    mut input_density: Option<&mut DensityTracker>,
    mut aux_density: Option<&mut DensityTracker>,
    input_assignment: &[S],
    aux_assignment: &[S],
) -> S {
    let mut total = S::zero();
    for &(variable, coeff) in lc.0.iter() {
        // A zero coefficient contributes nothing and must not mark its
        // variable as used in the density trackers.
        if coeff.is_zero_vartime() {
            continue;
        }
        let Variable(idx) = variable;
        let mut term = match idx {
            Index::Input(i) => {
                if let Some(d) = input_density.as_mut() {
                    d.inc(i);
                }
                input_assignment[i]
            }
            Index::Aux(i) => {
                if let Some(d) = aux_density.as_mut() {
                    d.inc(i);
                }
                aux_assignment[i]
            }
        };
        // Skip the multiplication for the common coefficient of one.
        if coeff != S::one() {
            term *= coeff;
        }
        total += term;
    }
    total
}
/// Prover-side constraint system: records the witness assignment and
/// the A/B/C evaluations needed to build the proof.
struct ProvingAssignment<'a, E: Engine, S: PrimeField> {
    // Density of queries
    a_aux_density: DensityTracker,
    b_input_density: DensityTracker,
    b_aux_density: DensityTracker,
    // Evaluations of A, B, C polynomials
    a: Vec<Scalar<S>>,
    b: Vec<Scalar<S>>,
    c: Vec<Scalar<S>>,
    // Assignments of variables
    input_assignment: Vec<S>,
    aux_assignment: Vec<S>,
    // Aux assignment partitioned by whether the variable was allocated
    // before (non-random) or after (random) the first coin.
    nonrand_aux_assignment: Vec<S>,
    rand_aux_assignment: Vec<S>,
    num_nonrandom: usize,
    // Input indices at which random coins were allocated.
    coin_inds: Vec<usize>,
    // Set once any aux allocation happens after the first coin.
    coins_done: bool,
    // cached pid value for generating coins
    d: Option<E::G1Affine>,
    // Callback computing the `d` commitment from the non-random aux
    // assignment; evaluated lazily on the first coin request.
    get_d: &'a dyn Fn(&Vec<S>, usize) -> E::G1Affine,
}
impl<'a, E: Engine, S: PrimeField> ProvingAssignment<'a, E, S> {
    /// Derive the `num`-th Fiat–Shamir coin from the public inputs and
    /// the (lazily computed, then cached) `d` commitment.
    fn fiat_shamir_coin(&mut self, num: usize) -> S {
        // Compute `d` from the non-random aux assignment on first use.
        if self.d.is_none() {
            self.d = Some((self.get_d)(
                &self.nonrand_aux_assignment,
                self.num_nonrandom,
            ));
        }
        // don't include coins in the inputs...this doesn't really matter but
        // makes verification easier
        compute_coin::<E, S>(
            &self.input_assignment[..self.input_assignment.len() - self.coin_inds.len()],
            self.d.unwrap(),
            num,
        )
    }
}
/// Derive a Fiat–Shamir coin from the public inputs, the prover's `d`
/// commitment (`pid`) and the coin index `num`.
///
/// The transcript `inputs || pid || num` is hashed with SHA-256 and the
/// 32-byte digest seeds a ChaCha RNG, from which one uniform field
/// element is drawn — so the coin is a deterministic function of the
/// transcript, and distinct indices give independent coins.
pub fn compute_coin<E: Engine, S: PrimeField>(inputs: &[S], pid: E::G1Affine, num: usize) -> S {
    use group::GroupEncoding;
    use rand_core::SeedableRng;

    // Serialize each input scalar into the transcript.
    let mut transcript: Vec<u8> = inputs
        .iter()
        .flat_map(|s| s.to_repr().as_ref().to_vec())
        .collect();
    // Bind the coin to the prover's commitment and to its index.
    transcript.extend_from_slice(pid.to_bytes().as_ref());
    transcript.extend_from_slice(&num.to_be_bytes());

    let mut hasher = Sha256::new();
    hasher.update(transcript);
    let seed = hasher.finalize();

    // SHA-256's 32-byte digest exactly fills a ChaCha seed.
    let mut rng = ChaChaRng::from_seed(seed.try_into().unwrap());
    S::random(&mut rng)
}
/// Interpret `arr` as a base-256 big-endian integer in the field.
///
/// Returns `None` for an empty slice or any multi-byte value with a
/// leading zero; the single byte `[0]` maps to zero.
fn from_u8s<F: PrimeField>(arr: &[u8]) -> Option<F> {
    match arr {
        [] => None,
        [0] => Some(F::zero()),
        // A leading zero on a longer value is rejected as non-canonical.
        [0, ..] => None,
        _ => {
            let base = F::from(256);
            let mut acc = F::zero();
            // Horner-style accumulation: acc = acc * 256 + byte.
            for &byte in arr {
                acc.mul_assign(&base);
                acc.add_assign(&F::from(byte as u64));
            }
            Some(acc)
        }
    }
}
impl<E: Engine, S: PrimeField> ConstraintSystem<S> for ProvingAssignment<'_, E, S> {
    type Root = Self;

    /// Allocate an auxiliary (private) variable with its witness value.
    fn alloc<F, A, AR>(&mut self, _: A, f: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<S, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        // Field elements are `Copy`, so the value can be recorded both in
        // the full assignment and in its (non-)random partition directly.
        let value = f()?;
        self.aux_assignment.push(value);
        self.a_aux_density.add_element();
        self.b_aux_density.add_element();
        // Aux variables allocated before the first coin are non-random;
        // any allocation after a coin seals the coin phase.
        if self.coin_inds.is_empty() {
            self.num_nonrandom += 1;
            self.nonrand_aux_assignment.push(value);
        } else {
            self.coins_done = true;
            self.rand_aux_assignment.push(value);
        }
        Ok(Variable(Index::Aux(self.aux_assignment.len() - 1)))
    }

    /// Allocate a public input variable with its value.
    ///
    /// NOTE(review): unlike the generator's `KeypairAssembly`, this does
    /// not seal the coin phase — confirm the asymmetry is intentional.
    fn alloc_input<F, A, AR>(&mut self, _: A, f: F) -> Result<Variable, SynthesisError>
    where
        F: FnOnce() -> Result<S, SynthesisError>,
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        self.input_assignment.push(f()?);
        self.b_input_density.add_element();
        Ok(Variable(Index::Input(self.input_assignment.len() - 1)))
    }

    /// Evaluate the constraint `a * b = c` against the current
    /// assignment, recording the A/B/C values and query densities.
    fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, a: LA, b: LB, c: LC)
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
        LA: FnOnce(LinearCombination<S>) -> LinearCombination<S>,
        LB: FnOnce(LinearCombination<S>) -> LinearCombination<S>,
        LC: FnOnce(LinearCombination<S>) -> LinearCombination<S>,
    {
        let a = a(LinearCombination::zero());
        let b = b(LinearCombination::zero());
        let c = c(LinearCombination::zero());
        self.a.push(Scalar(eval(
            &a,
            // Inputs have full density in the A query
            // because there are constraints of the
            // form x * 0 = 0 for each input.
            None,
            Some(&mut self.a_aux_density),
            &self.input_assignment,
            &self.aux_assignment,
        )));
        self.b.push(Scalar(eval(
            &b,
            Some(&mut self.b_input_density),
            Some(&mut self.b_aux_density),
            &self.input_assignment,
            &self.aux_assignment,
        )));
        self.c.push(Scalar(eval(
            &c,
            // There is no C polynomial query,
            // though there is an (beta)A + (alpha)B + C
            // query for all aux variables.
            // However, that query has full density.
            None,
            None,
            &self.input_assignment,
            &self.aux_assignment,
        )));
    }

    fn push_namespace<NR, N>(&mut self, _: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn pop_namespace(&mut self) {
        // Do nothing; we don't care about namespaces in this context.
    }

    fn get_root(&mut self) -> &mut Self::Root {
        self
    }
}
impl<E: Engine, S: PrimeField> RandomConstraintSystem<S> for ProvingAssignment<'_, E, S> {
    /// Draws a Fiat–Shamir "random coin", registering it as a public input.
    ///
    /// Returns both the input variable and the coin's value. All coins must
    /// be drawn before any coin-dependent aux variable is allocated;
    /// `coins_done` is set by the first such allocation (see `alloc`).
    fn alloc_random_coin<A, AR>(&mut self, annotation: A) -> Result<(Variable, S), SynthesisError>
    where
        A: FnOnce() -> AR,
        AR: Into<String>,
    {
        if self.coins_done {
            return Err(SynthesisError::LateRandomCoin);
        }
        // Derive the coin deterministically from the transcript; the index
        // passed is the number of coins drawn so far.
        let coin = self.fiat_shamir_coin(self.coin_inds.len());
        self.input_assignment.push(coin);
        self.coin_inds.push(self.input_assignment.len() - 1);
        self.b_input_density.add_element();
        Ok((
            Variable(Index::Input(self.input_assignment.len() - 1)),
            coin,
        ))
    }
}
/// Creates a Groth16-style proof with freshly sampled blinding scalars.
///
/// Draws the three blinding values `r`, `s`, `t` from the caller's RNG and
/// delegates to the deterministic [`create_proof`].
pub fn create_random_proof<E, C, R, P: ParameterSource<E>>(
    circuit: C,
    params: P,
    rng: &mut R,
) -> Result<Proof<E>, SynthesisError>
where
    E: Engine,
    E::Fr: PrimeFieldBits,
    C: RandomCircuit<E::Fr>,
    R: RngCore,
{
    // Sample in the fixed order r, s, t so the RNG stream is consumed
    // exactly as callers expect.
    let mut draw = || E::Fr::random(&mut *rng);
    let (r, s, t) = (draw(), draw(), draw());
    create_proof::<E, C, P>(circuit, params, r, s, t)
}
/// Builds a proof for `circuit` with explicit blinding scalars `r`, `s` and
/// commitment scalar `_t`.
///
/// Pipeline: synthesize the circuit (drawing Fiat–Shamir coins via
/// `ProvingAssignment`), compute the quotient polynomial H over a coset FFT,
/// then combine the A/B/L/J multiexponentiations into the proof elements
/// (a, b, c, d). The extra `d` element commits to the non-random aux
/// assignment under the J query plus `delta * _t`.
#[allow(clippy::many_single_char_names)]
pub fn create_proof<E, C, P: ParameterSource<E>>(
    circuit: C,
    mut params: P,
    r: E::Fr,
    s: E::Fr,
    _t: E::Fr,
) -> Result<Proof<E>, SynthesisError>
where
    E: Engine,
    E::Fr: PrimeFieldBits,
    C: RandomCircuit<E::Fr>,
{
    let vk = params.get_vk(0)?;
    // J query bases, used by `get_pid` below; shadowed later by the J
    // multiexp result once synthesis has fixed the non-random aux count.
    let j = params.get_j(0).unwrap();
    // TODO: gross....
    // Computes the prover id d = delta*_t + <J bases, non-random aux>; handed
    // to the assignment so coins can be derived from it mid-synthesis.
    // NOTE(review): the `numnonrand` argument is currently unused here.
    let get_pid = |nonrand: &Vec<<E as Engine>::Fr>, numnonrand: usize| {
        let mut g_d = vk.delta_g1 * _t;
        let worker = Worker::new();
        let aux_nonrand_assignment =
            Arc::new(nonrand.into_iter().map(|s| s.into()).collect::<Vec<_>>());
        let j = multiexp(
            &worker,
            j.clone(),
            FullDensity,
            aux_nonrand_assignment.clone(),
        );
        AddAssign::<&E::G1>::add_assign(&mut g_d, &j.wait().unwrap());
        let res: E::G1Affine = g_d.to_affine();
        res
    };
    let mut prover: ProvingAssignment<E, <E as Engine>::Fr> = ProvingAssignment {
        a_aux_density: DensityTracker::new(),
        b_input_density: DensityTracker::new(),
        b_aux_density: DensityTracker::new(),
        a: vec![],
        b: vec![],
        c: vec![],
        input_assignment: vec![],
        aux_assignment: vec![],
        nonrand_aux_assignment: vec![],
        rand_aux_assignment: vec![],
        num_nonrandom: 0,
        coin_inds: vec![],
        coins_done: false,
        d: None,
        get_d: &get_pid,
    };
    // Input 0 is the constant ONE wire.
    prover.alloc_input(|| "", || Ok(E::Fr::one()))?;
    circuit.synthesize(&mut prover)?;
    //let coins = prover
    //    .coin_inds
    //    .iter()
    //    .map(|i| prover.input_assignment[*i])
    //    .collect();
    //println!("Got coins {:?}", coins);
    // One x * 0 = 0 constraint per input, giving inputs full A-density.
    for i in 0..prover.input_assignment.len() {
        prover.enforce(|| "", |lc| lc + Variable(Index::Input(i)), |lc| lc, |lc| lc);
    }
    let worker = Worker::new();
    // Quotient polynomial h = (A*B - C) / Z evaluated on a coset, then the
    // multiexp against the H query bases.
    let h = {
        let mut a = EvaluationDomain::from_coeffs(prover.a)?;
        let mut b = EvaluationDomain::from_coeffs(prover.b)?;
        let mut c = EvaluationDomain::from_coeffs(prover.c)?;
        a.ifft(&worker);
        a.coset_fft(&worker);
        b.ifft(&worker);
        b.coset_fft(&worker);
        c.ifft(&worker);
        c.coset_fft(&worker);
        a.mul_assign(&worker, &b);
        drop(b);
        a.sub_assign(&worker, &c);
        drop(c);
        a.divide_by_z_on_coset(&worker);
        a.icoset_fft(&worker);
        let mut a = a.into_coeffs();
        // The top coefficient is zero after exact division; drop it.
        let a_len = a.len() - 1;
        a.truncate(a_len);
        // TODO: parallelize if it's even helpful
        let a = Arc::new(a.into_iter().map(|s| s.0.into()).collect::<Vec<_>>());
        multiexp(&worker, params.get_h(a.len())?, FullDensity, a)
    };
    // NOTE(review): debug prints below ("nonrand aux", "ok2", "ok3", ...)
    // should be removed or demoted to log::debug! before release.
    println!("nonrand aux: {:?}", prover.nonrand_aux_assignment);
    println!("rand aux: {:?}", prover.rand_aux_assignment);
    // TODO: parallelize if it's even helpful
    let input_assignment = Arc::new(
        prover
            .input_assignment
            .into_iter()
            .map(|s| s.into())
            .collect::<Vec<_>>(),
    );
    let aux_assignment = Arc::new(
        prover
            .aux_assignment
            .clone()
            .into_iter()
            .map(|s| s.into())
            .collect::<Vec<_>>(),
    );
    let aux_nonrand_assignment = Arc::new(
        prover
            .nonrand_aux_assignment
            .into_iter()
            .map(|s| s.into())
            .collect::<Vec<_>>(),
    );
    let aux_rand_assignment = Arc::new(
        prover
            .rand_aux_assignment
            .into_iter()
            .map(|s| s.into())
            .collect::<Vec<_>>(),
    );
    // J covers the non-random aux variables (bound into `d`), L the
    // coin-dependent ones (bound into `c`).
    let j = multiexp(
        &worker,
        params.get_j(prover.num_nonrandom)?,
        FullDensity,
        aux_nonrand_assignment.clone(),
    );
    println!("ok2");
    let l = multiexp(
        &worker,
        params.get_l(aux_assignment.len() - prover.num_nonrandom)?,
        FullDensity,
        aux_rand_assignment.clone(),
    );
    println!("ok3");
    let a_aux_density_total = prover.a_aux_density.get_total_density();
    let (a_inputs_source, a_aux_source) =
        params.get_a(input_assignment.len(), a_aux_density_total)?;
    let a_inputs = multiexp(
        &worker,
        a_inputs_source,
        FullDensity,
        input_assignment.clone(),
    );
    let a_aux = multiexp(
        &worker,
        a_aux_source,
        Arc::new(prover.a_aux_density),
        aux_assignment.clone(),
    );
    let b_input_density = Arc::new(prover.b_input_density);
    let b_input_density_total = b_input_density.get_total_density();
    let b_aux_density = Arc::new(prover.b_aux_density);
    let b_aux_density_total = b_aux_density.get_total_density();
    let (b_g1_inputs_source, b_g1_aux_source) =
        params.get_b_g1(b_input_density_total, b_aux_density_total)?;
    let b_g1_inputs = multiexp(
        &worker,
        b_g1_inputs_source,
        b_input_density.clone(),
        input_assignment.clone(),
    );
    let b_g1_aux = multiexp(
        &worker,
        b_g1_aux_source,
        b_aux_density.clone(),
        aux_assignment.clone(),
    );
    let (b_g2_inputs_source, b_g2_aux_source) =
        params.get_b_g2(b_input_density_total, b_aux_density_total)?;
    let b_g2_inputs = multiexp(
        &worker,
        b_g2_inputs_source,
        b_input_density,
        input_assignment,
    );
    let b_g2_aux = multiexp(&worker, b_g2_aux_source, b_aux_density, aux_assignment);
    if bool::from(vk.delta_g1.is_identity() | vk.delta_g2.is_identity()) {
        // If this element is zero, someone is trying to perform a
        // subversion-CRS attack.
        return Err(SynthesisError::UnexpectedIdentity);
    }
    // Assemble the proof elements; see the Groth16 prover equations.
    let mut g_a = vk.delta_g1 * r;
    AddAssign::<&E::G1Affine>::add_assign(&mut g_a, &vk.alpha_g1);
    let mut g_b = vk.delta_g2 * s;
    AddAssign::<&E::G2Affine>::add_assign(&mut g_b, &vk.beta_g2);
    let mut g_c;
    {
        let mut rs = r;
        rs.mul_assign(&s);
        g_c = vk.delta_g1 * rs;
        use std::ops::Neg;
        // Cancels the delta' * _t term contributed via `d`.
        AddAssign::<&E::G1>::add_assign(&mut g_c, &(vk.deltap_g1.neg() * _t));
        AddAssign::<&E::G1>::add_assign(&mut g_c, &(vk.alpha_g1 * s));
        AddAssign::<&E::G1>::add_assign(&mut g_c, &(vk.beta_g1 * r));
    }
    let mut g_d = vk.delta_g1 * _t;
    let mut a_answer = a_inputs.wait()?;
    AddAssign::<&E::G1>::add_assign(&mut a_answer, &a_aux.wait()?);
    AddAssign::<&E::G1>::add_assign(&mut g_a, &a_answer);
    MulAssign::<E::Fr>::mul_assign(&mut a_answer, s);
    AddAssign::<&E::G1>::add_assign(&mut g_c, &a_answer);
    let mut b1_answer: E::G1 = b_g1_inputs.wait()?;
    AddAssign::<&E::G1>::add_assign(&mut b1_answer, &b_g1_aux.wait()?);
    let mut b2_answer = b_g2_inputs.wait()?;
    AddAssign::<&E::G2>::add_assign(&mut b2_answer, &b_g2_aux.wait()?);
    AddAssign::<&E::G2>::add_assign(&mut g_b, &b2_answer);
    MulAssign::<E::Fr>::mul_assign(&mut b1_answer, r);
    AddAssign::<&E::G1>::add_assign(&mut g_c, &b1_answer);
    AddAssign::<&E::G1>::add_assign(&mut g_c, &h.wait()?);
    AddAssign::<&E::G1>::add_assign(&mut g_c, &l.wait()?);
    AddAssign::<&E::G1>::add_assign(&mut g_d, &j.wait()?);
    //AddAssign::<&E::G1>::add_assign(&mut g_d, &j.wait()?);
    Ok(Proof {
        a: g_a.to_affine(),
        b: g_b.to_affine(),
        c: g_c.to_affine(),
        d: g_d.to_affine(),
        //coins,
    })
}

View File

@@ -0,0 +1,495 @@
use ff::{Field, FieldBits, PrimeField, PrimeFieldBits};
use group::{
prime::{PrimeCurve, PrimeCurveAffine, PrimeGroup},
Curve, Group, GroupEncoding, UncompressedEncoding, WnafGroup,
};
use pairing::{Engine, MillerLoopResult, MultiMillerLoop, PairingCurveAffine};
use rand_core::RngCore;
use std::fmt;
use std::iter::Sum;
use std::num::Wrapping;
use std::ops::{Add, AddAssign, BitAnd, Mul, MulAssign, Neg, Shr, Sub, SubAssign};
use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
// Prime modulus of the toy scalar field: r = 64513 = 63 * 2^10 + 1,
// chosen so the field has a large power-of-two subgroup (S = 10) for FFTs.
const MODULUS_R: Wrapping<u32> = Wrapping(64513);

/// Element of the tiny prime field F_64513 used by `DummyEngine` for tests.
/// Always stored reduced, i.e. the inner value is < `MODULUS_R`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Fr(Wrapping<u32>);

impl Default for Fr {
    fn default() -> Self {
        <Fr as Field>::zero()
    }
}
// --- Equality, formatting, conversion, selection and summation for `Fr`. ---

impl ConstantTimeEq for Fr {
    fn ct_eq(&self, other: &Fr) -> Choice {
        (self.0).0.ct_eq(&(other.0).0)
    }
}
impl fmt::Display for Fr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        write!(f, "{}", (self.0).0)
    }
}
impl From<u64> for Fr {
    // Reduce an arbitrary u64 into the field by taking it mod r.
    fn from(v: u64) -> Fr {
        Fr(Wrapping((v % MODULUS_R.0 as u64) as u32))
    }
}
impl ConditionallySelectable for Fr {
    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        Fr(Wrapping(u32::conditional_select(
            &(a.0).0,
            &(b.0).0,
            choice,
        )))
    }
}
impl Sum for Fr {
    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
        iter.fold(Self::zero(), ::std::ops::Add::add)
    }
}
impl<'r> Sum<&'r Fr> for Fr {
    fn sum<I: Iterator<Item = &'r Fr>>(iter: I) -> Self {
        iter.fold(Self::zero(), ::std::ops::Add::add)
    }
}
// --- Modular arithmetic operators for `Fr`. ---
// Operands are always reduced (< r), and r^2 = 64513^2 < 2^32, so the
// intermediate u32 products/sums below cannot overflow.

impl Neg for Fr {
    type Output = Self;
    fn neg(mut self) -> Self {
        // -0 stays 0; otherwise negate as r - x.
        if !<Fr as Field>::is_zero_vartime(&self) {
            self.0 = MODULUS_R - self.0;
        }
        self
    }
}
impl<'r> Add<&'r Fr> for Fr {
    type Output = Self;
    fn add(self, other: &Self) -> Self {
        let mut ret = self;
        AddAssign::add_assign(&mut ret, other);
        ret
    }
}
impl Add for Fr {
    type Output = Self;
    #[allow(clippy::op_ref)]
    fn add(self, other: Self) -> Self {
        self + &other
    }
}
impl<'r> AddAssign<&'r Fr> for Fr {
    fn add_assign(&mut self, other: &Self) {
        self.0 = (self.0 + other.0) % MODULUS_R;
    }
}
impl AddAssign for Fr {
    fn add_assign(&mut self, other: Self) {
        AddAssign::add_assign(self, &other);
    }
}
impl<'r> Sub<&'r Fr> for Fr {
    type Output = Self;
    fn sub(self, other: &Self) -> Self {
        let mut ret = self;
        SubAssign::sub_assign(&mut ret, other);
        ret
    }
}
impl Sub for Fr {
    type Output = Self;
    #[allow(clippy::op_ref)]
    fn sub(self, other: Self) -> Self {
        self - &other
    }
}
impl<'r> SubAssign<&'r Fr> for Fr {
    fn sub_assign(&mut self, other: &Self) {
        // Add r first so the unsigned subtraction cannot underflow.
        self.0 = ((MODULUS_R + self.0) - other.0) % MODULUS_R;
    }
}
impl SubAssign for Fr {
    fn sub_assign(&mut self, other: Self) {
        SubAssign::sub_assign(self, &other);
    }
}
impl<'r> Mul<&'r Fr> for Fr {
    type Output = Self;
    fn mul(self, other: &Self) -> Self {
        let mut ret = self;
        MulAssign::mul_assign(&mut ret, other);
        ret
    }
}
impl Mul for Fr {
    type Output = Self;
    #[allow(clippy::op_ref)]
    fn mul(self, other: Self) -> Self {
        self * &other
    }
}
impl<'r> MulAssign<&'r Fr> for Fr {
    fn mul_assign(&mut self, other: &Self) {
        self.0 = (self.0 * other.0) % MODULUS_R;
    }
}
impl MulAssign for Fr {
    fn mul_assign(&mut self, other: Self) {
        MulAssign::mul_assign(self, &other);
    }
}
// Bit-level helpers (used by generic exponentiation code); these operate on
// the raw representation, not on the field value.
impl BitAnd<u64> for Fr {
    type Output = u64;
    fn bitand(self, rhs: u64) -> u64 {
        (self.0).0 as u64 & rhs
    }
}
impl Shr<u32> for Fr {
    type Output = Fr;
    fn shr(mut self, rhs: u32) -> Fr {
        self.0 = Wrapping((self.0).0 >> rhs);
        self
    }
}
impl Field for Fr {
    // NOTE(review): `next_u32() % r` is slightly biased toward small values;
    // acceptable for this test-only field.
    fn random(mut rng: impl RngCore) -> Self {
        Fr(Wrapping(rng.next_u32()) % MODULUS_R)
    }
    fn zero() -> Self {
        Fr(Wrapping(0))
    }
    fn one() -> Self {
        Fr(Wrapping(1))
    }
    fn is_zero(&self) -> Choice {
        (self.0).0.ct_eq(&0)
    }
    fn square(&self) -> Self {
        Fr((self.0 * self.0) % MODULUS_R)
    }
    fn double(&self) -> Self {
        Fr((self.0 << 1) % MODULUS_R)
    }
    // Inverse via Fermat's little theorem: a^(r-2); None for zero.
    fn invert(&self) -> CtOption<Self> {
        CtOption::new(
            self.pow_vartime(&[(MODULUS_R.0 as u64) - 2]),
            !<Fr as Field>::is_zero(self),
        )
    }
    #[allow(clippy::many_single_char_names)]
    fn sqrt(&self) -> CtOption<Self> {
        // Tonelli-Shank's algorithm for q mod 16 = 1
        // https://eprint.iacr.org/2012/685.pdf (page 12, algorithm 5)
        // Here r - 1 = 2^10 * 63, so t = 63 and (t + 1) / 2 = 32; the two
        // hard-coded exponents below are exactly those values.
        let mut c = Fr::root_of_unity();
        // r = self^((t + 1) // 2)
        let mut r = self.pow_vartime([32u64]);
        // t = self^t
        let mut t = self.pow_vartime([63u64]);
        let mut m = Fr::S;
        while t != <Fr as Field>::one() {
            // Find the least i such that t^(2^i) = 1.
            let mut i = 1;
            {
                let mut t2i = t.square();
                loop {
                    if t2i == <Fr as Field>::one() {
                        break;
                    }
                    t2i = t2i.square();
                    i += 1;
                }
            }
            for _ in 0..(m - i - 1) {
                c = c.square();
            }
            MulAssign::mul_assign(&mut r, &c);
            c = c.square();
            MulAssign::mul_assign(&mut t, &c);
            m = i;
        }
        // Valid only if r^2 == self, i.e. self is a quadratic residue.
        CtOption::new(r, (r * r).ct_eq(self))
    }
}
/// Canonical byte representation of `Fr`: the value as a little-endian u64
/// padded to 8 bytes (only the low two bytes are ever non-zero).
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct FrRepr([u8; 8]);

impl From<Fr> for FrRepr {
    fn from(v: Fr) -> FrRepr {
        FrRepr::from(&v)
    }
}
impl<'a> From<&'a Fr> for FrRepr {
    fn from(v: &'a Fr) -> FrRepr {
        FrRepr(((v.0).0 as u64).to_le_bytes())
    }
}
impl AsMut<[u8]> for FrRepr {
    fn as_mut(&mut self) -> &mut [u8] {
        &mut self.0[..]
    }
}
impl AsRef<[u8]> for FrRepr {
    fn as_ref(&self) -> &[u8] {
        &self.0[..]
    }
}
impl Default for FrRepr {
    fn default() -> FrRepr {
        FrRepr([0; 8])
    }
}
impl PrimeField for Fr {
    type Repr = FrRepr;

    // r = 64513 needs 16 bits; capacity is one less. S = 10 because
    // r - 1 = 2^10 * 63.
    const NUM_BITS: u32 = 16;
    const CAPACITY: u32 = 15;
    const S: u32 = 10;

    /// Decodes a little-endian repr, rejecting values >= the modulus.
    fn from_repr(repr: FrRepr) -> CtOption<Self> {
        let v = u64::from_le_bytes(repr.0);
        let is_some = Choice::from(if v >= (MODULUS_R.0 as u64) { 0 } else { 1 });
        CtOption::new(Fr(Wrapping(v as u32)), is_some)
    }
    fn to_repr(&self) -> FrRepr {
        FrRepr::from(*self)
    }
    fn is_odd(&self) -> Choice {
        Choice::from(((self.0).0 % 2) as u8)
    }
    fn multiplicative_generator() -> Fr {
        Fr(Wrapping(5))
    }
    // 2^10-th root of unity; presumably 5^63 mod r — verify against S above.
    fn root_of_unity() -> Fr {
        Fr(Wrapping(57751))
    }
}
impl PrimeFieldBits for Fr {
    // The whole field element fits into a single u64 limb.
    type ReprBits = u64;
    fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
        FieldBits::new((self.0).0 as u64)
    }
    fn char_le_bits() -> FieldBits<Self::ReprBits> {
        FieldBits::new(MODULUS_R.0 as u64)
    }
}
/// Test-only pairing engine in which every group is the scalar field itself,
/// so group operations are field additions and the pairing is a field
/// multiplication. This lets the prover/verifier algebra be checked exactly.
#[derive(Clone, Debug)]
pub struct DummyEngine;

impl Engine for DummyEngine {
    type Fr = Fr;
    type G1 = Fr;
    type G1Affine = Fr;
    type G2 = Fr;
    type G2Affine = Fr;
    // TODO: This should be F_645131 or something. Doesn't matter for now.
    type Gt = Fr;
    fn pairing(p: &Self::G1Affine, q: &Self::G2Affine) -> Self::Gt {
        Self::multi_miller_loop(&[(p, &(*q))]).final_exponentiation()
    }
}
impl MultiMillerLoop for DummyEngine {
    type G2Prepared = Fr;
    // TODO: This should be F_645131 or something. Doesn't matter for now.
    type Result = Fr;
    /// In the dummy engine the multi-Miller loop is just the sum of the
    /// pairwise products: sum(a_i * b_i).
    fn multi_miller_loop(terms: &[(&Self::G1Affine, &Self::G2Prepared)]) -> Self::Result {
        let mut acc = <Fr as Field>::zero();
        for &(a, b) in terms {
            let mut tmp = *a;
            MulAssign::mul_assign(&mut tmp, b);
            AddAssign::add_assign(&mut acc, &tmp);
        }
        acc
    }
}
impl MillerLoopResult for Fr {
    type Gt = Fr;
    /// Perform final exponentiation of the result of a miller loop.
    /// (A no-op here: the dummy "pairing" is already the final value.)
    fn final_exponentiation(&self) -> Self::Gt {
        *self
    }
}
// --- Group-trait impls treating `Fr` as its own additive group. ---
// Identity is 0, the generator is 1, and "scalar multiplication" coincides
// with field multiplication.

impl Group for Fr {
    type Scalar = Fr;
    fn random(rng: impl RngCore) -> Self {
        <Fr as Field>::random(rng)
    }
    fn identity() -> Self {
        <Fr as Field>::zero()
    }
    fn generator() -> Self {
        <Fr as Field>::one()
    }
    fn is_identity(&self) -> Choice {
        <Fr as Field>::is_zero(self)
    }
    fn double(&self) -> Self {
        <Fr as Field>::double(self)
    }
}
impl PrimeGroup for Fr {}
impl Curve for Fr {
    type AffineRepr = Fr;
    // Projective and affine forms are identical here.
    fn to_affine(&self) -> Fr {
        *self
    }
}
impl WnafGroup for Fr {
    // Arbitrary small window size; only exercised by wNAF code paths.
    fn recommended_wnaf_for_num_scalars(_: usize) -> usize {
        3
    }
}
impl PrimeCurve for Fr {
    type Affine = Fr;
}
/// Placeholder "uncompressed point" type for `UncompressedEncoding`; the
/// dummy engine never serializes uncompressed points, so both views panic.
#[derive(Copy, Clone, Default)]
pub struct FakePoint;

impl AsMut<[u8]> for FakePoint {
    fn as_mut(&mut self) -> &mut [u8] {
        unimplemented!()
    }
}
impl AsRef<[u8]> for FakePoint {
    fn as_ref(&self) -> &[u8] {
        unimplemented!()
    }
}
impl PrimeCurveAffine for Fr {
    type Curve = Fr;
    type Scalar = Fr;
    fn identity() -> Self {
        <Fr as Field>::zero()
    }
    fn generator() -> Self {
        <Fr as Field>::one()
    }
    fn is_identity(&self) -> Choice {
        <Fr as Field>::is_zero(self)
    }
    fn to_curve(&self) -> Self::Curve {
        *self
    }
}
impl GroupEncoding for Fr {
    type Repr = [u8; 4];
    fn from_bytes(_bytes: &Self::Repr) -> CtOption<Self> {
        unimplemented!()
    }
    fn from_bytes_unchecked(_bytes: &Self::Repr) -> CtOption<Self> {
        unimplemented!()
    }
    // NOTE(review): big-endian here, while `FrRepr` is little-endian. Only
    // consumers that hash these bytes (e.g. coin derivation) see this
    // encoding, so it just has to be consistent — confirm that's intended.
    fn to_bytes(&self) -> Self::Repr {
        (self.0).0.to_be_bytes()
    }
}
impl UncompressedEncoding for Fr {
    type Uncompressed = FakePoint;
    fn from_uncompressed(_bytes: &Self::Uncompressed) -> CtOption<Self> {
        unimplemented!()
    }
    fn from_uncompressed_unchecked(_bytes: &Self::Uncompressed) -> CtOption<Self> {
        unimplemented!()
    }
    fn to_uncompressed(&self) -> Self::Uncompressed {
        unimplemented!()
    }
}
impl PairingCurveAffine for Fr {
    type Pair = Fr;
    type PairingResult = Fr;
    // Pairing in the dummy engine is plain field multiplication.
    fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult {
        self.mul(*other)
    }
}

View File

@@ -0,0 +1,617 @@
use ff::{Field, PrimeField};
mod dummy_engine;
use self::dummy_engine::*;
use std::marker::PhantomData;
use std::ops::{AddAssign, MulAssign, SubAssign};
use crate::random::{RandomCircuit, RandomConstraintSystem};
use crate::{Circuit, ConstraintSystem, SynthesisError};
use super::{create_proof, generate_parameters, prepare_verifying_key, verify_proof};
/// Demo circuit: proves knowledge of bits `a` and `b` whose XOR equals the
/// public input `c`. `None` witnesses are used for parameter generation.
struct XorDemo<Scalar: PrimeField> {
    a: Option<bool>,
    b: Option<bool>,
    // Ties the circuit to a scalar field without storing an element.
    _marker: PhantomData<Scalar>,
}
impl<Scalar: PrimeField> RandomCircuit<Scalar> for XorDemo<Scalar> {
    /// Allocates `a` and `b` as boolean-constrained aux variables, the public
    /// input `c`, and enforces `c = a XOR b` via `2a * b = a + b - c`.
    fn synthesize<CS: RandomConstraintSystem<Scalar>>(
        self,
        cs: &mut CS,
    ) -> Result<(), SynthesisError> {
        // Maps an optional bool witness to a 0/1 field element, or reports a
        // missing assignment (the setup phase synthesizes with `None`).
        let bit = |v: Option<bool>| {
            v.map(|b| if b { Scalar::one() } else { Scalar::zero() })
                .ok_or(SynthesisError::AssignmentMissing)
        };
        let a_var = cs.alloc(|| "a", || bit(self.a))?;
        // (1 - a) * a = 0 forces a into {0, 1}.
        cs.enforce(
            || "a_boolean_constraint",
            |lc| lc + CS::one() - a_var,
            |lc| lc + a_var,
            |lc| lc,
        );
        let b_var = cs.alloc(|| "b", || bit(self.b))?;
        // (1 - b) * b = 0 forces b into {0, 1}.
        cs.enforce(
            || "b_boolean_constraint",
            |lc| lc + CS::one() - b_var,
            |lc| lc + b_var,
            |lc| lc,
        );
        let c_var = cs.alloc_input(|| "c", || bit(self.a.zip(self.b).map(|(a, b)| a ^ b)))?;
        // 2a * b = a + b - c encodes c = a XOR b for boolean a, b.
        cs.enforce(
            || "c_xor_constraint",
            |lc| lc + a_var + a_var,
            |lc| lc + b_var,
            |lc| lc + a_var + b_var - c_var,
        );
        Ok(())
    }
}
// End-to-end regression test for the XOR demo over `DummyEngine`: checks the
// exact CRS element values produced by `generate_parameters` against a sage
// computation, then the proof elements from `create_proof`, and finally runs
// the verifier. All constants are tied to MODULUS_R = 64513; do not change
// one without recomputing the others.
#[test]
fn test_xordemo() {
    let g1 = Fr::one();
    let g2 = Fr::one();
    let alpha = Fr::from(48577);
    let beta = Fr::from(22580);
    let gamma = Fr::from(53332);
    let delta = Fr::from(5481);
    let deltap = Fr::from(432);
    let tau = Fr::from(3673);
    let params = {
        let c = XorDemo {
            a: None,
            b: None,
            _marker: PhantomData,
        };
        generate_parameters::<DummyEngine, _>(c, g1, g2, alpha, beta, gamma, delta, deltap, tau)
            .unwrap()
    };
    // This will synthesize the constraint system:
    //
    // public inputs: a_0 = 1, a_1 = c
    // aux inputs: a_2 = a, a_3 = b
    // constraints:
    //     (a_0 - a_2) * (a_2) = 0
    //     (a_0 - a_3) * (a_3) = 0
    //     (a_2 + a_2) * (a_3) = (a_2 + a_3 - a_1)
    //     (a_0) * 0 = 0
    //     (a_1) * 0 = 0
    // The evaluation domain is 8. The H query should
    // have 7 elements (it's a quotient polynomial)
    assert_eq!(7, params.h.len());
    let mut root_of_unity = Fr::root_of_unity();
    // We expect this to be a 2^10 root of unity
    assert_eq!(Fr::one(), root_of_unity.pow_vartime(&[1u64 << 10]));
    // Let's turn it into a 2^3 root of unity.
    root_of_unity = root_of_unity.pow_vartime(&[1u64 << 7]);
    assert_eq!(Fr::one(), root_of_unity.pow_vartime(&[1u64 << 3]));
    assert_eq!(Fr::from(20201), root_of_unity);
    // Let's compute all the points in our evaluation domain.
    let mut points = Vec::with_capacity(8);
    for i in 0u64..8 {
        points.push(root_of_unity.pow_vartime(&[i]));
    }
    // Let's compute t(tau) = (tau - p_0)(tau - p_1)...
    //                      = tau^8 - 1
    let mut t_at_tau = tau.pow_vartime(&[8u64]);
    t_at_tau.sub_assign(&Fr::one());
    {
        let mut tmp = Fr::one();
        for p in &points {
            let mut term = tau;
            term.sub_assign(p);
            tmp.mul_assign(&term);
        }
        assert_eq!(tmp, t_at_tau);
    }
    // We expect our H query to be 7 elements of the form...
    // {tau^i t(tau) / delta}
    let delta_inverse = delta.invert().unwrap();
    let deltap_inverse = deltap.invert().unwrap();
    let gamma_inverse = gamma.invert().unwrap();
    {
        let mut coeff = delta_inverse;
        coeff.mul_assign(&t_at_tau);
        let mut cur = Fr::one();
        for h in params.h.iter() {
            let mut tmp = cur;
            tmp.mul_assign(&coeff);
            assert_eq!(*h, tmp);
            cur.mul_assign(&tau);
        }
    }
    // The density of the IC query is 2 (2 inputs)
    assert_eq!(2, params.vk.ic.len());
    // The density of the J query is 2 (2 aux variables)
    assert_eq!(2, params.j.len());
    // The density of the L query is 2 (2 aux variables)
    assert_eq!(0, params.l.len());
    // The density of the A query is 4 (each variable is in at least one A term)
    assert_eq!(4, params.a.len());
    // The density of the B query is 2 (two variables are in at least one B term)
    assert_eq!(2, params.b_g1.len());
    assert_eq!(2, params.b_g2.len());
    /*
    Lagrange interpolation polynomials in our evaluation domain:
    ,-------------------------------. ,-------------------------------. ,-------------------------------.
    |            A TERM             | |            B TERM             | |            C TERM             |
    `-------------------------------. `-------------------------------' `-------------------------------'
    | a_0   | a_1   | a_2   | a_3   | | a_0   | a_1   | a_2   | a_3   | | a_0   | a_1   | a_2   | a_3   |
    | 1     | 0     | 64512 | 0     | | 0     | 0     | 1     | 0     | | 0     | 0     | 0     | 0     |
    | 1     | 0     | 0     | 64512 | | 0     | 0     | 0     | 1     | | 0     | 0     | 0     | 0     |
    | 0     | 0     | 2     | 0     | | 0     | 0     | 0     | 1     | | 0     | 64512 | 1     | 1     |
    | 1     | 0     | 0     | 0     | | 0     | 0     | 0     | 0     | | 0     | 0     | 0     | 0     |
    | 0     | 1     | 0     | 0     | | 0     | 0     | 0     | 0     | | 0     | 0     | 0     | 0     |
    `-------'-------'-------'-------' `-------'-------'-------'-------' `-------'-------'-------'-------'
    Example for u_0:
    sage: r = 64513
    sage: Fr = GF(r)
    sage: omega = (Fr(5)^63)^(2^7)
    sage: tau = Fr(3673)
    sage: R.<x> = PolynomialRing(Fr, 'x')
    sage: def eval(tau, c0, c1, c2, c3, c4):
    ....:     p = R.lagrange_polynomial([(omega^0, c0), (omega^1, c1), (omega^2, c2), (omega^3, c3), (omega^4, c4), (omega^5, 0), (omega^6, 0), (omega^7, 0)])
    ....:     return p.substitute(tau)
    sage: eval(tau, 1, 1, 0, 1, 0)
    59158
    */
    let u_i = [59158, 48317, 21767, 10402]
        .iter()
        .map(|e| Fr::from(*e))
        .collect::<Vec<Fr>>();
    let v_i = [0, 0, 60619, 30791]
        .iter()
        .map(|e| Fr::from(*e))
        .collect::<Vec<Fr>>();
    let w_i = [0, 23320, 41193, 41193]
        .iter()
        .map(|e| Fr::from(*e))
        .collect::<Vec<Fr>>();
    for (u, a) in u_i.iter().zip(&params.a[..]) {
        assert_eq!(u, a);
    }
    for (v, b) in v_i
        .iter()
        .filter(|&&e| e != Fr::zero())
        .zip(&params.b_g1[..])
    {
        assert_eq!(v, b);
    }
    for (v, b) in v_i
        .iter()
        .filter(|&&e| e != Fr::zero())
        .zip(&params.b_g2[..])
    {
        assert_eq!(v, b);
    }
    // IC elements are (beta*u_i + alpha*v_i + w_i)/gamma for inputs, and the
    // J elements use the same numerator divided by delta' for aux variables.
    for i in 0..4 {
        let mut tmp1 = beta;
        tmp1.mul_assign(&u_i[i]);
        let mut tmp2 = alpha;
        tmp2.mul_assign(&v_i[i]);
        tmp1.add_assign(&tmp2);
        tmp1.add_assign(&w_i[i]);
        if i < 2 {
            // Check the correctness of the IC query elements
            tmp1.mul_assign(&gamma_inverse);
            assert_eq!(tmp1, params.vk.ic[i]);
        } else {
            // Check the correctness of the L query elements
            tmp1.mul_assign(&deltap_inverse);
            assert_eq!(tmp1, params.j[i - 2]);
        }
    }
    // Check consistency of the other elements
    assert_eq!(alpha, params.vk.alpha_g1);
    assert_eq!(beta, params.vk.beta_g1);
    assert_eq!(beta, params.vk.beta_g2);
    assert_eq!(gamma, params.vk.gamma_g2);
    assert_eq!(delta, params.vk.delta_g1);
    assert_eq!(delta, params.vk.delta_g2);
    let pvk = prepare_verifying_key(&params.vk);
    let r = Fr::from(27134);
    let s = Fr::from(17146);
    let t = Fr::from(18231);
    let proof = {
        let c = XorDemo {
            a: Some(true),
            b: Some(false),
            _marker: PhantomData,
        };
        create_proof(c, &params, r, s, t).unwrap()
    };
    // A(x) =
    //  a_0 * (44865*x^7 + 56449*x^6 + 44865*x^5 + 8064*x^4 + 3520*x^3 + 56449*x^2 + 3520*x + 40321) +
    //  a_1 * (8064*x^7 + 56449*x^6 + 8064*x^5 + 56449*x^4 + 8064*x^3 + 56449*x^2 + 8064*x + 56449) +
    //  a_2 * (16983*x^7 + 24192*x^6 + 63658*x^5 + 56449*x^4 + 16983*x^3 + 24192*x^2 + 63658*x + 56449) +
    //  a_3 * (5539*x^7 + 27797*x^6 + 6045*x^5 + 56449*x^4 + 58974*x^3 + 36716*x^2 + 58468*x + 8064) +
    {
        // proof A = alpha + A(tau) + delta * r
        let mut expected_a = delta;
        expected_a.mul_assign(&r);
        expected_a.add_assign(&alpha);
        expected_a.add_assign(&u_i[0]); // a_0 = 1
        expected_a.add_assign(&u_i[1]); // a_1 = 1
        expected_a.add_assign(&u_i[2]); // a_2 = 1
                                        // a_3 = 0
        assert_eq!(proof.a, expected_a);
    }
    // B(x) =
    // a_0 * (0) +
    // a_1 * (0) +
    // a_2 * (56449*x^7 + 56449*x^6 + 56449*x^5 + 56449*x^4 + 56449*x^3 + 56449*x^2 + 56449*x + 56449) +
    // a_3 * (31177*x^7 + 44780*x^6 + 21752*x^5 + 42255*x^3 + 35861*x^2 + 33842*x + 48385)
    {
        // proof B = beta + B(tau) + delta * s
        let mut expected_b = delta;
        expected_b.mul_assign(&s);
        expected_b.add_assign(&beta);
        expected_b.add_assign(&v_i[0]); // a_0 = 1
        expected_b.add_assign(&v_i[1]); // a_1 = 1
        expected_b.add_assign(&v_i[2]); // a_2 = 1
                                        // a_3 = 0
        assert_eq!(proof.b, expected_b);
    }
    // NOTE(review): the commented block below checked proof.c for the
    // unmodified Groth16 prover; it predates the deltap/coin changes.
    //// C(x) =
    //// a_0 * (0) +
    //// a_1 * (27797*x^7 + 56449*x^6 + 36716*x^5 + 8064*x^4 + 27797*x^3 + 56449*x^2 + 36716*x + 8064) +
    //// a_2 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449) +
    //// a_3 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449)
    ////
    //// If A * B = C at each point in the domain, then the following polynomial...
    //// P(x) = A(x) * B(x) - C(x)
    ////      = 49752*x^14 + 13914*x^13 + 29243*x^12 + 27227*x^11 + 62362*x^10 + 35703*x^9 + 4032*x^8 + 14761*x^6 + 50599*x^5 + 35270*x^4 + 37286*x^3 + 2151*x^2 + 28810*x + 60481
    ////
    //// ... should be divisible by t(x), producing the quotient polynomial:
    //// h(x) = P(x) / t(x)
    ////      = 49752*x^6 + 13914*x^5 + 29243*x^4 + 27227*x^3 + 62362*x^2 + 35703*x + 4032
    //{
    //    let mut expected_c = Fr::zero();
    //    // A * s
    //    let mut tmp = proof.a;
    //    tmp.mul_assign(&s);
    //    expected_c.add_assign(&tmp);
    //    // B * r
    //    let mut tmp = proof.b;
    //    tmp.mul_assign(&r);
    //    expected_c.add_assign(&tmp);
    //    // delta * r * s
    //    let mut tmp = delta;
    //    tmp.mul_assign(&r);
    //    tmp.mul_assign(&s);
    //    expected_c.sub_assign(&tmp);
    //    // L query answer
    //    // a_2 = 1, a_3 = 0
    //    expected_c.add_assign(&params.j[0]);
    //    // H query answer
    //    for (i, coeff) in [5040, 11763, 10755, 63633, 128, 9747, 8739]
    //        .iter()
    //        .enumerate()
    //    {
    //        let coeff = Fr::from(*coeff);
    //        let mut tmp = params.h[i];
    //        tmp.mul_assign(&coeff);
    //        expected_c.add_assign(&tmp);
    //    }
    //    assert_eq!(expected_c, proof.c);
    //}
    assert!(verify_proof(&pvk, &proof, &[Fr::one()]).is_ok());
}
/// Circuit enforcing `a * b = c`, with a deliberately zero coefficient
/// injected into the B term to exercise the zero-coefficient code path.
struct MultWithZeroCoeffs<F> {
    a: Option<F>,
    b: Option<F>,
    c: Option<F>,
    /// Whether to attach the zero coefficient to the "1" variable, or a different variable.
    one_var: bool,
}
impl<F: ff::PrimeField> RandomCircuit<F> for &MultWithZeroCoeffs<F> {
    /// Allocates `a`, `b`, `c` and enforces `a * b = c`, smuggling a
    /// zero-coefficient term into the B linear combination.
    fn synthesize<CS: RandomConstraintSystem<F>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
        let a = cs.alloc(|| "a", || Ok(self.a.unwrap()))?;
        let b = cs.alloc(|| "b", || Ok(self.b.unwrap()))?;
        let c = cs.alloc(|| "c", || Ok(self.c.unwrap()))?;
        // Choose which variable carries the zero coefficient: the constant
        // ONE wire or an ordinary witness variable.
        let zeroed = if self.one_var { CS::one() } else { a };
        cs.enforce(
            || "cs",
            |lc| lc + a,
            // The (0, zeroed) term is the point of this test circuit.
            |lc| lc + (F::from(0), zeroed) + b,
            |lc| lc + c,
        );
        Ok(())
    }
}
/// Shared driver for the zero-coefficient tests: full setup / prove / verify
/// round trip over `DummyEngine` for a 5 * 6 = 30 instance.
fn zero_coeff_test(one_var: bool) {
    let circuit = MultWithZeroCoeffs {
        a: Some(Fr::from(5)),
        b: Some(Fr::from(6)),
        c: Some(Fr::from(30)),
        one_var,
    };
    // Fixed toy CRS trapdoor values, matching the other tests in this module.
    let (g1, g2) = (Fr::one(), Fr::one());
    let (alpha, beta, gamma) = (Fr::from(48577), Fr::from(22580), Fr::from(53332));
    let (delta, deltap, tau) = (Fr::from(5481), Fr::from(481), Fr::from(3673));
    let pk = generate_parameters::<DummyEngine, _>(
        &circuit, g1, g2, alpha, beta, gamma, delta, deltap, tau,
    )
    .unwrap();
    // Fixed blinding scalars keep the proof deterministic.
    let (r, s, t) = (Fr::from(27134), Fr::from(17146), Fr::from(18231));
    let pf = create_proof(&circuit, &pk, r, s, t).unwrap();
    let pvk = prepare_verifying_key(&pk.vk);
    verify_proof(&pvk, &pf, &[]).unwrap();
}
// Zero coefficient attached to the constant ONE variable.
#[test]
fn zero_coeff_one_var() {
    zero_coeff_test(true);
}
// Zero coefficient attached to an ordinary witness variable.
#[test]
fn zero_coeff_non_one_var() {
    zero_coeff_test(false);
}
/// Minimal coin-using circuit: draws one random coin and enforces
/// (-coin - a) * 1 = (-coin - b), which reduces to a == b.
struct SingleRand<F> {
    a: F,
    b: F,
}
impl<F: ff::PrimeField> RandomCircuit<F> for &SingleRand<F> {
    fn synthesize<CS: RandomConstraintSystem<F>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
        let a = cs.alloc(|| "a", || Ok(self.a))?;
        let b = cs.alloc(|| "b", || Ok(self.b))?;
        // The coin must be drawn before any coin-dependent allocation.
        let (coin_var, coin) = cs.alloc_random_coin(|| "alpha")?;
        //cs.alloc_dependent(|| "za", coin, |val| Ok(val - self.a));
        //cs.alloc_dependent(|| "zb", coin, |val| Ok(val - self.b));
        //let c = cs.alloc(|| "c", || Ok(self.c.unwrap()))?;
        // (-coin - a) * 1 = (-coin - b)  <=>  a = b; the coin cancels out.
        cs.enforce(
            || "cs",
            |z| z - coin_var - a,
            |z| z + CS::one(),
            |z| z - coin_var - b,
        );
        Ok(())
    }
}
// Setup / prove / verify round trip for the single-coin circuit with a == b.
#[test]
fn single_rand_test() {
    let circuit = SingleRand {
        a: Fr::from(5),
        b: Fr::from(5),
    };
    // Fixed toy CRS trapdoor values, matching the other tests in this module.
    let (g1, g2) = (Fr::one(), Fr::one());
    let (alpha, beta, gamma) = (Fr::from(48577), Fr::from(22580), Fr::from(53332));
    let (delta, deltap, tau) = (Fr::from(5481), Fr::from(481), Fr::from(3673));
    let pk = generate_parameters::<DummyEngine, _>(
        &circuit, g1, g2, alpha, beta, gamma, delta, deltap, tau,
    )
    .unwrap();
    // Fixed blinding scalars keep the proof deterministic.
    let (r, s, t) = (Fr::from(27134), Fr::from(17146), Fr::from(18231));
    let pf = create_proof(&circuit, &pk, r, s, t).unwrap();
    let pvk = prepare_verifying_key(&pk.vk);
    verify_proof(&pvk, &pf, &[]).unwrap();
}
/// Circuit proving `b_vals` is a permutation of `a_vals` via a grand-product
/// check at a random coin: prod(coin - a_i) == prod(coin - b_i).
struct PermutationWithCoin<F> {
    a_vals: Vec<F>,
    b_vals: Vec<F>,
}
impl<F: ff::PrimeField> RandomCircuit<F> for &PermutationWithCoin<F> {
    fn synthesize<CS: RandomConstraintSystem<F>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
        assert!(self.a_vals.len() == self.b_vals.len());
        let mut a_vars = vec![];
        let mut b_vars = vec![];
        for (i, a) in self.a_vals.iter().enumerate() {
            a_vars.push(cs.alloc(|| format!("{}_{}", "a", i), || Ok(*a))?);
        }
        for (i, b) in self.b_vals.iter().enumerate() {
            b_vars.push(cs.alloc(|| format!("{}_{}", "b", i), || Ok(*b))?);
        }
        // The coin is drawn after the inputs are committed but before the
        // coin-dependent accumulator variables below.
        let (coin_var, coin) = cs.alloc_random_coin(|| "alpha")?;
        // Running product over (coin - a_i); the alloc closure mutates
        // `this_acc_val` in the enclosing scope, so iteration order matters.
        let mut last_acc_var = CS::one();
        let mut last_acc_val = F::one();
        let mut this_acc_val = F::one();
        for (i, a) in self.a_vals.iter().enumerate() {
            let this_acc_var = cs.alloc(
                || format!("{}_{}", "a_inds", i),
                || {
                    this_acc_val = last_acc_val * (coin - a);
                    Ok(this_acc_val)
                },
            )?;
            // acc_{i} = acc_{i-1} * (coin - a_i)
            cs.enforce(
                || "cs",
                |z| z + last_acc_var,
                |z| z + coin_var - a_vars[i],
                |z| z + this_acc_var,
            );
            last_acc_var = this_acc_var;
            last_acc_val = this_acc_val;
        }
        let a_hash_var = last_acc_var;
        // Reset and repeat the running product over (coin - b_i).
        last_acc_var = CS::one();
        last_acc_val = F::one();
        for (i, b) in self.b_vals.iter().enumerate() {
            let this_acc_var = cs.alloc(
                || format!("{}_{}", "b_inds", i),
                || {
                    this_acc_val = last_acc_val * (coin - b);
                    Ok(this_acc_val)
                },
            )?;
            // acc_{i} = acc_{i-1} * (coin - b_i)
            cs.enforce(
                || "cs",
                |z| z + last_acc_var,
                |z| z + coin_var - b_vars[i],
                |z| z + this_acc_var,
            );
            last_acc_var = this_acc_var;
            last_acc_val = this_acc_val;
        }
        let b_hash_var = last_acc_var;
        // Final check: the two grand products agree.
        cs.enforce(
            || format!("hashes eq"),
            |z| z + a_hash_var,
            |z| z + CS::one(),
            |z| z + b_hash_var,
        );
        Ok(())
    }
}
// Setup / prove / verify round trip for the permutation-check circuit.
#[test]
fn perm_rand_test() {
    let circuit = PermutationWithCoin {
        a_vals: vec![Fr::from(5), Fr::from(20), Fr::from(30), Fr::from(50)],
        b_vals: vec![Fr::from(50), Fr::from(5), Fr::from(30), Fr::from(20)],
    };
    // Fixed toy CRS trapdoor values, matching the other tests in this module.
    let (g1, g2) = (Fr::one(), Fr::one());
    let (alpha, beta, gamma) = (Fr::from(48577), Fr::from(22580), Fr::from(53332));
    let (delta, deltap, tau) = (Fr::from(5481), Fr::from(481), Fr::from(3673));
    let pk = generate_parameters::<DummyEngine, _>(
        &circuit, g1, g2, alpha, beta, gamma, delta, deltap, tau,
    );
    // Surface the setup error in test output before unwrapping.
    if let Err(ref e) = pk {
        println!("error: {:?}", e);
    }
    let pk = pk.unwrap();
    // Fixed blinding scalars keep the proof deterministic.
    let (r, s, t) = (Fr::from(27134), Fr::from(17146), Fr::from(18231));
    let pf = create_proof(&circuit, &pk, r, s, t).unwrap();
    let pvk = prepare_verifying_key(&pk.vk);
    verify_proof(&pvk, &pf, &[]).unwrap();
}

View File

@@ -0,0 +1,84 @@
use group::{prime::PrimeCurveAffine, Curve};
use pairing::{MillerLoopResult, MultiMillerLoop};
use std::ops::{AddAssign, Neg};
use super::{PreparedVerifyingKey, Proof, VerifyingKey};
use crate::VerificationError;
pub mod batch;
/// Precompute the negated G2 elements (and the alpha/beta pairing) from `vk`
/// so that `verify_proof` needs only a single multi-Miller loop.
pub fn prepare_verifying_key<E: MultiMillerLoop>(vk: &VerifyingKey<E>) -> PreparedVerifyingKey<E> {
    PreparedVerifyingKey {
        alpha_g1_beta_g2: E::pairing(&vk.alpha_g1, &vk.beta_g2),
        neg_gamma_g2: vk.gamma_g2.neg().into(),
        neg_delta_g2: vk.delta_g2.neg().into(),
        neg_deltap_g2: vk.deltap_g2.neg().into(),
        ic: vk.ic.clone(),
        num_coins: vk.num_coins,
    }
}
/// Verify a single proof against the prepared verifying key and the given
/// public inputs (the verifier re-derives the proof's coins itself).
pub fn verify_proof<'a, E: MultiMillerLoop>(
    pvk: &'a PreparedVerifyingKey<E>,
    proof: &Proof<E>,
    public_inputs: &[E::Fr],
) -> Result<(), VerificationError> {
    // `ic` carries one base per public input, one per coin, plus the constant
    // term, so its length fixes the expected input count.
    if (public_inputs.len() + pvk.num_coins + 1) != pvk.ic.len() {
        println!(
            "got {} inputs and {} coins, expect total to be {}",
            public_inputs.len(),
            pvk.num_coins,
            pvk.ic.len() - 1
        );
        return Err(VerificationError::InvalidVerifyingKey);
    }
    // Re-generate the proof coins from the statement and proof.d, then append
    // them to the public inputs.
    let mut statement = public_inputs.to_vec();
    for coin_idx in 0..pvk.num_coins {
        use ff::Field;
        // we use 1 when computing coins... so add it back...
        let coin = super::compute_coin::<E, E::Fr>(
            &[&[E::Fr::one()][..], &public_inputs].concat(),
            proof.d,
            coin_idx,
        );
        statement.push(coin);
    }
    // Accumulate sum_i input_i * ic_i; ic[0] is the constant-1 base.
    let mut ic_acc = pvk.ic[0].to_curve();
    for (input, base) in statement.iter().zip(pvk.ic.iter().skip(1)) {
        AddAssign::<&E::G1>::add_assign(&mut ic_acc, &(*base * input));
    }
    // The original verification equation is:
    //     A * B = alpha * beta + inputs * gamma + C * delta
    // rearranged (using the precomputed negations) as:
    //     A * B + inputs * (-gamma) + C * (-delta) + D * (-delta') = alpha * beta
    // which allows a single final exponentiation.
    let lhs = E::multi_miller_loop(&[
        (&proof.a, &proof.b.into()),
        (&ic_acc.to_affine(), &pvk.neg_gamma_g2),
        (&proof.c, &pvk.neg_delta_g2),
        (&proof.d, &pvk.neg_deltap_g2),
    ])
    .final_exponentiation();
    if lhs == pvk.alpha_g1_beta_g2 {
        Ok(())
    } else {
        Err(VerificationError::InvalidProof)
    }
}

View File

@@ -0,0 +1,289 @@
//! Performs batch Groth16 proof verification.
//!
//! Batch verification asks whether *all* proofs in some set are valid,
//! rather than asking whether *each* of them is valid. This allows sharing
//! computations among all proof verifications, performing less work overall
//! at the cost of higher latency (the entire batch must complete), complexity of
//! caller code (which must assemble a batch of proofs across work-items),
//! and loss of the ability to easily pinpoint failing proofs.
//!
//! This batch verification implementation is non-adaptive, in the sense that it
//! assumes that all the proofs in the batch are verifiable by the same
//! `VerifyingKey`. The reason is that if you have different proof statements,
//! you need to specify which statement you are proving, which means that you
//! need to refer to or lookup a particular `VerifyingKey`. In practice, with
//! large enough batches, it's manageable and not much worse performance-wise to
//! keep batches of each statement type, vs one large adaptive batch.
use std::ops::AddAssign;
use ff::Field;
use group::{Curve, Group};
use pairing::{MillerLoopResult, MultiMillerLoop};
use rand_core::{CryptoRng, RngCore};
#[cfg(feature = "multicore")]
use rand_core::OsRng;
#[cfg(feature = "multicore")]
use rayon::{iter::ParallelIterator, prelude::ParallelSlice};
use crate::{
mirage::{PreparedVerifyingKey, Proof, VerifyingKey},
VerificationError,
};
/// A batch verification item.
///
/// This struct exists to allow batch processing to be decoupled from the
/// lifetime of the message. This is useful when using the batch verification
/// API in an async context.
#[derive(Clone, Debug)]
pub struct Item<E: MultiMillerLoop> {
    // The proof to be checked.
    proof: Proof<E>,
    // The public inputs the proof is claimed against.
    inputs: Vec<E::Fr>,
}
impl<E: MultiMillerLoop> From<(&Proof<E>, &[E::Fr])> for Item<E> {
fn from((proof, inputs): (&Proof<E>, &[E::Fr])) -> Self {
(proof.clone(), inputs.to_owned()).into()
}
}
impl<E: MultiMillerLoop> From<(Proof<E>, Vec<E::Fr>)> for Item<E> {
    // Owned proof and inputs are stored directly, no copying.
    fn from((proof, inputs): (Proof<E>, Vec<E::Fr>)) -> Self {
        Self { proof, inputs }
    }
}
impl<E: MultiMillerLoop> Item<E> {
    /// Perform non-batched verification of this `Item`.
    ///
    /// This is useful (in combination with `Item::clone`) for implementing
    /// fallback logic when batch verification fails.
    pub fn verify_single(self, pvk: &PreparedVerifyingKey<E>) -> Result<(), VerificationError> {
        // Delegates to the single-proof verifier with the stored statement.
        super::verify_proof(pvk, &self.proof, &self.inputs)
    }
}
/// A batch verification context.
///
/// In practice, you would create a batch verifier for each proof statement
/// requiring the same `VerifyingKey`.
#[derive(Debug)]
pub struct Verifier<E: MultiMillerLoop> {
    // Queued (proof, inputs) pairs awaiting batch verification.
    items: Vec<Item<E>>,
}
// Need to impl Default by hand to avoid a derived E: Default bound
impl<E: MultiMillerLoop> Default for Verifier<E> {
    // An empty batch.
    fn default() -> Self {
        Self { items: Vec::new() }
    }
}
impl<E: MultiMillerLoop> Verifier<E>
where
    E::G1: AddAssign<E::G1>,
{
    /// Construct a new batch verifier.
    pub fn new() -> Self {
        Self::default()
    }

    /// Queue a (proof, inputs) tuple for verification.
    pub fn queue<I: Into<Item<E>>>(&mut self, item: I) {
        self.items.push(item.into())
    }

    /// Perform batch verification with a particular `VerifyingKey`, returning
    /// `Ok(())` if all proofs were verified and `VerificationError` otherwise.
    #[allow(non_snake_case)]
    pub fn verify<R: RngCore + CryptoRng>(
        self,
        mut rng: R,
        vk: &VerifyingKey<E>,
    ) -> Result<(), VerificationError> {
        // NOTE(review): this length check does not account for `vk.num_coins`,
        // unlike the single-proof `verify_proof` — confirm batch verification
        // is only used with coin-free statements.
        if self
            .items
            .iter()
            .any(|Item { inputs, .. }| inputs.len() + 1 != vk.ic.len())
        {
            return Err(VerificationError::InvalidVerifyingKey);
        }
        let mut ml_terms = Vec::<(E::G1Affine, E::G2Prepared)>::new();
        // Per-IC-base scalar accumulators, the C-term accumulator, and the
        // running sum of the random weights z.
        let mut acc_Gammas = vec![E::Fr::zero(); vk.ic.len()];
        let mut acc_Delta = E::G1::identity();
        let mut acc_Y = E::Fr::zero();
        for Item { proof, inputs } in self.items.into_iter() {
            // The spec is explicit that z != 0. Field::random is defined to
            // return a uniformly-random field element (which may be 0), so we
            // loop until it's not, avoiding needing an assert or throwing an
            // error through no fault of the batch items. This will likely never
            // actually loop, but handles the edge case.
            let z = loop {
                let z = E::Fr::random(&mut rng);
                if !z.is_zero_vartime() {
                    break z;
                }
            };
            // A·B term is negated via -B so the whole sum targets Gt identity.
            ml_terms.push(((proof.a * z).into(), (-proof.b).into()));
            acc_Gammas[0] += &z; // a_0 is implicitly set to 1
            for (a_i, acc_Gamma_i) in Iterator::zip(inputs.iter(), acc_Gammas.iter_mut().skip(1)) {
                *acc_Gamma_i += &(z * a_i);
            }
            acc_Delta += proof.c * z;
            acc_Y += &z;
        }
        ml_terms.push((acc_Delta.to_affine(), E::G2Prepared::from(vk.delta_g2)));
        // Psi = sum_i acc_Gamma_i * ic_i collapses all input terms into a
        // single pairing.
        let Psi = vk
            .ic
            .iter()
            .zip(acc_Gammas.iter())
            .map(|(&Psi_i, acc_Gamma_i)| Psi_i * acc_Gamma_i)
            .sum();
        ml_terms.push((E::G1Affine::from(Psi), E::G2Prepared::from(vk.gamma_g2)));
        // Covers the [acc_Y]⋅e(alpha_g1, beta_g2) component
        //
        // The multiplication by acc_Y is expensive -- it involves
        // exponentiating by acc_Y because the result of the pairing is an
        // element of a multiplicative subgroup of a large extension field.
        // Instead, we add
        //     ([acc_Y]⋅alpha_g1, beta_g2)
        // to our Miller loop terms because
        //     [acc_Y]⋅e(alpha_g1, beta_g2) = e([acc_Y]⋅alpha_g1, beta_g2)
        ml_terms.push((
            E::G1Affine::from(vk.alpha_g1 * acc_Y),
            E::G2Prepared::from(vk.beta_g2),
        ));
        let ml_terms = ml_terms.iter().map(|(a, b)| (a, b)).collect::<Vec<_>>();
        if E::multi_miller_loop(&ml_terms[..]).final_exponentiation() == E::Gt::identity() {
            Ok(())
        } else {
            Err(VerificationError::InvalidProof)
        }
    }

    /// Perform batch verification with a particular `VerifyingKey`, returning
    /// `Ok(())` if all proofs were verified and `VerificationError` otherwise.
    ///
    /// This performs the bulk of internal arithmetic over the global rayon
    /// threadpool.
    #[cfg(feature = "multicore")]
    #[allow(non_snake_case)]
    pub fn verify_multicore(self, vk: &VerifyingKey<E>) -> Result<(), VerificationError> {
        // Same shape check as `verify` (and the same coin caveat).
        if self
            .items
            .iter()
            .any(|Item { inputs, .. }| inputs.len() + 1 != vk.ic.len())
        {
            return Err(VerificationError::InvalidVerifyingKey);
        }
        // Per-chunk partial sums, merged pairwise by the parallel reduce.
        struct Accumulator<E: MultiMillerLoop> {
            gammas: Vec<E::Fr>,
            delta: E::G1,
            y: E::Fr,
            ml_result: Option<E::Result>,
        }
        impl<E: MultiMillerLoop> Accumulator<E> {
            fn new(ic_len: usize) -> Self {
                Accumulator {
                    gammas: vec![E::Fr::zero(); ic_len],
                    delta: E::G1::identity(),
                    y: E::Fr::zero(),
                    ml_result: None,
                }
            }
        }
        let ic_len = vk.ic.len();
        let acc = self
            .items
            // This chunk size was obtained heuristically.
            .par_chunks(8)
            .map(|items| {
                let mut acc = Accumulator::<E>::new(ic_len);
                let mut ml_terms: Vec<(E::G1Affine, E::G2Prepared)> = vec![];
                // One non-zero random weight per chunk; successive items use
                // successive powers z, z^2, ... of it.
                // NOTE(review): power-of-z weights within a chunk are
                // correlated, unlike the independent weights in `verify` —
                // confirm this is acceptable for the intended soundness bound.
                let z = loop {
                    let z = E::Fr::random(&mut OsRng);
                    if !z.is_zero_vartime() {
                        break z;
                    }
                };
                let mut cur_z = z;
                for Item { proof, inputs } in items {
                    acc.gammas[0] += &cur_z;
                    for (a_i, acc_gamma_i) in
                        Iterator::zip(inputs.iter(), acc.gammas.iter_mut().skip(1))
                    {
                        *acc_gamma_i += &(cur_z * a_i);
                    }
                    acc.delta += proof.c * cur_z;
                    acc.y += &cur_z;
                    ml_terms.push(((proof.a * cur_z).into(), (-proof.b).into()));
                    cur_z *= z;
                }
                let ml_terms = ml_terms.iter().map(|(a, b)| (a, b)).collect::<Vec<_>>();
                acc.ml_result = Some(E::multi_miller_loop(&ml_terms[..]));
                acc
            })
            .reduce(
                || Accumulator::<E>::new(ic_len),
                |mut a, b| {
                    for (a, b) in a.gammas.iter_mut().zip(b.gammas.into_iter()) {
                        *a += b;
                    }
                    a.delta += b.delta;
                    a.y += b.y;
                    a.ml_result = match (a.ml_result, b.ml_result) {
                        (Some(a), Some(b)) => Some(a + b),
                        (Some(a), None) | (None, Some(a)) => Some(a),
                        (None, None) => None,
                    };
                    a
                },
            );
        match acc.ml_result {
            // Empty batch: nothing to check.
            None => Ok(()),
            Some(mut ml_result) => {
                // TODO: could use a multiexp (Bos-Coster maybe?)
                let psi = vk
                    .ic
                    .iter()
                    .zip(acc.gammas.into_iter())
                    .map(|(&psi_i, acc_gamma_i)| psi_i * acc_gamma_i)
                    .sum();
                ml_result += E::multi_miller_loop(&[
                    (&acc.delta.to_affine(), &E::G2Prepared::from(vk.delta_g2)),
                    (&E::G1Affine::from(psi), &E::G2Prepared::from(vk.gamma_g2)),
                    (
                        &E::G1Affine::from(vk.alpha_g1 * acc.y),
                        &E::G2Prepared::from(vk.beta_g2),
                    ),
                ]);
                if ml_result.final_exponentiation() == E::Gt::identity() {
                    Ok(())
                } else {
                    Err(VerificationError::InvalidProof)
                }
            }
        }
    }
}

215
third_party/bellman/src/multicore.rs vendored Normal file
View File

@@ -0,0 +1,215 @@
//! An interface for dealing with the kinds of parallel computations involved in
//! `bellman`. It's currently just a thin wrapper around [`rayon`] but may be
//! extended in the future to allow for various parallelism strategies.
#[cfg(feature = "multicore")]
mod implementation {
    use std::sync::atomic::{AtomicUsize, Ordering};

    use crossbeam_channel::{bounded, Receiver};
    use lazy_static::lazy_static;
    use log::{error, trace};
    use rayon::current_num_threads;

    // Number of `compute` calls currently in flight; used to decide between
    // spawning onto the pool and running inline (see Worker::compute).
    static WORKER_SPAWN_COUNTER: AtomicUsize = AtomicUsize::new(0);

    lazy_static! {
        // See Worker::compute below for a description of this.
        static ref WORKER_SPAWN_MAX_COUNT: usize = current_num_threads() * 4;
    }

    /// Handle for submitting work to the global rayon thread pool.
    #[derive(Clone, Default)]
    pub struct Worker {}

    impl Worker {
        pub fn new() -> Worker {
            Worker {}
        }

        /// floor(log2(number of rayon threads)).
        pub fn log_num_threads(&self) -> u32 {
            log2_floor(current_num_threads())
        }

        /// Run `f` asynchronously, returning a `Waiter` for its result.
        pub fn compute<F, R>(&self, f: F) -> Waiter<R>
        where
            F: FnOnce() -> R + Send + 'static,
            R: Send + 'static,
        {
            let (sender, receiver) = bounded(1);

            // We keep track here of how many times spawn has been called.
            // It can be called without limit, each time, putting a
            // request for a new thread to execute a method on the
            // ThreadPool. However, if we allow it to be called without
            // limits, we run the risk of memory exhaustion due to limited
            // stack space consumed by all of the pending closures to be
            // executed.
            let previous_count = WORKER_SPAWN_COUNTER.fetch_add(1, Ordering::SeqCst);

            // If the number of spawns requested has exceeded the number
            // of cores available for processing by some factor (the
            // default being 4), instead of requesting that we spawn a new
            // thread, we instead execute the closure in the context of a
            // scope call (which blocks the current thread) to help clear
            // the growing work queue and minimize the chances of memory
            // exhaustion.
            if previous_count > *WORKER_SPAWN_MAX_COUNT {
                let thread_index = rayon::current_thread_index().unwrap_or(0);
                rayon::scope(move |_| {
                    trace!("[{}] switching to scope to help clear backlog [threads: current {}, requested {}]",
                        thread_index,
                        current_num_threads(),
                        WORKER_SPAWN_COUNTER.load(Ordering::SeqCst));
                    let res = f();
                    sender.send(res).unwrap();
                    WORKER_SPAWN_COUNTER.fetch_sub(1, Ordering::SeqCst);
                });
            } else {
                rayon::spawn(move || {
                    let res = f();
                    sender.send(res).unwrap();
                    WORKER_SPAWN_COUNTER.fetch_sub(1, Ordering::SeqCst);
                });
            }

            Waiter { receiver }
        }

        /// Run `f` inside a rayon scope, passing a chunk size that splits
        /// `elements` roughly evenly across the available threads.
        pub fn scope<'a, F, R>(&self, elements: usize, f: F) -> R
        where
            F: FnOnce(&rayon::Scope<'a>, usize) -> R + Send,
            R: Send,
        {
            let num_threads = current_num_threads();
            let chunk_size = if elements < num_threads {
                1
            } else {
                elements / num_threads
            };

            rayon::scope(|scope| f(scope, chunk_size))
        }
    }

    /// Receiving side of a `Worker::compute` result.
    pub struct Waiter<T> {
        receiver: Receiver<T>,
    }

    impl<T> Waiter<T> {
        /// Consumes this waiter and blocks until the result is ready.
        pub fn wait(self) -> T {
            // This will be Some if this thread is in the global thread pool.
            if rayon::current_thread_index().is_some() {
                let msg = "wait() cannot be called from within a thread pool since that would lead to deadlocks";
                // panic! doesn't necessarily kill the process, so we log as well.
                error!("{}", msg);
                panic!("{}", msg);
            }
            self.receiver.recv().unwrap()
        }

        /// One-off sending.
        pub fn done(val: T) -> Self {
            let (sender, receiver) = bounded(1);
            sender.send(val).unwrap();

            Waiter { receiver }
        }
    }

    // Largest p with 2^p <= num.
    fn log2_floor(num: usize) -> u32 {
        assert!(num > 0);

        let mut pow = 0;
        while (1 << (pow + 1)) <= num {
            pow += 1;
        }

        pow
    }

    #[test]
    fn test_log2_floor() {
        assert_eq!(log2_floor(1), 0);
        assert_eq!(log2_floor(2), 1);
        assert_eq!(log2_floor(3), 1);
        assert_eq!(log2_floor(4), 2);
        assert_eq!(log2_floor(5), 2);
        assert_eq!(log2_floor(6), 2);
        assert_eq!(log2_floor(7), 2);
        assert_eq!(log2_floor(8), 3);
    }
}
#[cfg(not(feature = "multicore"))]
mod implementation {
    /// Single-threaded stand-in for the multicore `Worker`.
    #[derive(Clone)]
    pub struct Worker;

    impl Worker {
        pub fn new() -> Worker {
            Worker
        }

        // Single-threaded: log2(1) == 0.
        pub fn log_num_threads(&self) -> u32 {
            0
        }

        /// Run `f` immediately on the current thread.
        pub fn compute<F, R>(&self, f: F) -> Waiter<R>
        where
            F: FnOnce() -> R + Send + 'static,
            R: Send + 'static,
        {
            Waiter::done(f())
        }

        /// Run `f` with a dummy scope; the "chunk size" is all of `elements`.
        pub fn scope<F, R>(&self, elements: usize, f: F) -> R
        where
            F: FnOnce(&DummyScope, usize) -> R,
        {
            f(&DummyScope, elements)
        }
    }

    /// Already-resolved result holder mirroring the multicore `Waiter`.
    pub struct Waiter<T> {
        val: Option<T>,
    }

    impl<T> Waiter<T> {
        /// Consumes this waiter and blocks until the result is ready.
        pub fn wait(mut self) -> T {
            self.val.take().expect("unmet data dependency")
        }

        /// One-off sending.
        pub fn done(val: T) -> Self {
            Waiter { val: Some(val) }
        }
    }

    /// Scope stand-in that runs spawned closures inline.
    pub struct DummyScope;

    impl DummyScope {
        pub fn spawn<F: FnOnce(&DummyScope)>(&self, f: F) {
            f(self);
        }
    }

    /// A fake rayon ParallelIterator that is just a serial iterator.
    pub(crate) trait FakeParallelIterator {
        type Iter: Iterator<Item = Self::Item>;
        type Item: Send;
        fn into_par_iter(self) -> Self::Iter;
    }

    impl FakeParallelIterator for core::ops::Range<u32> {
        type Iter = Self;
        type Item = u32;
        fn into_par_iter(self) -> Self::Iter {
            self
        }
    }
}
pub use self::implementation::*;

386
third_party/bellman/src/multiexp.rs vendored Normal file
View File

@@ -0,0 +1,386 @@
use super::multicore::{Waiter, Worker};
use bitvec::vec::BitVec;
use ff::{FieldBits, PrimeField, PrimeFieldBits};
use group::prime::{PrimeCurve, PrimeCurveAffine};
use std::io;
use std::iter;
use std::ops::AddAssign;
use std::sync::Arc;
#[cfg(feature = "multicore")]
use rayon::prelude::*;
#[cfg(not(feature = "multicore"))]
use crate::multicore::FakeParallelIterator;
use super::SynthesisError;
/// An object that builds a source of bases.
/// An object that builds a source of bases.
pub trait SourceBuilder<G: PrimeCurveAffine>: Send + Sync + 'static + Clone {
    type Source: Source<G>;

    /// Consume the builder and produce a fresh `Source` of bases.
    fn build(self) -> Self::Source;
}
/// A source of bases, like an iterator.
/// A source of bases, like an iterator.
pub trait Source<G: PrimeCurveAffine> {
    /// Yield a reference to the next base.
    fn next(&mut self) -> Result<&G, SynthesisError>;

    /// Skips `amt` elements from the source, avoiding deserialization.
    fn skip(&mut self, amt: usize) -> Result<(), SynthesisError>;
}
/// Extension trait: add the next base from a `Source` into a curve point.
pub trait AddAssignFromSource: PrimeCurve {
    /// Parses the element from the source. Fails if the point is at infinity.
    fn add_assign_from_source<S: Source<<Self as PrimeCurve>::Affine>>(
        &mut self,
        source: &mut S,
    ) -> Result<(), SynthesisError> {
        AddAssign::<&<Self as PrimeCurve>::Affine>::add_assign(self, source.next()?);
        Ok(())
    }
}
// Blanket impl: every prime-order curve group gets the default method.
impl<G> AddAssignFromSource for G where G: PrimeCurve {}
impl<G: PrimeCurveAffine> SourceBuilder<G> for (Arc<Vec<G>>, usize) {
    type Source = (Arc<Vec<G>>, usize);

    /// Produce a source over the shared bases starting at the stored offset.
    fn build(self) -> (Arc<Vec<G>>, usize) {
        // `build` consumes `self`, so the `(Arc, offset)` pair can be returned
        // directly; previously this was `(self.0.clone(), self.1)`, which
        // cloned the `Arc` (a refcount bump) only to drop the original.
        self
    }
}
// `self.0` is the shared base vector, `self.1` the current cursor.
impl<G: PrimeCurveAffine> Source<G> for (Arc<Vec<G>>, usize) {
    fn next(&mut self) -> Result<&G, SynthesisError> {
        if self.0.len() <= self.1 {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                format!(
                    "expected more bases from source {} {}",
                    self.0.len(),
                    self.1
                ),
            )
            .into());
        }

        // The identity element carries no information and signals a bug
        // upstream, so reject it.
        if self.0[self.1].is_identity().into() {
            return Err(SynthesisError::UnexpectedIdentity);
        }

        let ret = &self.0[self.1];

        self.1 += 1;

        Ok(ret)
    }

    fn skip(&mut self, amt: usize) -> Result<(), SynthesisError> {
        if self.0.len() <= self.1 {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                format!(
                    "expected more bases from source {} {}",
                    self.0.len(),
                    self.1
                ),
            )
            .into());
        }

        // NOTE(review): only the starting cursor is bounds-checked here;
        // skipping past the end is caught by the next `next()` call.
        self.1 += amt;

        Ok(())
    }
}
/// Describes, per base, whether that base is actually used by the query.
pub trait QueryDensity {
    /// Returns whether the base exists.
    type Iter: Iterator<Item = bool>;

    /// Iterate the per-base density flags.
    fn iter(self) -> Self::Iter;
    /// Number of tracked bases, if known.
    fn get_query_size(self) -> Option<usize>;
}
/// Density map that marks every base as used.
#[derive(Clone)]
pub struct FullDensity;
// Identity conversion so `FullDensity` satisfies `AsRef<Q>` bounds.
impl AsRef<FullDensity> for FullDensity {
    fn as_ref(&self) -> &FullDensity {
        self
    }
}
impl<'a> QueryDensity for &'a FullDensity {
    type Iter = iter::Repeat<bool>;

    // Every base is present: an endless stream of `true`.
    fn iter(self) -> Self::Iter {
        iter::repeat(true)
    }

    // Unbounded; the caller's exponent count determines the length.
    fn get_query_size(self) -> Option<usize> {
        None
    }
}
/// Tracks which variables are actually used, one bit per variable.
pub struct DensityTracker {
    bv: BitVec,
}
impl<'a> QueryDensity for &'a DensityTracker {
    type Iter = Box<dyn 'a + Iterator<Item = bool>>;

    // Yield each tracked bit by value.
    fn iter(self) -> Self::Iter {
        Box::new(self.bv.iter().by_vals())
    }

    fn get_query_size(self) -> Option<usize> {
        Some(self.bv.len())
    }
}
impl Default for DensityTracker {
    // Same as `new`: an empty tracker.
    fn default() -> Self {
        Self::new()
    }
}
impl DensityTracker {
    /// Create an empty tracker.
    pub fn new() -> DensityTracker {
        DensityTracker { bv: BitVec::new() }
    }

    /// Register a new (initially unused) variable slot.
    pub fn add_element(&mut self) {
        self.bv.push(false);
    }

    /// Mark slot `idx` as used; panics if `idx` was never added.
    pub fn inc(&mut self, idx: usize) {
        if !self.bv.get(idx).unwrap() {
            self.bv.set(idx, true);
        }
    }

    /// Number of slots marked used.
    pub fn get_total_density(&self) -> usize {
        self.bv.count_ones()
    }
}
/// An exponent pre-split into `c`-bit windows (see `Exponent::chunks`).
enum ChunkedExponent {
    Zero,
    One,
    Chunks(Vec<u64>),
}
/// An exponent, with the trivial values 0 and 1 special-cased so the
/// multiexp can skip (0) or directly add (1) the corresponding base.
pub enum Exponent<F: PrimeFieldBits> {
    Zero,
    One,
    Bits(FieldBits<F::ReprBits>),
}
impl<F: PrimeFieldBits> From<&F> for Exponent<F> {
    fn from(exp: &F) -> Self {
        // Classify the trivial exponents; otherwise keep the LE bit view.
        if exp.is_zero_vartime() {
            Exponent::Zero
        } else if exp == &F::one() {
            Exponent::One
        } else {
            Exponent::Bits(exp.to_le_bits())
        }
    }
}
impl<F: PrimeFieldBits> From<F> for Exponent<F> {
fn from(exp: F) -> Self {
(&exp).into()
}
}
impl<F: PrimeFieldBits> Exponent<F> {
    /// Split the exponent's little-endian bits into `c`-bit windows, each
    /// packed (LSB first) into a `u64`.
    fn chunks(&self, c: usize) -> ChunkedExponent {
        match self {
            Self::Zero => ChunkedExponent::Zero,
            Self::One => ChunkedExponent::One,
            Self::Bits(exp) => ChunkedExponent::Chunks(
                exp.chunks(c)
                    .map(|chunk| {
                        // Fold the window's bits into a bucket index.
                        chunk
                            .iter()
                            .by_vals()
                            .enumerate()
                            .fold(0u64, |acc, (i, b)| acc + ((b as u64) << i))
                    })
                    .collect(),
            ),
        }
    }
}
// Pippenger-style bucketed multi-exponentiation over `c`-bit windows of the
// exponents; each window is processed (possibly in parallel) and the partial
// results are recombined by repeated doubling.
fn multiexp_inner<Q, D, G, S>(
    bases: S,
    density_map: D,
    exponents: Arc<Vec<Exponent<G::Scalar>>>,
    c: u32,
) -> Result<G, SynthesisError>
where
    for<'a> &'a Q: QueryDensity,
    D: Send + Sync + 'static + Clone + AsRef<Q>,
    G: PrimeCurve,
    G::Scalar: PrimeFieldBits,
    S: SourceBuilder<<G as PrimeCurve>::Affine>,
{
    // Perform this region of the multiexp
    let this = move |bases: S,
                     density_map: D,
                     exponents: Arc<Vec<ChunkedExponent>>,
                     chunk: usize|
          -> Result<_, SynthesisError> {
        // Accumulate the result
        let mut acc = G::identity();

        // Build a source for the bases
        let mut bases = bases.build();

        // Create space for the buckets
        let mut buckets = vec![G::identity(); (1 << c) - 1];

        // only the first round uses this
        let handle_trivial = chunk == 0;

        // Sort the bases into buckets
        for (exp, density) in exponents.iter().zip(density_map.as_ref().iter()) {
            if density {
                match exp {
                    // Zero exponent: the base contributes nothing.
                    ChunkedExponent::Zero => bases.skip(1)?,
                    ChunkedExponent::One => {
                        if handle_trivial {
                            acc.add_assign_from_source(&mut bases)?;
                        } else {
                            bases.skip(1)?;
                        }
                    }
                    ChunkedExponent::Chunks(chunks) => {
                        // Bucket index is this window's value; bucket 0 is
                        // implicit (contributes nothing).
                        let exp = chunks[chunk];
                        if exp != 0 {
                            (&mut buckets[(exp - 1) as usize])
                                .add_assign_from_source(&mut bases)?;
                        } else {
                            bases.skip(1)?;
                        }
                    }
                }
            }
        }

        // Summation by parts
        // e.g. 3a + 2b + 1c = a +
        //                    (a) + b +
        //                    ((a) + b) + c
        let mut running_sum = G::identity();
        for exp in buckets.into_iter().rev() {
            running_sum.add_assign(&exp);
            acc.add_assign(&running_sum);
        }

        Ok(acc)
    };

    // Split the exponents into chunks.
    let exponents = Arc::new(
        exponents
            .iter()
            .map(|exp| exp.chunks(c as usize))
            .collect::<Vec<_>>(),
    );

    let parts = (0..G::Scalar::NUM_BITS)
        .into_par_iter()
        .step_by(c as usize)
        .enumerate()
        .map(|(chunk, _)| this(bases.clone(), density_map.clone(), exponents.clone(), chunk))
        .collect::<Vec<Result<_, _>>>();

    // Recombine windows from most- to least-significant: double `c` times,
    // then add the window's partial sum.
    parts
        .into_iter()
        .rev()
        .try_fold(G::identity(), |acc, part| {
            part.map(|part| (0..c).fold(acc, |acc, _| acc.double()) + part)
        })
}
/// Perform multi-exponentiation. The caller is responsible for ensuring the
/// query size is the same as the number of exponents.
pub fn multiexp<Q, D, G, S>(
    pool: &Worker,
    bases: S,
    density_map: D,
    exponents: Arc<Vec<Exponent<G::Scalar>>>,
) -> Waiter<Result<G, SynthesisError>>
where
    for<'a> &'a Q: QueryDensity,
    D: Send + Sync + 'static + Clone + AsRef<Q>,
    G: PrimeCurve,
    G::Scalar: PrimeFieldBits,
    S: SourceBuilder<<G as PrimeCurve>::Affine>,
{
    // Heuristic window size: ~ln(n) bits, with a floor of 3 for tiny inputs.
    let c = if exponents.len() < 32 {
        3u32
    } else {
        (f64::from(exponents.len() as u32)).ln().ceil() as u32
    };

    if let Some(query_size) = density_map.as_ref().get_query_size() {
        // If the density map has a known query size, it should not be
        // inconsistent with the number of exponents.
        assert!(query_size == exponents.len());
    }

    pool.compute(move || multiexp_inner(bases, density_map, exponents, c))
}
#[cfg(feature = "pairing")]
#[test]
fn test_with_bls12() {
    // Reference implementation: plain sum of base_i * exp_i.
    fn naive_multiexp<G: PrimeCurve>(
        bases: Arc<Vec<<G as PrimeCurve>::Affine>>,
        exponents: Arc<Vec<G::Scalar>>,
    ) -> G {
        assert_eq!(bases.len(), exponents.len());

        let mut acc = G::identity();

        for (base, exp) in bases.iter().zip(exponents.iter()) {
            AddAssign::<&G>::add_assign(&mut acc, &(*base * *exp));
        }

        acc
    }

    use bls12_381::{Bls12, Scalar};
    use ff::Field;
    use group::{Curve, Group};
    use pairing::Engine;

    const SAMPLES: usize = 1 << 14;

    let mut rng = rand::thread_rng();
    let v = Arc::new(
        (0..SAMPLES)
            .map(|_| Scalar::random(&mut rng))
            .collect::<Vec<_>>(),
    );
    // Pre-classify the scalars as `Exponent`s for the fast path.
    let v_bits = Arc::new(v.iter().map(|e| e.into()).collect::<Vec<_>>());
    let g = Arc::new(
        (0..SAMPLES)
            .map(|_| <Bls12 as Engine>::G1::random(&mut rng).to_affine())
            .collect::<Vec<_>>(),
    );

    let naive: <Bls12 as Engine>::G1 = naive_multiexp(g.clone(), v);

    let pool = Worker::new();

    let fast = multiexp(&pool, (g, 0), FullDensity, v_bits).wait().unwrap();

    assert_eq!(naive, fast);
}

26
third_party/bellman/src/random.rs vendored Normal file
View File

@@ -0,0 +1,26 @@
use super::{ConstraintSystem, LinearCombination, SynthesisError, Variable};
use ff::PrimeField;
/// A circuit that may additionally sample random coins during synthesis.
/// Computations are expressed in terms of arithmetic circuits, in particular
/// rank-1 quadratic constraint systems. The `synthesize` method is called
/// during CRS generation and during proving, with a `RandomConstraintSystem`
/// that can allocate the coins.
pub trait RandomCircuit<Scalar: PrimeField> {
    /// Synthesize the circuit into a rank-1 quadratic constraint system
    fn synthesize<CS: RandomConstraintSystem<Scalar>>(
        self,
        cs: &mut CS,
    ) -> Result<(), SynthesisError>;
}
/// Represents a constraint system which can have new variables
/// allocated and constraints between them formed, and which can
/// additionally allocate random coins.
pub trait RandomConstraintSystem<Scalar: PrimeField>: Sized + ConstraintSystem<Scalar> {
    /// Allocate a random coin, returning both its variable and its value.
    fn alloc_random_coin<A, AR>(
        &mut self,
        annotation: A,
    ) -> Result<(Variable, Scalar), SynthesisError>
    where
        A: FnOnce() -> AR,
        AR: Into<String>;
}

129
third_party/bellman/tests/common/mod.rs vendored Normal file
View File

@@ -0,0 +1,129 @@
use ff::PrimeField;
use bellman::{Circuit, ConstraintSystem, SynthesisError};
// Number of rounds for the LongsightF322p3 MiMC instance defined below.
pub const MIMC_ROUNDS: usize = 322;
/// This is an implementation of MiMC, specifically a
/// variant named `LongsightF322p3` for BLS12-381.
/// See http://eprint.iacr.org/2016/492 for more
/// information about this construction.
///
/// ```
/// function LongsightF322p3(xL ⦂ Fp, xR ⦂ Fp) {
///     for i from 0 up to 321 {
///         xL, xR := xR + (xL + Ci)^3, xL
///     }
///     return xL
/// }
/// ```
pub fn mimc<S: PrimeField>(mut xl: S, mut xr: S, constants: &[S]) -> S {
    assert_eq!(constants.len(), MIMC_ROUNDS);

    for c in constants {
        // One round: (xL, xR) <- (xR + (xL + Ci)^3, xL)
        let keyed = xl + c;
        let cubed = keyed.square() * keyed;
        let next_xl = cubed + xr;
        xr = xl;
        xl = next_xl;
    }

    xl
}
/// This is our demo circuit for proving knowledge of the
/// preimage of a MiMC hash invocation.
#[allow(clippy::upper_case_acronyms)]
pub struct MiMCDemo<'a, S: PrimeField> {
    // Left half of the preimage; `None` during parameter generation.
    pub xl: Option<S>,
    // Right half of the preimage; `None` during parameter generation.
    pub xr: Option<S>,
    // The MIMC_ROUNDS round constants.
    pub constants: &'a [S],
}
/// Our demo circuit implements this `Circuit` trait which
/// is used during paramgen and proving in order to
/// synthesize the constraint system.
impl<'a, S: PrimeField> Circuit<S> for MiMCDemo<'a, S> {
    // Each MiMC round costs two constraints: one for the square and one for
    // the cube/accumulate; the final round's output is a public input.
    fn synthesize<CS: ConstraintSystem<S>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
        assert_eq!(self.constants.len(), MIMC_ROUNDS);

        // Allocate the first component of the preimage.
        let mut xl_value = self.xl;
        let mut xl = cs.alloc(
            || "preimage xl",
            || xl_value.ok_or(SynthesisError::AssignmentMissing),
        )?;

        // Allocate the second component of the preimage.
        let mut xr_value = self.xr;
        let mut xr = cs.alloc(
            || "preimage xr",
            || xr_value.ok_or(SynthesisError::AssignmentMissing),
        )?;

        for i in 0..MIMC_ROUNDS {
            // xL, xR := xR + (xL + Ci)^3, xL
            let cs = &mut cs.namespace(|| format!("round {}", i));

            // tmp = (xL + Ci)^2
            let tmp_value = xl_value.map(|mut e| {
                e.add_assign(&self.constants[i]);
                e.square()
            });
            let tmp = cs.alloc(
                || "tmp",
                || tmp_value.ok_or(SynthesisError::AssignmentMissing),
            )?;

            cs.enforce(
                || "tmp = (xL + Ci)^2",
                |lc| lc + xl + (self.constants[i], CS::one()),
                |lc| lc + xl + (self.constants[i], CS::one()),
                |lc| lc + tmp,
            );

            // new_xL = xR + (xL + Ci)^3
            // new_xL = xR + tmp * (xL + Ci)
            // new_xL - xR = tmp * (xL + Ci)
            let new_xl_value = xl_value.map(|mut e| {
                e.add_assign(&self.constants[i]);
                e.mul_assign(&tmp_value.unwrap());
                e.add_assign(&xr_value.unwrap());
                e
            });

            let new_xl = if i == (MIMC_ROUNDS - 1) {
                // This is the last round, xL is our image and so
                // we allocate a public input.
                cs.alloc_input(
                    || "image",
                    || new_xl_value.ok_or(SynthesisError::AssignmentMissing),
                )?
            } else {
                cs.alloc(
                    || "new_xl",
                    || new_xl_value.ok_or(SynthesisError::AssignmentMissing),
                )?
            };

            cs.enforce(
                || "new_xL = xR + (xL + Ci)^3",
                |lc| lc + tmp,
                |lc| lc + xl + (self.constants[i], CS::one()),
                |lc| lc + new_xl - xr,
            );

            // xR = xL
            xr = xl;
            xr_value = xl_value;

            // xL = new_xL
            xl = new_xl;
            xl_value = new_xl_value;
        }

        Ok(())
    }
}

207
third_party/bellman/tests/mimc.rs vendored Normal file
View File

@@ -0,0 +1,207 @@
// For randomness (during paramgen and proof generation)
use rand::thread_rng;
// For benchmarking
use std::time::{Duration, Instant};
// Bring in some tools for using finite fields
use ff::Field;
// We're going to use the BLS12-381 pairing-friendly elliptic curve.
use bls12_381::{Bls12, Scalar};
// We're going to use the Groth16 proving system.
use bellman::groth16::{
batch, create_random_proof, generate_random_parameters, prepare_verifying_key, verify_proof,
Proof,
};
mod common;
use common::*;
// End-to-end Groth16 flow over the MiMC demo circuit: paramgen, SAMPLES
// prove/serialize/deserialize/verify iterations, and timing printouts.
#[test]
fn test_mimc() {
    // This may not be cryptographically safe, use
    // `OsRng` (for example) in production software.
    let mut rng = thread_rng();

    // Generate the MiMC round constants
    let constants = (0..MIMC_ROUNDS)
        .map(|_| Scalar::random(&mut rng))
        .collect::<Vec<_>>();

    println!("Creating parameters...");

    // Create parameters for our circuit
    let params = {
        let c = MiMCDemo {
            xl: None,
            xr: None,
            constants: &constants,
        };

        generate_random_parameters::<Bls12, _, _>(c, &mut rng).unwrap()
    };

    // Prepare the verification key (for proof verification)
    let pvk = prepare_verifying_key(&params.vk);

    println!("Creating proofs...");

    // Let's benchmark stuff!
    const SAMPLES: u32 = 50;
    let mut total_proving = Duration::new(0, 0);
    let mut total_verifying = Duration::new(0, 0);

    // Just a place to put the proof data, so we can
    // benchmark deserialization.
    let mut proof_vec = vec![];

    for _ in 0..SAMPLES {
        // Generate a random preimage and compute the image
        let xl = Scalar::random(&mut rng);
        let xr = Scalar::random(&mut rng);
        let image = mimc(xl, xr, &constants);

        proof_vec.truncate(0);

        let start = Instant::now();
        {
            // Create an instance of our circuit (with the
            // witness)
            let c = MiMCDemo {
                xl: Some(xl),
                xr: Some(xr),
                constants: &constants,
            };

            // Create a groth16 proof with our parameters.
            let proof = create_random_proof(c, &params, &mut rng).unwrap();

            proof.write(&mut proof_vec).unwrap();
        }

        total_proving += start.elapsed();

        let start = Instant::now();
        let proof = Proof::read(&proof_vec[..]).unwrap();
        // Check the proof
        assert!(verify_proof(&pvk, &proof, &[image]).is_ok());
        total_verifying += start.elapsed();
    }

    let proving_avg = total_proving / SAMPLES;
    let proving_avg =
        proving_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (proving_avg.as_secs() as f64);

    let verifying_avg = total_verifying / SAMPLES;
    let verifying_avg =
        verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (verifying_avg.as_secs() as f64);

    println!("Average proving time: {:?} seconds", proving_avg);
    println!("Average verifying time: {:?} seconds", verifying_avg);
}
// Same flow as `test_mimc`, but additionally queues every proof into a
// batch verifier and checks the whole batch at once, reporting the
// amortized batch-verification time.
#[test]
fn batch_verify() {
    let mut rng = thread_rng();
    let mut batch = batch::Verifier::new();

    // Generate the MiMC round constants
    let constants = (0..MIMC_ROUNDS)
        .map(|_| Scalar::random(&mut rng))
        .collect::<Vec<_>>();

    println!("Creating parameters...");

    // Create parameters for our circuit
    let params = {
        let c = MiMCDemo {
            xl: None,
            xr: None,
            constants: &constants,
        };

        generate_random_parameters::<Bls12, _, _>(c, &mut rng).unwrap()
    };

    // Prepare the verification key (for proof verification)
    let pvk = prepare_verifying_key(&params.vk);

    println!("Creating proofs...");

    // Let's benchmark stuff!
    const SAMPLES: u32 = 50;
    let mut total_proving = Duration::new(0, 0);
    let mut total_verifying = Duration::new(0, 0);

    // Just a place to put the proof data, so we can
    // benchmark deserialization.
    let mut proof_vec = vec![];

    for _ in 0..SAMPLES {
        // Generate a random preimage and compute the image
        let xl = Scalar::random(&mut rng);
        let xr = Scalar::random(&mut rng);
        let image = mimc(xl, xr, &constants);

        proof_vec.truncate(0);

        let start = Instant::now();
        {
            // Create an instance of our circuit (with the
            // witness)
            let c = MiMCDemo {
                xl: Some(xl),
                xr: Some(xr),
                constants: &constants,
            };

            // Create a groth16 proof with our parameters.
            let proof = create_random_proof(c, &params, &mut rng).unwrap();

            proof.write(&mut proof_vec).unwrap();
        }

        total_proving += start.elapsed();

        let start = Instant::now();
        let proof = Proof::read(&proof_vec[..]).unwrap();
        // Check the proof
        assert!(verify_proof(&pvk, &proof, &[image]).is_ok());
        total_verifying += start.elapsed();

        // Queue the proof and inputs for batch verification.
        batch.queue((proof, [image].into()));
    }

    let mut batch_verifying = Duration::new(0, 0);
    let batch_start = Instant::now();

    // Verify this batch for this specific verifying key
    assert!(batch.verify(rng, &params.vk).is_ok());

    batch_verifying += batch_start.elapsed();

    let proving_avg = total_proving / SAMPLES;
    let proving_avg =
        proving_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (proving_avg.as_secs() as f64);

    let verifying_avg = total_verifying / SAMPLES;
    let verifying_avg =
        verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (verifying_avg.as_secs() as f64);

    let batch_amortized = batch_verifying / SAMPLES;
    let batch_amortized = batch_amortized.subsec_nanos() as f64 / 1_000_000_000f64
        + (batch_amortized.as_secs() as f64);

    println!("Average proving time: {:?} seconds", proving_avg);
    println!("Average verifying time: {:?} seconds", verifying_avg);
    println!(
        "Amortized batch verifying time: {:?} seconds",
        batch_amortized
    );
}