23 Commits

Author SHA1 Message Date
Daniel Tehrani  20565c8a25  Replace the testing R1CS to an instance that has variables  2023-03-31 12:51:57 +09:00
Daniel Tehrani  cad6dfb30b  Fix  2023-03-31 12:51:15 +09:00
Daniel Tehrani  620b8921bd  Use dynamic proof sizing  2023-03-30 19:17:02 +09:00
Daniel Tehrani  b9e63cb98c  Expose proof values so we can read them in Hoplite  2023-03-29 22:24:29 +09:00
Daniel Tehrani  39218fe057  Use circuit_reader  2023-03-28 16:44:20 +09:00
Daniel Tehrani  8f073995c8  Merge main  2023-03-28 16:43:08 +09:00
Daniel Tehrani  ef4a70ad0a  Proof verification circuit (but without final poly evaluation)  2023-03-26 13:35:48 +09:00
Daniel Tehrani  f6de8de3c2  .gitignore proofs loaded by hoplite_circuit  2023-03-24 16:56:15 +09:00
Daniel Tehrani  117f2605a1  Refactor  2023-03-24 16:55:30 +09:00
Daniel Tehrani  69a7821880  Add a prover to generate proofs which is used for testing the spartan-ecdsa halo2 circuit  2023-03-08 11:35:19 -08:00
Daniel Tehrani  05e9bbd9bc  Implement sumcheck chip  2023-03-07 15:02:06 -07:00
Daniel Tehrani  9436fad9cc  Fix  2023-03-03 19:38:39 -07:00
Daniel Tehrani  0206ac817f  Add more circuit value structs  2023-03-03 19:31:10 -07:00
Daniel Tehrani  1a57e4085b  Wrap props with Option for compatibility w/ hoplite_circuit  2023-03-03 15:05:47 -07:00
Daniel Tehrani  dfc77f142b  Small fix  2023-03-03 14:00:47 -07:00
Daniel Tehrani  a67cd603b9  Specify num constraints & vars with generics  2023-03-03 13:08:03 -07:00
Daniel Tehrani  3ff73e0c5b  Move circuit value related structs/traits into circuit_vals.rs  2023-03-03 13:06:17 -07:00
Daniel Tehrani  ad048acbfc  Add comments  2023-03-03 12:40:45 -07:00
lsankar4033  5524adf4c2  fix filename  2023-03-01 17:35:38 -07:00
Daniel Tehrani  71f1ce73c5  Add rest of the verification processes in the ref impl  2023-03-01 11:28:55 -07:00
Daniel Tehrani  f46a2a5941  Comment  2023-02-26 14:48:47 +09:00
Daniel Tehrani  ad2ee6e4fb  Add zk-sum-check circuit written in Halo2 (still incomplete!)  2023-02-26 14:42:32 +09:00
Daniel Tehrani  23cad0fa18  Move Hoplite into the repo  2023-02-25 19:30:42 +09:00
85 changed files with 13678 additions and 84 deletions

.github/workflows/publish.yaml

@@ -0,0 +1,42 @@
name: Publish Package to npmjs
on:
  release:
    types: [published]
  workflow_dispatch:
jobs:
  publish:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.ref_name }}
      # Setup Node.js
      - uses: actions/setup-node@v3
        with:
          node-version: 18
          registry-url: "https://registry.npmjs.org"
      # Setup Rust
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2022-10-31
      - run: rustup component add rust-src --toolchain nightly-2022-10-31-x86_64-unknown-linux-gnu
      - run: rustup target add x86_64-apple-darwin
      # Install circom-secq
      - uses: GuillaumeFalourd/clone-github-repo-action@v2
        with:
          owner: "DanTehrani"
          repository: "circom-secq"
      - run: cd circom-secq && cargo build --release && cargo install --path circom
      # Install wasm-pack
      - uses: jetli/wasm-pack-action@v0.4.0
        with:
          version: "0.10.3"
      - run: cargo test --release
      - run: yarn
      - run: yarn build
      - run: yarn test
      - run: npm publish
        working-directory: ./packages/lib
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

.gitignore

@@ -41,3 +41,7 @@ wasm_bytes.ts
packages/lib/src/circuits/
packages/lib/example/
packages/hoplite_circuit/params/
packages/hoplite_circuit/prover/proof.bin
packages/hoplite_circuit/prover/input.bin

.vscode/settings.json

@@ -2,6 +2,7 @@
"editor.formatOnSave": true,
"cSpell.words": [
"merkle",
"NIZK"
"NIZK",
"Secq"
]
}

Cargo.toml

@@ -1,6 +1,11 @@
 [workspace]
 members = [
-    "packages/spartan_wasm",
+    # "packages/spartan_wasm",
     "packages/secq256k1",
+    # "packages/poseidon",
+    "packages/hoplite",
+    "packages/hoplite_circuit",
     "packages/poseidon",
+    "packages/Spartan-secq",
+    "packages/circuit_reader",
 ]

package.json

@@ -6,7 +6,8 @@
"repository": "https://github.com/DanTehrani/spartan-wasm.git",
"author": "Daniel Tehrani <contact@dantehrani.com>",
"scripts": {
"build": "sh ./scripts/build.sh && lerna run build"
"build": "sh ./scripts/build.sh && lerna run build",
"test": "sh ./scripts/test.sh"
},
"devDependencies": {
"@types/jest": "^29.2.4",

packages/Spartan-secq/CODE_OF_CONDUCT.md

@@ -0,0 +1,9 @@
# Microsoft Open Source Code of Conduct
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
Resources:
- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns

packages/Spartan-secq/CONTRIBUTING.md

@@ -0,0 +1,12 @@
This project welcomes contributions and suggestions. Most contributions require you to
agree to a Contributor License Agreement (CLA) declaring that you have the right to,
and actually do, grant us the rights to use your contribution. For details, visit
https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need
to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the
instructions provided by the bot. You will only need to do this once across all repositories using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

packages/Spartan-secq/Cargo.toml

@@ -0,0 +1,39 @@
[package]
name = "spartan"
version = "0.7.1"
authors = ["Srinath Setty <srinath@microsoft.com>", "Dan Tehrani"]
edition = "2021"
description = "High-speed zkSNARKs without trusted setup"
readme = "README.md"
license-file = "LICENSE"
keywords = ["zkSNARKs", "cryptography", "proofs"]
[dependencies]
num-bigint-dig = "^0.7"
secq256k1 = { path = "../secq256k1" }
merlin = "3.0.0"
rand = "0.7.3"
digest = "0.8.1"
sha3 = "0.8.2"
byteorder = "1.3.4"
rayon = { version = "1.3.0", optional = true }
serde = { version = "1.0.106", features = ["derive"] }
bincode = "1.2.1"
subtle = { version = "2.4", default-features = false }
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "1", default-features = false }
itertools = "0.10.0"
colored = "2.0.0"
flate2 = "1.0.14"
thiserror = "1.0"
num-traits = "0.2.15"
hex-literal = { version = "0.3" }
multiexp = "0.2.2"
[dev-dependencies]
criterion = "0.3.1"
[lib]
name = "libspartan"
path = "src/lib.rs"
crate-type = ["cdylib", "rlib"]

packages/Spartan-secq/LICENSE

@@ -0,0 +1,21 @@
MIT License
Copyright (c) Microsoft Corporation.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

packages/Spartan-secq/README.md

@@ -0,0 +1,10 @@
## Fork of [Spartan](https://github.com/microsoft/Spartan)
_This fork is still under development._
Modify Spartan to operate over the **base field** of secp256k1.
### Changes from the original Spartan
- Use the secq256k1 crate instead of curve25519-dalek
- Modify values in scalar.rs (originally ristretto255.rs)
Please refer to [spartan-ecdsa](https://github.com/personaelabs/spartan-ecdsa) for development status.
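Why the base field: Spartan's Pedersen commitments commit to elements of the scalar field of the curve they are instantiated over, and secq256k1 (as used by spartan-ecdsa) is defined so that its group order equals secp256k1's base-field prime

$$
p = 2^{256} - 2^{32} - 977,
$$

so witnesses over secp256k1's base field (e.g. public-key coordinates in ECDSA verification) can be committed to directly.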

packages/Spartan-secq/SECURITY.md

@@ -0,0 +1,41 @@
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.3 BLOCK -->
## Security
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below.
## Reporting Security Issues
**Please do not report security vulnerabilities through public GitHub issues.**
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report).
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc).
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue
This information will help us triage your report more quickly.
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs.
## Preferred Languages
We prefer all communications to be in English.
## Policy
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd).
<!-- END MICROSOFT SECURITY.MD BLOCK -->

packages/Spartan-secq/benches/nizk.rs

@@ -0,0 +1,92 @@
#![allow(clippy::assertions_on_result_states)]
extern crate byteorder;
extern crate core;
extern crate criterion;
extern crate digest;
extern crate libspartan;
extern crate merlin;
extern crate rand;
extern crate sha3;
use libspartan::{Instance, NIZKGens, NIZK};
use merlin::Transcript;
use criterion::*;
fn nizk_prove_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("NIZK_prove_benchmark");
group.plot_config(plot_config);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
let name = format!("NIZK_prove_{}", num_vars);
group.bench_function(&name, move |b| {
b.iter(|| {
let mut prover_transcript = Transcript::new(b"example");
NIZK::prove(
black_box(&inst),
black_box(vars.clone()),
black_box(&inputs),
black_box(&gens),
black_box(&mut prover_transcript),
);
});
});
group.finish();
}
}
fn nizk_verify_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("NIZK_verify_benchmark");
group.plot_config(plot_config);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
// produce a proof of satisfiability
let mut prover_transcript = Transcript::new(b"example");
let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);
let name = format!("NIZK_verify_{}", num_cons);
group.bench_function(&name, move |b| {
b.iter(|| {
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(
black_box(&inst),
black_box(&inputs),
black_box(&mut verifier_transcript),
black_box(&gens)
)
.is_ok());
});
});
group.finish();
}
}
fn set_duration() -> Criterion {
Criterion::default().sample_size(10)
}
criterion_group! {
name = benches_nizk;
config = set_duration();
targets = nizk_prove_benchmark, nizk_verify_benchmark
}
criterion_main!(benches_nizk);

packages/Spartan-secq/benches/snark.rs

@@ -0,0 +1,131 @@
#![allow(clippy::assertions_on_result_states)]
extern crate libspartan;
extern crate merlin;
use libspartan::{Instance, SNARKGens, SNARK};
use merlin::Transcript;
use criterion::*;
fn snark_encode_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("SNARK_encode_benchmark");
group.plot_config(plot_config);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, _vars, _inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// produce a commitment to R1CS instance
let name = format!("SNARK_encode_{}", num_cons);
group.bench_function(&name, move |b| {
b.iter(|| {
SNARK::encode(black_box(&inst), black_box(&gens));
});
});
group.finish();
}
}
fn snark_prove_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("SNARK_prove_benchmark");
group.plot_config(plot_config);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// produce a commitment to R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof
let name = format!("SNARK_prove_{}", num_cons);
group.bench_function(&name, move |b| {
b.iter(|| {
let mut prover_transcript = Transcript::new(b"example");
SNARK::prove(
black_box(&inst),
black_box(&comm),
black_box(&decomm),
black_box(vars.clone()),
black_box(&inputs),
black_box(&gens),
black_box(&mut prover_transcript),
);
});
});
group.finish();
}
}
fn snark_verify_benchmark(c: &mut Criterion) {
for &s in [10, 12, 16].iter() {
let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
let mut group = c.benchmark_group("SNARK_verify_benchmark");
group.plot_config(plot_config);
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// produce a commitment to R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof of satisfiability
let mut prover_transcript = Transcript::new(b"example");
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
vars,
&inputs,
&gens,
&mut prover_transcript,
);
// verify the proof
let name = format!("SNARK_verify_{}", num_cons);
group.bench_function(&name, move |b| {
b.iter(|| {
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(
black_box(&comm),
black_box(&inputs),
black_box(&mut verifier_transcript),
black_box(&gens)
)
.is_ok());
});
});
group.finish();
}
}
fn set_duration() -> Criterion {
Criterion::default().sample_size(10)
}
criterion_group! {
name = benches_snark;
config = set_duration();
targets = snark_encode_benchmark, snark_prove_benchmark, snark_verify_benchmark
}
criterion_main!(benches_snark);

packages/Spartan-secq/examples/cubic.rs

@@ -0,0 +1,147 @@
//! Demonstrates how to produce a proof for the canonical cubic equation: `x^3 + x + 5 = y`.
//! The example is described in detail [here].
//!
//! The R1CS for this problem consists of the following 4 constraints:
//! `Z0 * Z0 - Z1 = 0`
//! `Z1 * Z0 - Z2 = 0`
//! `(Z2 + Z0) * 1 - Z3 = 0`
//! `(Z3 + 5) * 1 - I0 = 0`
//!
//! [here]: https://medium.com/@VitalikButerin/quadratic-arithmetic-programs-from-zero-to-hero-f6d558cea649
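//!
//! For instance, with Z0 = 1 the witness is Z1 = 1, Z2 = 1, Z3 = 2, and the
//! public input is I0 = 1^3 + 1 + 5 = 7.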
#![allow(clippy::assertions_on_result_states)]
use libspartan::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK};
use merlin::Transcript;
use rand_core::OsRng;
use secq256k1::elliptic_curve::Field;
use secq256k1::Scalar;
#[allow(non_snake_case)]
fn produce_r1cs() -> (
usize,
usize,
usize,
usize,
Instance,
VarsAssignment,
InputsAssignment,
) {
// parameters of the R1CS instance
let num_cons = 4;
let num_vars = 4;
let num_inputs = 1;
let num_non_zero_entries = 8;
// We will encode the above constraints into three matrices, where
// the coefficients in the matrix are in the little-endian byte order
let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new();
let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
let one: [u8; 32] = Scalar::ONE.to_bytes().into();
// R1CS is a set of three sparse matrices A, B, C, where there is a row for every
// constraint and a column for every entry in z = (vars, 1, inputs)
// An R1CS instance is satisfiable iff:
// Az \circ Bz = Cz, where z = (vars, 1, inputs)
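// For this instance, z = (Z0, Z1, Z2, Z3, 1, I0): columns 0..=3 hold the
// variables, column num_vars = 4 holds the constant one, and column
// num_vars + 1 = 5 holds the public input I0.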
// constraint 0 entries in (A,B,C)
// constraint 0 is Z0 * Z0 - Z1 = 0.
A.push((0, 0, one));
B.push((0, 0, one));
C.push((0, 1, one));
// constraint 1 entries in (A,B,C)
// constraint 1 is Z1 * Z0 - Z2 = 0.
A.push((1, 1, one));
B.push((1, 0, one));
C.push((1, 2, one));
// constraint 2 entries in (A,B,C)
// constraint 2 is (Z2 + Z0) * 1 - Z3 = 0.
A.push((2, 2, one));
A.push((2, 0, one));
B.push((2, num_vars, one));
C.push((2, 3, one));
// constraint 3 entries in (A,B,C)
// constraint 3 is (Z3 + 5) * 1 - I0 = 0.
A.push((3, 3, one));
A.push((3, num_vars, Scalar::from(5u32).to_bytes().into()));
B.push((3, num_vars, one));
C.push((3, num_vars + 1, one));
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
// compute a satisfying assignment
let mut csprng: OsRng = OsRng;
let z0 = Scalar::random(&mut csprng);
let z1 = z0 * z0; // constraint 0
let z2 = z1 * z0; // constraint 1
let z3 = z2 + z0; // constraint 2
let i0 = z3 + Scalar::from(5u32); // constraint 3
// create a VarsAssignment
let mut vars: Vec<[u8; 32]> = vec![Scalar::ZERO.to_bytes().into(); num_vars];
vars[0] = z0.to_bytes().into();
vars[1] = z1.to_bytes().into();
vars[2] = z2.to_bytes().into();
vars[3] = z3.to_bytes().into();
let assignment_vars = VarsAssignment::new(&vars).unwrap();
// create an InputsAssignment
let mut inputs: Vec<[u8; 32]> = vec![Scalar::ZERO.to_bytes().into(); num_inputs];
inputs[0] = i0.to_bytes().into();
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
// check if the instance we created is satisfiable
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert!(res.unwrap(), "should be satisfied");
(
num_cons,
num_vars,
num_inputs,
num_non_zero_entries,
inst,
assignment_vars,
assignment_inputs,
)
}
fn main() {
// produce an R1CS instance
let (
num_cons,
num_vars,
num_inputs,
num_non_zero_entries,
inst,
assignment_vars,
assignment_inputs,
) = produce_r1cs();
// produce public parameters
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);
// create a commitment to the R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof of satisfiability
let mut prover_transcript = Transcript::new(b"snark_example");
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
assignment_vars,
&assignment_inputs,
&gens,
&mut prover_transcript,
);
// verify the proof of satisfiability
let mut verifier_transcript = Transcript::new(b"snark_example");
assert!(proof
.verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens)
.is_ok());
println!("proof verification successful!");
}

packages/Spartan-secq/profiler/nizk.rs

@@ -0,0 +1,52 @@
#![allow(non_snake_case)]
#![allow(clippy::assertions_on_result_states)]
extern crate flate2;
extern crate libspartan;
extern crate merlin;
extern crate rand;
use flate2::{write::ZlibEncoder, Compression};
use libspartan::{Instance, NIZKGens, NIZK};
use merlin::Transcript;
fn print(msg: &str) {
let star = "* ";
println!("{:indent$}{}{}", "", star, msg, indent = 2);
}
pub fn main() {
// the list of number of variables (and constraints) in an R1CS instance
let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20];
println!("Profiler:: NIZK");
for &s in inst_sizes.iter() {
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
// produce a synthetic R1CSInstance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public generators
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
// produce a proof of satisfiability
let mut prover_transcript = Transcript::new(b"nizk_example");
let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
bincode::serialize_into(&mut encoder, &proof).unwrap();
let proof_encoded = encoder.finish().unwrap();
let msg_proof_len = format!("NIZK::proof_compressed_len {:?}", proof_encoded.len());
print(&msg_proof_len);
// verify the proof of satisfiability
let mut verifier_transcript = Transcript::new(b"nizk_example");
assert!(proof
.verify(&inst, &inputs, &mut verifier_transcript, &gens)
.is_ok());
println!();
}
}

packages/Spartan-secq/profiler/snark.rs

@@ -0,0 +1,62 @@
#![allow(non_snake_case)]
#![allow(clippy::assertions_on_result_states)]
extern crate flate2;
extern crate libspartan;
extern crate merlin;
use flate2::{write::ZlibEncoder, Compression};
use libspartan::{Instance, SNARKGens, SNARK};
use merlin::Transcript;
fn print(msg: &str) {
let star = "* ";
println!("{:indent$}{}{}", "", star, msg, indent = 2);
}
pub fn main() {
// the list of number of variables (and constraints) in an R1CS instance
let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20];
println!("Profiler:: SNARK");
for &s in inst_sizes.iter() {
let num_vars = (2_usize).pow(s as u32);
let num_cons = num_vars;
let num_inputs = 10;
// produce a synthetic R1CSInstance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// produce public generators
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// create a commitment to R1CSInstance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof of satisfiability
let mut prover_transcript = Transcript::new(b"snark_example");
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
vars,
&inputs,
&gens,
&mut prover_transcript,
);
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
bincode::serialize_into(&mut encoder, &proof).unwrap();
let proof_encoded = encoder.finish().unwrap();
let msg_proof_len = format!("SNARK::proof_compressed_len {:?}", proof_encoded.len());
print(&msg_proof_len);
// verify the proof of satisfiability
let mut verifier_transcript = Transcript::new(b"snark_example");
assert!(proof
.verify(&comm, &inputs, &mut verifier_transcript, &gens)
.is_ok());
println!();
}
}

packages/Spartan-secq/rustfmt.toml

@@ -0,0 +1,4 @@
edition = "2018"
tab_spaces = 2
newline_style = "Unix"
use_try_shorthand = true


@@ -0,0 +1,54 @@
use hex_literal::hex;
use num_bigint_dig::{BigInt, BigUint, ModInverse, ToBigInt};
use num_traits::{FromPrimitive, ToPrimitive};
use std::ops::Neg;
fn get_words(n: &BigUint) -> [u64; 4] {
let mut words = [0u64; 4];
for i in 0..4 {
let word = n.clone() >> (64 * i) & BigUint::from(0xffffffffffffffffu64);
words[i] = word.to_u64().unwrap();
}
words
}
fn render_hex(label: String, words: &[u64; 4]) {
println!("// {}", label);
for word in words {
println!("0x{:016x},", word);
}
}
fn main() {
let modulus = BigUint::from_bytes_be(&hex!(
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"
));
let r = BigUint::from_u8(2)
.unwrap()
.modpow(&BigUint::from_u64(256).unwrap(), &modulus);
let r2 = BigUint::from_u8(2)
.unwrap()
.modpow(&BigUint::from_u64(512).unwrap(), &modulus);
let r3 = BigUint::from_u8(2)
.unwrap()
.modpow(&BigUint::from_u64(768).unwrap(), &modulus);
let two_pow_64 = BigUint::from_u128(18446744073709551616u128).unwrap();
let one = BigInt::from_u8(1).unwrap();
let inv = modulus
.clone()
.mod_inverse(&two_pow_64)
.unwrap()
.neg()
.modpow(&one, &two_pow_64.to_bigint().unwrap());
render_hex("Modulus".to_string(), &get_words(&modulus));
render_hex("R".to_string(), &get_words(&r));
render_hex("R2".to_string(), &get_words(&r2));
render_hex("R3".to_string(), &get_words(&r3));
render_hex("INV".to_string(), &get_words(&inv.to_biguint().unwrap()));
}
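The INV value printed by this script is the Montgomery constant -modulus^{-1} mod 2^64, and it depends only on the lowest 64-bit limb of the modulus. A minimal standalone sanity check of that relationship (a sketch using Newton–Hensel lifting, independent of the crates above; constant names are illustrative):

fn main() {
  // Low limb of the secp256k1 base-field modulus used by the script above.
  let p0: u64 = 0xffff_fffe_ffff_fc2f;
  // Newton–Hensel iteration: each step doubles the number of correct
  // low-order bits of the inverse, so six steps suffice for 64 bits.
  let mut inv: u64 = 1;
  for _ in 0..6 {
    inv = inv.wrapping_mul(2u64.wrapping_sub(p0.wrapping_mul(inv)));
  }
  let minus_inv = inv.wrapping_neg(); // INV = -p^{-1} mod 2^64
  // By construction, p * INV ≡ -1 (mod 2^64).
  assert_eq!(p0.wrapping_mul(minus_inv), u64::MAX);
  println!("INV = 0x{:016x}", minus_inv);
}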

packages/Spartan-secq/src/commitments.rs

@@ -0,0 +1,96 @@
use super::group::{GroupElement, VartimeMultiscalarMul};
use super::scalar::Scalar;
use digest::{ExtendableOutput, Input};
use secq256k1::AffinePoint;
use sha3::Shake256;
use std::io::Read;
#[derive(Debug)]
pub struct MultiCommitGens {
pub n: usize,
pub G: Vec<GroupElement>,
pub h: GroupElement,
}
impl MultiCommitGens {
pub fn new(n: usize, label: &[u8]) -> Self {
let mut shake = Shake256::default();
shake.input(label);
shake.input(AffinePoint::generator().compress().as_bytes());
let mut reader = shake.xof_result();
let mut gens: Vec<GroupElement> = Vec::new();
let mut uniform_bytes = [0u8; 128];
for _ in 0..n + 1 {
reader.read_exact(&mut uniform_bytes).unwrap();
gens.push(AffinePoint::from_uniform_bytes(&uniform_bytes));
}
MultiCommitGens {
n,
G: gens[..n].to_vec(),
h: gens[n],
}
}
pub fn clone(&self) -> MultiCommitGens {
MultiCommitGens {
n: self.n,
h: self.h,
G: self.G.clone(),
}
}
pub fn scale(&self, s: &Scalar) -> MultiCommitGens {
MultiCommitGens {
n: self.n,
h: self.h,
G: (0..self.n).map(|i| s * self.G[i]).collect(),
}
}
pub fn split_at(&self, mid: usize) -> (MultiCommitGens, MultiCommitGens) {
let (G1, G2) = self.G.split_at(mid);
(
MultiCommitGens {
n: G1.len(),
G: G1.to_vec(),
h: self.h,
},
MultiCommitGens {
n: G2.len(),
G: G2.to_vec(),
h: self.h,
},
)
}
}
pub trait Commitments {
fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement;
}
impl Commitments for Scalar {
fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
assert_eq!(gens_n.n, 1);
GroupElement::vartime_multiscalar_mul(
[*self, *blind].to_vec(),
[gens_n.G[0], gens_n.h].to_vec(),
)
}
}
impl Commitments for Vec<Scalar> {
fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
assert_eq!(gens_n.n, self.len());
GroupElement::vartime_multiscalar_mul((*self).clone(), gens_n.G.clone()) + blind * gens_n.h
}
}
impl Commitments for [Scalar] {
fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement {
assert_eq!(gens_n.n, self.len());
GroupElement::vartime_multiscalar_mul(self.to_vec(), gens_n.G.clone()) + blind * gens_n.h
}
}
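A minimal usage sketch of the commitment helpers above (illustrative labels and values; assumes the crate's Scalar and GroupElement types are in scope):

// Derive two generators G[0], G[1] plus a blinding generator h.
let gens = MultiCommitGens::new(2, b"example-gens");
let v = vec![Scalar::one(), Scalar::one()];
let blind = Scalar::zero();
// Pedersen commitment: C = v[0]*G[0] + v[1]*G[1] + blind*h.
// Commitments are additively homomorphic: commitments to v (blind b1) and
// w (blind b2) sum to a commitment to v + w with blind b1 + b2.
let c: GroupElement = v.commit(&blind, &gens);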

packages/Spartan-secq/src/dense_mlpoly.rs

@@ -0,0 +1,602 @@
#![allow(clippy::too_many_arguments)]
use super::commitments::{Commitments, MultiCommitGens};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::math::Math;
use super::nizk::{DotProductProofGens, DotProductProofLog};
use super::random::RandomTape;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use crate::group::DecompressEncodedPoint;
use core::ops::Index;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
#[cfg(feature = "multicore")]
use rayon::prelude::*;
#[derive(Debug)]
pub struct DensePolynomial {
num_vars: usize, // the number of variables in the multilinear polynomial
len: usize,
Z: Vec<Scalar>, // evaluations of the polynomial in all the 2^num_vars Boolean inputs
}
pub struct PolyCommitmentGens {
pub gens: DotProductProofGens,
}
impl PolyCommitmentGens {
// the number of variables in the multilinear polynomial
pub fn new(num_vars: usize, label: &'static [u8]) -> PolyCommitmentGens {
let (_left, right) = EqPolynomial::compute_factored_lens(num_vars);
let gens = DotProductProofGens::new(right.pow2(), label);
PolyCommitmentGens { gens }
}
}
pub struct PolyCommitmentBlinds {
blinds: Vec<Scalar>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct PolyCommitment {
pub C: Vec<CompressedGroup>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ConstPolyCommitment {
C: CompressedGroup,
}
pub struct EqPolynomial {
r: Vec<Scalar>,
}
impl EqPolynomial {
pub fn new(r: Vec<Scalar>) -> Self {
EqPolynomial { r }
}
pub fn evaluate(&self, rx: &[Scalar]) -> Scalar {
assert_eq!(self.r.len(), rx.len());
(0..rx.len())
.map(|i| self.r[i] * rx[i] + (Scalar::one() - self.r[i]) * (Scalar::one() - rx[i]))
.product()
}
pub fn evals(&self) -> Vec<Scalar> {
let ell = self.r.len();
let mut evals: Vec<Scalar> = vec![Scalar::one(); ell.pow2()];
let mut size = 1;
for j in 0..ell {
// in each iteration, we double the size of chis
size *= 2;
for i in (0..size).rev().step_by(2) {
// copy each element from the prior iteration twice
let scalar = evals[i / 2];
evals[i] = scalar * self.r[j];
evals[i - 1] = scalar - evals[i];
}
}
evals
}
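// Example for ell = 2: starting from [1, 1, 1, 1], the passes above yield
// evals = [(1-r0)*(1-r1), (1-r0)*r1, r0*(1-r1), r0*r1], i.e. evals[i] is the
// multilinear Lagrange basis polynomial eq(i, r), with r[0] matched against
// the most significant bit of i.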
pub fn compute_factored_lens(ell: usize) -> (usize, usize) {
(ell / 2, ell - ell / 2)
}
pub fn compute_factored_evals(&self) -> (Vec<Scalar>, Vec<Scalar>) {
let ell = self.r.len();
let (left_num_vars, _right_num_vars) = EqPolynomial::compute_factored_lens(ell);
let L = EqPolynomial::new(self.r[..left_num_vars].to_vec()).evals();
let R = EqPolynomial::new(self.r[left_num_vars..ell].to_vec()).evals();
(L, R)
}
}
pub struct IdentityPolynomial {
size_point: usize,
}
impl IdentityPolynomial {
pub fn new(size_point: usize) -> Self {
IdentityPolynomial { size_point }
}
pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
let len = r.len();
assert_eq!(len, self.size_point);
(0..len)
.map(|i| Scalar::from((len - i - 1).pow2() as u64) * r[i])
.sum()
}
}
impl DensePolynomial {
pub fn new(Z: Vec<Scalar>) -> Self {
DensePolynomial {
num_vars: Z.len().log_2(),
len: Z.len(),
Z,
}
}
pub fn get_num_vars(&self) -> usize {
self.num_vars
}
pub fn len(&self) -> usize {
self.len
}
pub fn clone(&self) -> DensePolynomial {
DensePolynomial::new(self.Z[0..self.len].to_vec())
}
pub fn split(&self, idx: usize) -> (DensePolynomial, DensePolynomial) {
assert!(idx < self.len());
(
DensePolynomial::new(self.Z[..idx].to_vec()),
DensePolynomial::new(self.Z[idx..2 * idx].to_vec()),
)
}
#[cfg(feature = "multicore")]
fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment {
let L_size = blinds.len();
let R_size = self.Z.len() / L_size;
assert_eq!(L_size * R_size, self.Z.len());
let C = (0..L_size)
.into_par_iter()
.map(|i| {
self.Z[R_size * i..R_size * (i + 1)]
.commit(&blinds[i], gens)
.compress()
})
.collect();
PolyCommitment { C }
}
#[cfg(not(feature = "multicore"))]
fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment {
let L_size = blinds.len();
let R_size = self.Z.len() / L_size;
assert_eq!(L_size * R_size, self.Z.len());
let C = (0..L_size)
.map(|i| {
self.Z[R_size * i..R_size * (i + 1)]
.commit(&blinds[i], gens)
.compress()
})
.collect();
PolyCommitment { C }
}
pub fn commit(
&self,
gens: &PolyCommitmentGens,
random_tape: Option<&mut RandomTape>,
) -> (PolyCommitment, PolyCommitmentBlinds) {
let n = self.Z.len();
let ell = self.get_num_vars();
assert_eq!(n, ell.pow2());
let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(ell);
let L_size = left_num_vars.pow2();
let R_size = right_num_vars.pow2();
assert_eq!(L_size * R_size, n);
let blinds = if let Some(t) = random_tape {
PolyCommitmentBlinds {
blinds: t.random_vector(b"poly_blinds", L_size),
}
} else {
PolyCommitmentBlinds {
blinds: vec![Scalar::zero(); L_size],
}
};
(self.commit_inner(&blinds.blinds, &gens.gens.gens_n), blinds)
}
pub fn bound(&self, L: &[Scalar]) -> Vec<Scalar> {
let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(self.get_num_vars());
let L_size = left_num_vars.pow2();
let R_size = right_num_vars.pow2();
(0..R_size)
.map(|i| (0..L_size).map(|j| L[j] * self.Z[j * R_size + i]).sum())
.collect()
}
pub fn bound_poly_var_top(&mut self, r: &Scalar) {
let n = self.len() / 2;
for i in 0..n {
self.Z[i] = self.Z[i] + r * (self.Z[i + n] - self.Z[i]);
}
self.num_vars -= 1;
self.len = n;
}
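// The update above computes new_Z[i] = (1 - r) * Z[i] + r * Z[i + n], i.e. it
// fixes the top (most significant) variable of the polynomial to r.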
pub fn bound_poly_var_bot(&mut self, r: &Scalar) {
let n = self.len() / 2;
for i in 0..n {
self.Z[i] = self.Z[2 * i] + r * (self.Z[2 * i + 1] - self.Z[2 * i]);
}
self.num_vars -= 1;
self.len = n;
}
// returns Z(r) in O(n) time
pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
// r must have a value for each variable
assert_eq!(r.len(), self.get_num_vars());
let chis = EqPolynomial::new(r.to_vec()).evals();
assert_eq!(chis.len(), self.Z.len());
DotProductProofLog::compute_dotproduct(&self.Z, &chis)
}
fn vec(&self) -> &Vec<Scalar> {
&self.Z
}
pub fn extend(&mut self, other: &DensePolynomial) {
// TODO: allow extension even when some vars are bound
assert_eq!(self.Z.len(), self.len);
let other_vec = other.vec();
assert_eq!(other_vec.len(), self.len);
self.Z.extend(other_vec);
self.num_vars += 1;
self.len *= 2;
assert_eq!(self.Z.len(), self.len);
}
pub fn merge<'a, I>(polys: I) -> DensePolynomial
where
I: IntoIterator<Item = &'a DensePolynomial>,
{
let mut Z: Vec<Scalar> = Vec::new();
for poly in polys.into_iter() {
Z.extend(poly.vec());
}
// pad the polynomial with zero polynomial at the end
Z.resize(Z.len().next_power_of_two(), Scalar::zero());
DensePolynomial::new(Z)
}
pub fn from_usize(Z: &[usize]) -> Self {
DensePolynomial::new(
(0..Z.len())
.map(|i| Scalar::from(Z[i] as u64))
.collect::<Vec<Scalar>>(),
)
}
}
impl Index<usize> for DensePolynomial {
type Output = Scalar;
#[inline(always)]
fn index(&self, _index: usize) -> &Scalar {
&(self.Z[_index])
}
}
impl AppendToTranscript for PolyCommitment {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_message(label, b"poly_commitment_begin");
for i in 0..self.C.len() {
transcript.append_point(b"poly_commitment_share", &self.C[i]);
}
transcript.append_message(label, b"poly_commitment_end");
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct PolyEvalProof {
pub proof: DotProductProofLog,
}
impl PolyEvalProof {
fn protocol_name() -> &'static [u8] {
b"polynomial evaluation proof"
}
pub fn prove(
poly: &DensePolynomial,
blinds_opt: Option<&PolyCommitmentBlinds>,
r: &[Scalar], // point at which the polynomial is evaluated
Zr: &Scalar, // evaluation of \widetilde{Z}(r)
blind_Zr_opt: Option<&Scalar>, // specifies a blind for Zr
gens: &PolyCommitmentGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> (PolyEvalProof, CompressedGroup) {
transcript.append_protocol_name(PolyEvalProof::protocol_name());
// assert vectors are of the right size
assert_eq!(poly.get_num_vars(), r.len());
let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(r.len());
let L_size = left_num_vars.pow2();
let R_size = right_num_vars.pow2();
let default_blinds = PolyCommitmentBlinds {
blinds: vec![Scalar::zero(); L_size],
};
let blinds = blinds_opt.map_or(&default_blinds, |p| p);
assert_eq!(blinds.blinds.len(), L_size);
let zero = Scalar::zero();
let blind_Zr = blind_Zr_opt.map_or(&zero, |p| p);
// compute the L and R vectors
let eq = EqPolynomial::new(r.to_vec());
let (L, R) = eq.compute_factored_evals();
assert_eq!(L.len(), L_size);
assert_eq!(R.len(), R_size);
// compute the vector underneath L*Z and the L*blinds
// compute vector-matrix product between L and Z viewed as a matrix
let LZ = poly.bound(&L);
let LZ_blind: Scalar = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum();
// a dot product proof of size R_size
let (proof, _C_LR, C_Zr_prime) = DotProductProofLog::prove(
&gens.gens,
transcript,
random_tape,
&LZ,
&LZ_blind,
&R,
Zr,
blind_Zr,
);
(PolyEvalProof { proof }, C_Zr_prime)
}
pub fn verify(
&self,
gens: &PolyCommitmentGens,
transcript: &mut Transcript,
r: &[Scalar], // point at which the polynomial is evaluated
C_Zr: &CompressedGroup, // commitment to \widetilde{Z}(r)
comm: &PolyCommitment,
) -> Result<(), ProofVerifyError> {
transcript.append_protocol_name(PolyEvalProof::protocol_name());
// compute L and R
let eq = EqPolynomial::new(r.to_vec());
let (L, R) = eq.compute_factored_evals();
// compute a weighted sum of commitments and L
let C_decompressed = comm.C.iter().map(|pt| pt.decompress().unwrap());
let C_LZ = GroupElement::vartime_multiscalar_mul(L, C_decompressed.collect()).compress();
self
.proof
.verify(R.len(), &gens.gens, transcript, &R, &C_LZ, C_Zr)
}
pub fn verify_plain(
&self,
gens: &PolyCommitmentGens,
transcript: &mut Transcript,
r: &[Scalar], // point at which the polynomial is evaluated
Zr: &Scalar, // evaluation \widetilde{Z}(r)
comm: &PolyCommitment,
) -> Result<(), ProofVerifyError> {
// compute a commitment to Zr with a blind of zero
let C_Zr = Zr.commit(&Scalar::zero(), &gens.gens.gens_1).compress();
self.verify(gens, transcript, r, &C_Zr, comm)
}
}
#[cfg(test)]
mod tests {
use super::super::scalar::ScalarFromPrimitives;
use super::*;
use rand_core::OsRng;
fn evaluate_with_LR(Z: &[Scalar], r: &[Scalar]) -> Scalar {
let eq = EqPolynomial::new(r.to_vec());
let (L, R) = eq.compute_factored_evals();
let ell = r.len();
// ensure ell is even
assert!(ell % 2 == 0);
// compute n = 2^\ell
let n = ell.pow2();
// compute m = sqrt(n) = 2^{\ell/2}
let m = n.square_root();
// compute vector-matrix product between L and Z viewed as a matrix
let LZ = (0..m)
.map(|i| (0..m).map(|j| L[j] * Z[j * m + i]).sum())
.collect::<Vec<Scalar>>();
// compute dot product between LZ and R
DotProductProofLog::compute_dotproduct(&LZ, &R)
}
#[test]
fn check_polynomial_evaluation() {
// Z = [1, 2, 1, 4]
let Z = vec![
Scalar::one(),
(2_usize).to_scalar(),
(1_usize).to_scalar(),
(4_usize).to_scalar(),
];
// r = [4,3]
let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
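// Expected evaluation: chis(r) = [(1-4)*(1-3), (1-4)*3, 4*(1-3), 4*3]
// = [6, -9, -8, 12], so <Z, chis> = 1*6 + 2*(-9) + 1*(-8) + 4*12 = 28.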
let eval_with_LR = evaluate_with_LR(&Z, &r);
let poly = DensePolynomial::new(Z);
let eval = poly.evaluate(&r);
assert_eq!(eval, (28_usize).to_scalar());
assert_eq!(eval_with_LR, eval);
}
pub fn compute_factored_chis_at_r(r: &[Scalar]) -> (Vec<Scalar>, Vec<Scalar>) {
let mut L: Vec<Scalar> = Vec::new();
let mut R: Vec<Scalar> = Vec::new();
let ell = r.len();
assert!(ell % 2 == 0); // ensure ell is even
let n = ell.pow2();
let m = n.square_root();
// compute row vector L
for i in 0..m {
let mut chi_i = Scalar::one();
for j in 0..ell / 2 {
let bit_j = ((m * i) & (1 << (r.len() - j - 1))) > 0;
if bit_j {
chi_i *= r[j];
} else {
chi_i *= Scalar::one() - r[j];
}
}
L.push(chi_i);
}
// compute column vector R
for i in 0..m {
let mut chi_i = Scalar::one();
for j in ell / 2..ell {
let bit_j = (i & (1 << (r.len() - j - 1))) > 0;
if bit_j {
chi_i *= r[j];
} else {
chi_i *= Scalar::one() - r[j];
}
}
R.push(chi_i);
}
(L, R)
}
pub fn compute_chis_at_r(r: &[Scalar]) -> Vec<Scalar> {
let ell = r.len();
let n = ell.pow2();
let mut chis: Vec<Scalar> = Vec::new();
for i in 0..n {
let mut chi_i = Scalar::one();
for j in 0..r.len() {
let bit_j = (i & (1 << (r.len() - j - 1))) > 0;
if bit_j {
chi_i *= r[j];
} else {
chi_i *= Scalar::one() - r[j];
}
}
chis.push(chi_i);
}
chis
}
pub fn compute_outerproduct(L: Vec<Scalar>, R: Vec<Scalar>) -> Vec<Scalar> {
assert_eq!(L.len(), R.len());
(0..L.len())
.map(|i| (0..R.len()).map(|j| L[i] * R[j]).collect::<Vec<Scalar>>())
.collect::<Vec<Vec<Scalar>>>()
.into_iter()
.flatten()
.collect::<Vec<Scalar>>()
}
#[test]
fn check_memoized_chis() {
let mut csprng: OsRng = OsRng;
let s = 10;
let mut r: Vec<Scalar> = Vec::new();
for _i in 0..s {
r.push(Scalar::random(&mut csprng));
}
let chis = tests::compute_chis_at_r(&r);
let chis_m = EqPolynomial::new(r).evals();
assert_eq!(chis, chis_m);
}
#[test]
fn check_factored_chis() {
let mut csprng: OsRng = OsRng;
let s = 10;
let mut r: Vec<Scalar> = Vec::new();
for _i in 0..s {
r.push(Scalar::random(&mut csprng));
}
let chis = EqPolynomial::new(r.clone()).evals();
let (L, R) = EqPolynomial::new(r).compute_factored_evals();
let O = compute_outerproduct(L, R);
assert_eq!(chis, O);
}
#[test]
fn check_memoized_factored_chis() {
let mut csprng: OsRng = OsRng;
let s = 10;
let mut r: Vec<Scalar> = Vec::new();
for _i in 0..s {
r.push(Scalar::random(&mut csprng));
}
let (L, R) = tests::compute_factored_chis_at_r(&r);
let eq = EqPolynomial::new(r);
let (L2, R2) = eq.compute_factored_evals();
assert_eq!(L, L2);
assert_eq!(R, R2);
}
#[test]
fn check_polynomial_commit() {
let Z = vec![
(1_usize).to_scalar(),
(2_usize).to_scalar(),
(1_usize).to_scalar(),
(4_usize).to_scalar(),
];
let poly = DensePolynomial::new(Z);
// r = [4,3]
let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()];
let eval = poly.evaluate(&r);
assert_eq!(eval, (28_usize).to_scalar());
let gens = PolyCommitmentGens::new(poly.get_num_vars(), b"test-two");
let (poly_commitment, blinds) = poly.commit(&gens, None);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, C_Zr) = PolyEvalProof::prove(
&poly,
Some(&blinds),
&r,
&eval,
None,
&gens,
&mut prover_transcript,
&mut random_tape,
);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(&gens, &mut verifier_transcript, &r, &C_Zr, &poly_commitment)
.is_ok());
}
}

packages/Spartan-secq/src/errors.rs

@@ -0,0 +1,32 @@
use core::fmt::Debug;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum ProofVerifyError {
#[error("Proof verification failed")]
InternalError,
#[error("Compressed group element failed to decompress: {0:?}")]
DecompressionError([u8; 32]),
}
impl Default for ProofVerifyError {
fn default() -> Self {
ProofVerifyError::InternalError
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum R1CSError {
/// returned if the number of constraints is not a power of 2
NonPowerOfTwoCons,
/// returned if the number of variables is not a power of 2
NonPowerOfTwoVars,
/// returned if a wrong number of inputs in an assignment are supplied
InvalidNumberOfInputs,
/// returned if a wrong number of variables in an assignment are supplied
InvalidNumberOfVars,
/// returned if a [u8;32] does not parse into a valid Scalar in the field of secq256k1
InvalidScalar,
/// returned if the supplied row or col in (row,col,val) tuple is out of range
InvalidIndex,
}

packages/Spartan-secq/src/group.rs

@@ -0,0 +1,138 @@
use secq256k1::{AffinePoint, ProjectivePoint};
use super::errors::ProofVerifyError;
use super::scalar::{Scalar, ScalarBytes, ScalarBytesFromScalar};
use core::ops::{Mul, MulAssign};
use multiexp::multiexp;
pub type GroupElement = secq256k1::AffinePoint;
pub type CompressedGroup = secq256k1::EncodedPoint;
pub trait CompressedGroupExt {
type Group;
fn unpack(&self) -> Result<Self::Group, ProofVerifyError>;
}
impl CompressedGroupExt for CompressedGroup {
type Group = secq256k1::AffinePoint;
fn unpack(&self) -> Result<Self::Group, ProofVerifyError> {
let result = AffinePoint::decompress(*self);
if result.is_some().into() {
return Ok(result.unwrap());
} else {
Err(ProofVerifyError::DecompressionError(
(*self.to_bytes()).try_into().unwrap(),
))
}
}
}
pub trait DecompressEncodedPoint {
fn decompress(&self) -> Option<GroupElement>;
}
impl DecompressEncodedPoint for CompressedGroup {
fn decompress(&self) -> Option<GroupElement> {
Some(self.unpack().unwrap())
}
}
impl<'b> MulAssign<&'b Scalar> for GroupElement {
fn mul_assign(&mut self, scalar: &'b Scalar) {
let result = (self as &GroupElement) * Scalar::decompress_scalar(scalar);
*self = result;
}
}
impl<'a, 'b> Mul<&'b Scalar> for &'a GroupElement {
type Output = GroupElement;
fn mul(self, scalar: &'b Scalar) -> GroupElement {
*self * Scalar::decompress_scalar(scalar)
}
}
impl<'a, 'b> Mul<&'b GroupElement> for &'a Scalar {
type Output = GroupElement;
fn mul(self, point: &'b GroupElement) -> GroupElement {
(*point * Scalar::decompress_scalar(self)).into()
}
}
macro_rules! define_mul_variants {
(LHS = $lhs:ty, RHS = $rhs:ty, Output = $out:ty) => {
impl<'b> Mul<&'b $rhs> for $lhs {
type Output = $out;
fn mul(self, rhs: &'b $rhs) -> $out {
&self * rhs
}
}
impl<'a> Mul<$rhs> for &'a $lhs {
type Output = $out;
fn mul(self, rhs: $rhs) -> $out {
self * &rhs
}
}
impl Mul<$rhs> for $lhs {
type Output = $out;
fn mul(self, rhs: $rhs) -> $out {
&self * &rhs
}
}
};
}
macro_rules! define_mul_assign_variants {
(LHS = $lhs:ty, RHS = $rhs:ty) => {
impl MulAssign<$rhs> for $lhs {
fn mul_assign(&mut self, rhs: $rhs) {
*self *= &rhs;
}
}
};
}
define_mul_assign_variants!(LHS = GroupElement, RHS = Scalar);
define_mul_variants!(LHS = GroupElement, RHS = Scalar, Output = GroupElement);
define_mul_variants!(LHS = Scalar, RHS = GroupElement, Output = GroupElement);
pub trait VartimeMultiscalarMul {
type Scalar;
fn vartime_multiscalar_mul(scalars: Vec<Scalar>, points: Vec<GroupElement>) -> Self;
}
impl VartimeMultiscalarMul for GroupElement {
type Scalar = super::scalar::Scalar;
// TODO Borrow the arguments so we don't have to clone them, as it was in the original implementation
fn vartime_multiscalar_mul(scalars: Vec<Scalar>, points: Vec<GroupElement>) -> Self {
let points: Vec<ProjectivePoint> = points.iter().map(|p| ProjectivePoint::from(p.0)).collect();
let pairs: Vec<(ScalarBytes, ProjectivePoint)> = scalars
.into_iter()
.enumerate()
.map(|(i, s)| (Scalar::decompress_scalar(&s), points[i]))
.collect();
let result = multiexp::<ProjectivePoint>(pairs.as_slice());
AffinePoint(result.to_affine())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn msm() {
let scalars = vec![Scalar::from(1), Scalar::from(2), Scalar::from(3)];
let points = vec![
GroupElement::generator(),
GroupElement::generator(),
GroupElement::generator(),
];
let result = GroupElement::vartime_multiscalar_mul(scalars, points);
assert_eq!(result, GroupElement::generator() * Scalar::from(6));
}
}

packages/Spartan-secq/src/lib.rs

@@ -0,0 +1,748 @@
#![allow(non_snake_case)]
#![allow(clippy::assertions_on_result_states)]
extern crate byteorder;
extern crate core;
extern crate digest;
extern crate merlin;
extern crate rand;
extern crate sha3;
#[cfg(feature = "multicore")]
extern crate rayon;
pub mod commitments;
pub mod dense_mlpoly;
mod errors;
pub mod group;
pub mod math;
pub mod nizk;
mod product_tree;
pub mod r1csinstance;
mod r1csproof;
pub mod random;
pub mod scalar;
mod sparse_mlpoly;
pub mod sumcheck;
mod timer;
pub mod transcript;
mod unipoly;
use core::cmp::max;
use errors::{ProofVerifyError, R1CSError};
use merlin::Transcript;
use r1csinstance::{
R1CSCommitment, R1CSCommitmentGens, R1CSDecommitment, R1CSEvalProof, R1CSInstance,
};
use r1csproof::{R1CSGens, R1CSProof};
use random::RandomTape;
use scalar::Scalar;
use serde::{Deserialize, Serialize};
use timer::Timer;
use transcript::{AppendToTranscript, ProofTranscript};
/// `ComputationCommitment` holds a public preprocessed NP statement (e.g., R1CS)
pub struct ComputationCommitment {
comm: R1CSCommitment,
}
/// `ComputationDecommitment` holds information to decommit `ComputationCommitment`
pub struct ComputationDecommitment {
decomm: R1CSDecommitment,
}
/// `Assignment` holds an assignment of values to either the inputs or variables in an `Instance`
#[derive(Serialize, Deserialize, Clone)]
pub struct Assignment {
pub assignment: Vec<Scalar>,
}
impl Assignment {
/// Constructs a new `Assignment` from a vector
pub fn new(assignment: &[[u8; 32]]) -> Result<Assignment, R1CSError> {
let bytes_to_scalar = |vec: &[[u8; 32]]| -> Result<Vec<Scalar>, R1CSError> {
let mut vec_scalar: Vec<Scalar> = Vec::new();
for v in vec {
let val = Scalar::from_bytes(v);
if val.is_some().unwrap_u8() == 1 {
vec_scalar.push(val.unwrap());
} else {
return Err(R1CSError::InvalidScalar);
}
}
Ok(vec_scalar)
};
let assignment_scalar = bytes_to_scalar(assignment);
// check for any parsing errors
if assignment_scalar.is_err() {
return Err(R1CSError::InvalidScalar);
}
Ok(Assignment {
assignment: assignment_scalar.unwrap(),
})
}
/// pads Assignment to the specified length
fn pad(&self, len: usize) -> VarsAssignment {
// check that the new length is higher than current length
assert!(len > self.assignment.len());
let padded_assignment = {
let mut padded_assignment = self.assignment.clone();
padded_assignment.extend(vec![Scalar::zero(); len - self.assignment.len()]);
padded_assignment
};
VarsAssignment {
assignment: padded_assignment,
}
}
}
/// `VarsAssignment` holds an assignment of values to variables in an `Instance`
pub type VarsAssignment = Assignment;
/// `InputsAssignment` holds an assignment of values to inputs in an `Instance`
pub type InputsAssignment = Assignment;
/// `Instance` holds the description of R1CS matrices and a hash of the matrices
#[derive(Serialize, Deserialize)]
pub struct Instance {
/// R1CS instance
pub inst: R1CSInstance,
pub digest: Vec<u8>,
}
impl Instance {
/// Constructs a new `Instance` and an associated satisfying assignment
pub fn new(
num_cons: usize,
num_vars: usize,
num_inputs: usize,
A: &[(usize, usize, [u8; 32])],
B: &[(usize, usize, [u8; 32])],
C: &[(usize, usize, [u8; 32])],
) -> Result<Instance, R1CSError> {
let (num_vars_padded, num_cons_padded) = {
let num_vars_padded = {
let mut num_vars_padded = num_vars;
// ensure that num_inputs + 1 <= num_vars
num_vars_padded = max(num_vars_padded, num_inputs + 1);
// ensure that num_vars_padded is a power of two
if num_vars_padded.next_power_of_two() != num_vars_padded {
num_vars_padded = num_vars_padded.next_power_of_two();
}
num_vars_padded
};
let num_cons_padded = {
let mut num_cons_padded = num_cons;
// ensure that num_cons_padded is at least 2
if num_cons_padded == 0 || num_cons_padded == 1 {
num_cons_padded = 2;
}
// ensure that num_cons_padded is a power of 2
if num_cons_padded.next_power_of_two() != num_cons_padded {
num_cons_padded = num_cons_padded.next_power_of_two();
}
num_cons_padded
};
(num_vars_padded, num_cons_padded)
};
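// e.g. num_vars = 4 with num_inputs = 5 gives max(4, 5 + 1) = 6, which is
// then rounded up to the next power of two: num_vars_padded = 8.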
let bytes_to_scalar =
|tups: &[(usize, usize, [u8; 32])]| -> Result<Vec<(usize, usize, Scalar)>, R1CSError> {
let mut mat: Vec<(usize, usize, Scalar)> = Vec::new();
for &(row, col, val_bytes) in tups {
// row must be smaller than num_cons
if row >= num_cons {
return Err(R1CSError::InvalidIndex);
}
// col must be smaller than num_vars + 1 + num_inputs
if col >= num_vars + 1 + num_inputs {
return Err(R1CSError::InvalidIndex);
}
let val = Scalar::from_bytes(&val_bytes);
if val.is_some().unwrap_u8() == 1 {
// if col >= num_vars, it means that it is referencing a 1 or input in the satisfying
// assignment
if col >= num_vars {
mat.push((row, col + num_vars_padded - num_vars, val.unwrap()));
} else {
mat.push((row, col, val.unwrap()));
}
} else {
return Err(R1CSError::InvalidScalar);
}
}
// pad with additional constraints up until num_cons_padded if the original constraints were 0 or 1
// we do not need to pad otherwise because the dummy constraints are implicit in the sum-check protocol
if num_cons == 0 || num_cons == 1 {
for i in tups.len()..num_cons_padded {
mat.push((i, num_vars, Scalar::zero()));
}
}
Ok(mat)
};
let A_scalar = bytes_to_scalar(A);
if A_scalar.is_err() {
return Err(A_scalar.err().unwrap());
}
let B_scalar = bytes_to_scalar(B);
if B_scalar.is_err() {
return Err(B_scalar.err().unwrap());
}
let C_scalar = bytes_to_scalar(C);
if C_scalar.is_err() {
return Err(C_scalar.err().unwrap());
}
let inst = R1CSInstance::new(
num_cons_padded,
num_vars_padded,
num_inputs,
&A_scalar.unwrap(),
&B_scalar.unwrap(),
&C_scalar.unwrap(),
);
let digest = inst.get_digest();
Ok(Instance { inst, digest })
}
/// Checks if a given R1CSInstance is satisfiable with given variable and input assignments
pub fn is_sat(
&self,
vars: &VarsAssignment,
inputs: &InputsAssignment,
) -> Result<bool, R1CSError> {
if vars.assignment.len() > self.inst.get_num_vars() {
return Err(R1CSError::InvalidNumberOfVars);
}
if inputs.assignment.len() != self.inst.get_num_inputs() {
return Err(R1CSError::InvalidNumberOfInputs);
}
// we might need to pad variables
let padded_vars = {
let num_padded_vars = self.inst.get_num_vars();
let num_vars = vars.assignment.len();
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars.clone()
}
};
Ok(self
.inst
.is_sat(&padded_vars.assignment, &inputs.assignment))
}
/// Constructs a new synthetic R1CS `Instance` and an associated satisfying assignment
pub fn produce_synthetic_r1cs(
num_cons: usize,
num_vars: usize,
num_inputs: usize,
) -> (Instance, VarsAssignment, InputsAssignment) {
let (inst, vars, inputs) =
R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let digest = inst.get_digest();
(
Instance { inst, digest },
VarsAssignment { assignment: vars },
InputsAssignment { assignment: inputs },
)
}
}
/// `SNARKGens` holds public parameters for producing and verifying proofs with the Spartan SNARK
pub struct SNARKGens {
gens_r1cs_sat: R1CSGens,
gens_r1cs_eval: R1CSCommitmentGens,
}
impl SNARKGens {
/// Constructs a new `SNARKGens` given the size of the R1CS statement
/// `num_nz_entries` specifies the maximum number of non-zero entries in any of the three R1CS matrices
pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize, num_nz_entries: usize) -> Self {
let num_vars_padded = {
let mut num_vars_padded = max(num_vars, num_inputs + 1);
if num_vars_padded != num_vars_padded.next_power_of_two() {
num_vars_padded = num_vars_padded.next_power_of_two();
}
num_vars_padded
};
let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded);
let gens_r1cs_eval = R1CSCommitmentGens::new(
b"gens_r1cs_eval",
num_cons,
num_vars_padded,
num_inputs,
num_nz_entries,
);
SNARKGens {
gens_r1cs_sat,
gens_r1cs_eval,
}
}
}
/// `SNARK` holds a proof produced by Spartan SNARK
#[derive(Serialize, Deserialize, Debug)]
pub struct SNARK {
r1cs_sat_proof: R1CSProof,
inst_evals: (Scalar, Scalar, Scalar),
r1cs_eval_proof: R1CSEvalProof,
}
impl SNARK {
fn protocol_name() -> &'static [u8] {
b"Spartan SNARK proof"
}
/// A public computation to create a commitment to an R1CS instance
pub fn encode(
inst: &Instance,
gens: &SNARKGens,
) -> (ComputationCommitment, ComputationDecommitment) {
let timer_encode = Timer::new("SNARK::encode");
let (comm, decomm) = inst.inst.commit(&gens.gens_r1cs_eval);
timer_encode.stop();
(
ComputationCommitment { comm },
ComputationDecommitment { decomm },
)
}
/// A method to produce a SNARK proof of the satisfiability of an R1CS instance
pub fn prove(
inst: &Instance,
comm: &ComputationCommitment,
decomm: &ComputationDecommitment,
vars: VarsAssignment,
inputs: &InputsAssignment,
gens: &SNARKGens,
transcript: &mut Transcript,
) -> Self {
let timer_prove = Timer::new("SNARK::prove");
// we create a Transcript object seeded with a random Scalar
// to help the prover produce its randomness
let mut random_tape = RandomTape::new(b"proof");
transcript.append_protocol_name(SNARK::protocol_name());
comm.comm.append_to_transcript(b"comm", transcript);
let (r1cs_sat_proof, rx, ry) = {
let (proof, rx, ry) = {
// we might need to pad variables
let padded_vars = {
let num_padded_vars = inst.inst.get_num_vars();
let num_vars = vars.assignment.len();
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars
}
};
R1CSProof::prove(
&inst.inst,
padded_vars.assignment,
&inputs.assignment,
&gens.gens_r1cs_sat,
transcript,
&mut random_tape,
)
};
let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len()));
(proof, rx, ry)
};
// We send evaluations of A, B, C at r = (rx, ry) as claims
// to enable the verifier to complete the first sum-check
let timer_eval = Timer::new("eval_sparse_polys");
let inst_evals = {
let (Ar, Br, Cr) = inst.inst.evaluate(&rx, &ry);
Ar.append_to_transcript(b"Ar_claim", transcript);
Br.append_to_transcript(b"Br_claim", transcript);
Cr.append_to_transcript(b"Cr_claim", transcript);
(Ar, Br, Cr)
};
timer_eval.stop();
let r1cs_eval_proof = {
let proof = R1CSEvalProof::prove(
&decomm.decomm,
&rx,
&ry,
&inst_evals,
&gens.gens_r1cs_eval,
transcript,
&mut random_tape,
);
let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
Timer::print(&format!("len_r1cs_eval_proof {:?}", proof_encoded.len()));
proof
};
timer_prove.stop();
SNARK {
r1cs_sat_proof,
inst_evals,
r1cs_eval_proof,
}
}
/// A method to verify the SNARK proof of the satisfiability of an R1CS instance
pub fn verify(
&self,
comm: &ComputationCommitment,
input: &InputsAssignment,
transcript: &mut Transcript,
gens: &SNARKGens,
) -> Result<(), ProofVerifyError> {
let timer_verify = Timer::new("SNARK::verify");
transcript.append_protocol_name(SNARK::protocol_name());
// append a commitment to the computation to the transcript
comm.comm.append_to_transcript(b"comm", transcript);
let timer_sat_proof = Timer::new("verify_sat_proof");
assert_eq!(input.assignment.len(), comm.comm.get_num_inputs());
let (rx, ry) = self.r1cs_sat_proof.verify(
comm.comm.get_num_vars(),
comm.comm.get_num_cons(),
&input.assignment,
&self.inst_evals,
transcript,
&gens.gens_r1cs_sat,
)?;
timer_sat_proof.stop();
let timer_eval_proof = Timer::new("verify_eval_proof");
let (Ar, Br, Cr) = &self.inst_evals;
Ar.append_to_transcript(b"Ar_claim", transcript);
Br.append_to_transcript(b"Br_claim", transcript);
Cr.append_to_transcript(b"Cr_claim", transcript);
self.r1cs_eval_proof.verify(
&comm.comm,
&rx,
&ry,
&self.inst_evals,
&gens.gens_r1cs_eval,
transcript,
)?;
timer_eval_proof.stop();
timer_verify.stop();
Ok(())
}
}
/// `NIZKGens` holds public parameters for producing and verifying proofs with the Spartan NIZK
pub struct NIZKGens {
pub gens_r1cs_sat: R1CSGens,
}
impl NIZKGens {
/// Constructs a new `NIZKGens` given the size of the R1CS statement
pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize) -> Self {
let num_vars_padded = {
let mut num_vars_padded = max(num_vars, num_inputs + 1);
if num_vars_padded != num_vars_padded.next_power_of_two() {
num_vars_padded = num_vars_padded.next_power_of_two();
}
num_vars_padded
};
let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded);
NIZKGens { gens_r1cs_sat }
}
}
/// `NIZK` holds a proof produced by Spartan NIZK
#[derive(Serialize, Deserialize, Debug)]
pub struct NIZK {
pub r1cs_sat_proof: R1CSProof,
pub r: (Vec<Scalar>, Vec<Scalar>),
}
impl NIZK {
fn protocol_name() -> &'static [u8] {
b"Spartan NIZK proof"
}
/// A method to produce a NIZK proof of the satisfiability of an R1CS instance
pub fn prove(
inst: &Instance,
vars: VarsAssignment,
input: &InputsAssignment,
gens: &NIZKGens,
transcript: &mut Transcript,
) -> Self {
let timer_prove = Timer::new("NIZK::prove");
// we create a RandomTape (a transcript seeded with a random scalar)
// to aid the prover in producing its randomness
let mut random_tape = RandomTape::new(b"proof");
transcript.append_protocol_name(NIZK::protocol_name());
transcript.append_message(b"R1CSInstanceDigest", &inst.digest);
let (r1cs_sat_proof, rx, ry) = {
// we might need to pad variables
let padded_vars = {
let num_padded_vars = inst.inst.get_num_vars();
let num_vars = vars.assignment.len();
if num_padded_vars > num_vars {
vars.pad(num_padded_vars)
} else {
vars
}
};
let (proof, rx, ry) = R1CSProof::prove(
&inst.inst,
padded_vars.assignment,
&input.assignment,
&gens.gens_r1cs_sat,
transcript,
&mut random_tape,
);
let proof_encoded: Vec<u8> = bincode::serialize(&proof).unwrap();
Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len()));
(proof, rx, ry)
};
timer_prove.stop();
NIZK {
r1cs_sat_proof,
r: (rx, ry),
}
}
/// A method to verify a NIZK proof of the satisfiability of an R1CS instance
pub fn verify(
&self,
inst: &Instance,
input: &InputsAssignment,
transcript: &mut Transcript,
gens: &NIZKGens,
) -> Result<(), ProofVerifyError> {
let timer_verify = Timer::new("NIZK::verify");
transcript.append_protocol_name(NIZK::protocol_name());
transcript.append_message(b"R1CSInstanceDigest", &inst.digest);
// We send evaluations of A, B, C at r = (rx, ry) as claims
// to enable the verifier to complete the first sum-check
let timer_eval = Timer::new("eval_sparse_polys");
let (claimed_rx, claimed_ry) = &self.r;
let inst_evals = inst.inst.evaluate(claimed_rx, claimed_ry);
timer_eval.stop();
let timer_sat_proof = Timer::new("verify_sat_proof");
assert_eq!(input.assignment.len(), inst.inst.get_num_inputs());
let (rx, ry) = self.r1cs_sat_proof.verify(
inst.inst.get_num_vars(),
inst.inst.get_num_cons(),
&input.assignment,
&inst_evals,
transcript,
&gens.gens_r1cs_sat,
)?;
// verify if claimed rx and ry are correct
assert_eq!(rx, *claimed_rx);
assert_eq!(ry, *claimed_ry);
timer_sat_proof.stop();
timer_verify.stop();
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
pub fn check_snark() {
let num_vars = 256;
let num_cons = num_vars;
let num_inputs = 10;
// produce public generators
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons);
// produce a synthetic R1CSInstance
let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
// create a commitment to R1CSInstance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a proof
let mut prover_transcript = Transcript::new(b"example");
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
vars,
&inputs,
&gens,
&mut prover_transcript,
);
// verify the proof
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(&comm, &inputs, &mut verifier_transcript, &gens)
.is_ok());
}
#[test]
pub fn check_r1cs_invalid_index() {
let num_cons = 4;
let num_vars = 8;
let num_inputs = 1;
let zero: [u8; 32] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
];
let A = vec![(0, 0, zero)];
let B = vec![(100, 1, zero)];
let C = vec![(1, 1, zero)];
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
assert!(inst.is_err());
assert_eq!(inst.err(), Some(R1CSError::InvalidIndex));
}
#[test]
pub fn check_r1cs_invalid_scalar() {
let num_cons = 4;
let num_vars = 8;
let num_inputs = 1;
let zero: [u8; 32] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
];
let larger_than_mod = [255; 32];
let A = vec![(0, 0, zero)];
let B = vec![(1, 1, larger_than_mod)];
let C = vec![(1, 1, zero)];
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
assert!(inst.is_err());
assert_eq!(inst.err(), Some(R1CSError::InvalidScalar));
}
#[test]
fn test_padded_constraints() {
// parameters of the R1CS instance
let num_cons = 1;
let num_vars = 0;
let num_inputs = 3;
let num_non_zero_entries = 3;
// We encode the single constraint a^2 + b + 13 = z into three matrices, where
// the coefficients in the matrices are in little-endian byte order
let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new(); // <row, column, value>
let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
// Create the constraint a * a = z - b - 13, i.e. z = a^2 + b + 13
A.push((0, num_vars + 2, Scalar::one().to_bytes())); // 1*a
B.push((0, num_vars + 2, Scalar::one().to_bytes())); // 1*a
C.push((0, num_vars + 1, Scalar::one().to_bytes())); // 1*z
C.push((0, num_vars, (-Scalar::from(13u64)).to_bytes())); // -13*1
C.push((0, num_vars + 3, (-Scalar::one()).to_bytes())); // -1*b
// VarsAssignment is empty here (num_vars = 0); the output z = 16 is passed as an input below
let vars = vec![Scalar::zero().to_bytes(); num_vars];
// create an InputsAssignment (a = 1, b = 2)
let mut inputs = vec![Scalar::zero().to_bytes(); num_inputs];
inputs[0] = Scalar::from(16u64).to_bytes();
inputs[1] = Scalar::from(1u64).to_bytes();
inputs[2] = Scalar::from(2u64).to_bytes();
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
let assignment_vars = VarsAssignment::new(&vars).unwrap();
// Check if instance is satisfiable
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert!(res.unwrap(), "should be satisfied");
// SNARK public params
let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);
// create a commitment to the R1CS instance
let (comm, decomm) = SNARK::encode(&inst, &gens);
// produce a SNARK
let mut prover_transcript = Transcript::new(b"snark_example");
let proof = SNARK::prove(
&inst,
&comm,
&decomm,
assignment_vars.clone(),
&assignment_inputs,
&gens,
&mut prover_transcript,
);
// verify the SNARK
let mut verifier_transcript = Transcript::new(b"snark_example");
assert!(proof
.verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens)
.is_ok());
// NIZK public params
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
// produce a NIZK
let mut prover_transcript = Transcript::new(b"nizk_example");
let proof = NIZK::prove(
&inst,
assignment_vars,
&assignment_inputs,
&gens,
&mut prover_transcript,
);
// verify the NIZK
let mut verifier_transcript = Transcript::new(b"nizk_example");
assert!(proof
.verify(&inst, &assignment_inputs, &mut verifier_transcript, &gens)
.is_ok());
}
}

View File

@@ -0,0 +1,36 @@
pub trait Math {
fn square_root(self) -> usize;
fn pow2(self) -> usize;
fn get_bits(self, num_bits: usize) -> Vec<bool>;
fn log_2(self) -> usize;
}
impl Math for usize {
#[inline]
fn square_root(self) -> usize {
(self as f64).sqrt() as usize
}
#[inline]
fn pow2(self) -> usize {
let base: usize = 2;
base.pow(self as u32)
}
/// Returns the lowest `num_bits` bits of `self`, most-significant bit first
fn get_bits(self, num_bits: usize) -> Vec<bool> {
(0..num_bits)
.map(|shift_amount| ((self & (1 << (num_bits - shift_amount - 1))) > 0))
.collect::<Vec<bool>>()
}
fn log_2(self) -> usize {
assert_ne!(self, 0);
if self.is_power_of_two() {
(1usize.leading_zeros() - self.leading_zeros()) as usize
} else {
(0usize.leading_zeros() - self.leading_zeros()) as usize
}
}
}
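// Illustrative sketch (not part of the original file): quick checks of the
// `Math` helpers above, under the bit ordering described in the doc comment.
#[cfg(test)]
mod math_tests {
use super::Math;
#[test]
fn check_math_helpers() {
assert_eq!(4usize.pow2(), 16); // 2^4
assert_eq!(16usize.log_2(), 4); // exact for powers of two
assert_eq!(5usize.log_2(), 3); // rounds up for non-powers
// bits are returned most-significant first: 6 = 0b0110 over 4 bits
assert_eq!(6usize.get_bits(4), vec![false, true, true, false]);
}
}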

View File

@@ -0,0 +1,267 @@
//! This module is an adaptation of code from the bulletproofs crate.
//! See NOTICE.md for more details
#![allow(non_snake_case)]
#![allow(clippy::type_complexity)]
#![allow(clippy::too_many_arguments)]
use super::super::errors::ProofVerifyError;
use super::super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::super::math::Math;
use super::super::scalar::Scalar;
use super::super::transcript::ProofTranscript;
use crate::group::DecompressEncodedPoint;
use core::iter;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize)]
pub struct BulletReductionProof {
pub L_vec: Vec<CompressedGroup>,
pub R_vec: Vec<CompressedGroup>,
}
impl BulletReductionProof {
/// Create an inner-product proof.
///
/// The proof is created with respect to the bases \\(G\\).
///
/// The `transcript` is passed in as a parameter so that the
/// challenges depend on the *entire* transcript (including parent
/// protocols).
///
/// The lengths of the vectors must all be the same, and must all be
/// either 0 or a power of 2.
pub fn prove(
transcript: &mut Transcript,
Q: &GroupElement,
G_vec: &[GroupElement],
H: &GroupElement,
a_vec: &[Scalar],
b_vec: &[Scalar],
blind: &Scalar,
blinds_vec: &[(Scalar, Scalar)],
) -> (
BulletReductionProof,
GroupElement,
Scalar,
Scalar,
GroupElement,
Scalar,
) {
// Create slices G, H, a, b backed by their respective
// vectors. This lets us reslice as we compress the lengths
// of the vectors in the main loop below.
let mut G = &mut G_vec.to_owned()[..];
let mut a = &mut a_vec.to_owned()[..];
let mut b = &mut b_vec.to_owned()[..];
// All of the input vectors must have a length that is a power of two.
let mut n = G.len();
assert!(n.is_power_of_two());
let lg_n = n.log_2();
// All of the input vectors must have the same length.
assert_eq!(G.len(), n);
assert_eq!(a.len(), n);
assert_eq!(b.len(), n);
assert_eq!(blinds_vec.len(), 2 * lg_n);
let mut L_vec = Vec::with_capacity(lg_n);
let mut R_vec = Vec::with_capacity(lg_n);
let mut blinds_iter = blinds_vec.iter();
let mut blind_fin = *blind;
while n != 1 {
n /= 2;
let (a_L, a_R) = a.split_at_mut(n);
let (b_L, b_R) = b.split_at_mut(n);
let (G_L, G_R) = G.split_at_mut(n);
let c_L = inner_product(a_L, b_R);
let c_R = inner_product(a_R, b_L);
let (blind_L, blind_R) = blinds_iter.next().unwrap();
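// L = <a_L, G_R> + c_L*Q + blind_L*H: the cross-term commitment of the two
// halves (R below is the mirror image with a_R, G_L, c_R, blind_R)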
let L = GroupElement::vartime_multiscalar_mul(
a_L
.iter()
.chain(iter::once(&c_L))
.chain(iter::once(blind_L))
.map(|s| *s)
.collect(),
G_R
.iter()
.chain(iter::once(Q))
.chain(iter::once(H))
.map(|s| *s)
.collect(),
);
let R = GroupElement::vartime_multiscalar_mul(
a_R
.iter()
.chain(iter::once(&c_R))
.chain(iter::once(blind_R))
.map(|s| *s)
.collect(),
G_L
.iter()
.chain(iter::once(Q))
.chain(iter::once(H))
.map(|s| *s)
.collect(),
);
transcript.append_point(b"L", &L.compress());
transcript.append_point(b"R", &R.compress());
let u = transcript.challenge_scalar(b"u");
let u_inv = u.invert().unwrap();
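// fold the halves with the challenge:
// a' = u*a_L + u^{-1}*a_R, b' = u^{-1}*b_L + u*b_R, G' = u^{-1}*G_L + u*G_R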
for i in 0..n {
a_L[i] = a_L[i] * u + u_inv * a_R[i];
b_L[i] = b_L[i] * u_inv + u * b_R[i];
G_L[i] =
GroupElement::vartime_multiscalar_mul([u_inv, u].to_vec(), [G_L[i], G_R[i]].to_vec());
}
blind_fin = blind_fin + blind_L * u * u + blind_R * u_inv * u_inv;
L_vec.push(L.compress());
R_vec.push(R.compress());
a = a_L;
b = b_L;
G = G_L;
}
let Gamma_hat = GroupElement::vartime_multiscalar_mul(
[a[0], a[0] * b[0], blind_fin].to_vec(),
[G[0], *Q, *H].to_vec(),
);
(
BulletReductionProof { L_vec, R_vec },
Gamma_hat,
a[0],
b[0],
G[0],
blind_fin,
)
}
/// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication
/// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details.
/// The verifier must provide the input length \\(n\\) explicitly to avoid unbounded allocation within the inner product proof.
fn verification_scalars(
&self,
n: usize,
transcript: &mut Transcript,
) -> Result<(Vec<Scalar>, Vec<Scalar>, Vec<Scalar>), ProofVerifyError> {
let lg_n = self.L_vec.len();
if lg_n >= 32 {
// 4 billion multiplications should be enough for anyone
// and this check prevents overflow in 1<<lg_n below.
return Err(ProofVerifyError::InternalError);
}
if n != (1 << lg_n) {
return Err(ProofVerifyError::InternalError);
}
// 1. Recompute x_k,...,x_1 based on the proof transcript
let mut challenges = Vec::with_capacity(lg_n);
for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) {
transcript.append_point(b"L", L);
transcript.append_point(b"R", R);
challenges.push(transcript.challenge_scalar(b"u"));
}
// 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
let mut challenges_inv = challenges.clone();
let allinv = Scalar::batch_invert(&mut challenges_inv);
// 3. Compute u_i^2 and (1/u_i)^2
for i in 0..lg_n {
challenges[i] = challenges[i].square();
challenges_inv[i] = challenges_inv[i].square();
}
let challenges_sq = challenges;
let challenges_inv_sq = challenges_inv;
// 4. Compute s values inductively.
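// s_0 = prod_j u_j^{-1}; setting the bit at position lg(i) of the index
// multiplies by the matching u^2, so s_i = prod_j u_j^{+/-1} as in Bulletproofs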
let mut s = Vec::with_capacity(n);
s.push(allinv);
for i in 1..n {
let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
let k = 1 << lg_i;
// The challenges are stored in "creation order" as [u_k,...,u_1],
// so u_{lg(i)+1} is indexed by (lg_n-1) - lg_i
let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
s.push(s[i - k] * u_lg_i_sq);
}
Ok((challenges_sq, challenges_inv_sq, s))
}
/// This method is for testing that proof generation works,
/// but for efficiency the actual protocols would use `verification_scalars`
/// method to combine inner product verification with other checks
/// in a single multiscalar multiplication.
pub fn verify(
&self,
n: usize,
a: &[Scalar],
transcript: &mut Transcript,
Gamma: &GroupElement,
G: &[GroupElement],
) -> Result<(GroupElement, GroupElement, Scalar), ProofVerifyError> {
let (u_sq, u_inv_sq, s) = self.verification_scalars(n, transcript)?;
let Ls = self
.L_vec
.iter()
.map(|p| p.decompress().ok_or(ProofVerifyError::InternalError))
.collect::<Result<Vec<_>, _>>()?;
let Rs = self
.R_vec
.iter()
.map(|p| p.decompress().ok_or(ProofVerifyError::InternalError))
.collect::<Result<Vec<_>, _>>()?;
let G_hat = GroupElement::vartime_multiscalar_mul(s.clone(), G.to_vec());
let a_hat = inner_product(a, &s);
let Gamma_hat = GroupElement::vartime_multiscalar_mul(
u_sq
.iter()
.chain(u_inv_sq.iter())
.chain(iter::once(&Scalar::one()))
.map(|s| *s)
.collect(),
Ls.iter()
.chain(Rs.iter())
.chain(iter::once(Gamma))
.map(|p| *p)
.collect(),
);
Ok((G_hat, Gamma_hat, a_hat))
}
}
/// Computes an inner product of two vectors
/// \\[
/// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i.
/// \\]
/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal.
pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
assert!(
a.len() == b.len(),
"inner_product(a,b): lengths of vectors do not match"
);
let mut out = Scalar::zero();
for i in 0..a.len() {
out += a[i] * b[i];
}
out
}
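// Illustrative sketch (not part of the original file): a quick check of
// `inner_product`, assuming `Scalar: From<u64>` as used elsewhere in the crate.
#[cfg(test)]
mod bullet_tests {
use super::*;
#[test]
fn check_inner_product() {
let a: Vec<Scalar> = (1..=3u64).map(Scalar::from).collect();
let b: Vec<Scalar> = (4..=6u64).map(Scalar::from).collect();
// 1*4 + 2*5 + 3*6 = 32
assert_eq!(inner_product(&a, &b), Scalar::from(32u64));
}
}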

View File

@@ -0,0 +1,735 @@
#![allow(clippy::too_many_arguments)]
use super::commitments::{Commitments, MultiCommitGens};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, CompressedGroupExt};
use super::math::Math;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use crate::group::DecompressEncodedPoint;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
mod bullet;
pub use bullet::BulletReductionProof;
#[derive(Serialize, Deserialize, Debug)]
pub struct KnowledgeProof {
pub alpha: CompressedGroup,
pub z1: Scalar,
pub z2: Scalar,
}
impl KnowledgeProof {
fn protocol_name() -> &'static [u8] {
b"knowledge proof"
}
pub fn prove(
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
x: &Scalar,
r: &Scalar,
) -> (KnowledgeProof, CompressedGroup) {
transcript.append_protocol_name(KnowledgeProof::protocol_name());
// produce two random Scalars
let t1 = random_tape.random_scalar(b"t1");
let t2 = random_tape.random_scalar(b"t2");
let C = x.commit(r, gens_n).compress();
C.append_to_transcript(b"C", transcript);
let alpha = t1.commit(&t2, gens_n).compress();
alpha.append_to_transcript(b"alpha", transcript);
let c = transcript.challenge_scalar(b"c");
let z1 = x * c + t1;
let z2 = r * c + t2;
(KnowledgeProof { alpha, z1, z2 }, C)
}
pub fn verify(
&self,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
C: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
transcript.append_protocol_name(KnowledgeProof::protocol_name());
C.append_to_transcript(b"C", transcript);
self.alpha.append_to_transcript(b"alpha", transcript);
let c = transcript.challenge_scalar(b"c");
let lhs = self.z1.commit(&self.z2, gens_n).compress();
let rhs = (c * C.unpack()? + self.alpha.unpack()?).compress();
if lhs == rhs {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct EqualityProof {
pub alpha: CompressedGroup,
pub z: Scalar,
}
impl EqualityProof {
fn protocol_name() -> &'static [u8] {
b"equality proof"
}
pub fn prove(
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
v1: &Scalar,
s1: &Scalar,
v2: &Scalar,
s2: &Scalar,
) -> (EqualityProof, CompressedGroup, CompressedGroup) {
transcript.append_protocol_name(EqualityProof::protocol_name());
// produce a random Scalar
let r = random_tape.random_scalar(b"r");
let C1 = v1.commit(s1, gens_n).compress();
C1.append_to_transcript(b"C1", transcript);
let C2 = v2.commit(s2, gens_n).compress();
C2.append_to_transcript(b"C2", transcript);
let alpha = (r * gens_n.h).compress();
alpha.append_to_transcript(b"alpha", transcript);
let c = transcript.challenge_scalar(b"c");
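// since v1 == v2, C1 - C2 = (s1 - s2)*h; z is a Schnorr-style response
// proving knowledge of that discrete log w.r.t. the base h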
let z = c * (s1 - s2) + r;
(EqualityProof { alpha, z }, C1, C2)
}
pub fn verify(
&self,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
C1: &CompressedGroup,
C2: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
transcript.append_protocol_name(EqualityProof::protocol_name());
C1.append_to_transcript(b"C1", transcript);
C2.append_to_transcript(b"C2", transcript);
self.alpha.append_to_transcript(b"alpha", transcript);
let c = transcript.challenge_scalar(b"c");
let rhs = {
let C = C1.unpack()? - C2.unpack()?;
(c * C + self.alpha.unpack()?).compress()
};
let lhs = (self.z * gens_n.h).compress();
if lhs == rhs {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ProductProof {
pub alpha: CompressedGroup,
pub beta: CompressedGroup,
pub delta: CompressedGroup,
pub z: [Scalar; 5],
}
impl ProductProof {
fn protocol_name() -> &'static [u8] {
b"product proof"
}
pub fn prove(
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
x: &Scalar,
rX: &Scalar,
y: &Scalar,
rY: &Scalar,
z: &Scalar,
rZ: &Scalar,
) -> (
ProductProof,
CompressedGroup,
CompressedGroup,
CompressedGroup,
) {
transcript.append_protocol_name(ProductProof::protocol_name());
// produce five random Scalars
let b1 = random_tape.random_scalar(b"b1");
let b2 = random_tape.random_scalar(b"b2");
let b3 = random_tape.random_scalar(b"b3");
let b4 = random_tape.random_scalar(b"b4");
let b5 = random_tape.random_scalar(b"b5");
let X = x.commit(rX, gens_n).compress();
X.append_to_transcript(b"X", transcript);
let Y = y.commit(rY, gens_n).compress();
Y.append_to_transcript(b"Y", transcript);
let Z = z.commit(rZ, gens_n).compress();
Z.append_to_transcript(b"Z", transcript);
let alpha = b1.commit(&b2, gens_n).compress();
alpha.append_to_transcript(b"alpha", transcript);
let beta = b3.commit(&b4, gens_n).compress();
beta.append_to_transcript(b"beta", transcript);
let delta = {
let gens_X = &MultiCommitGens {
n: 1,
G: vec![X.decompress().unwrap()],
h: gens_n.h,
};
b3.commit(&b5, gens_X).compress()
};
delta.append_to_transcript(b"delta", transcript);
let c = transcript.challenge_scalar(b"c");
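// Sigma-protocol responses; z5 uses rZ - rX*y because Z is checked against
// the base X = Com(x, rX): Z = x*y*g + rZ*h = y*X + (rZ - rX*y)*h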
let z1 = b1 + c * x;
let z2 = b2 + c * rX;
let z3 = b3 + c * y;
let z4 = b4 + c * rY;
let z5 = b5 + c * (rZ - rX * y);
let z = [z1, z2, z3, z4, z5];
(
ProductProof {
alpha,
beta,
delta,
z,
},
X,
Y,
Z,
)
}
fn check_equality(
P: &CompressedGroup,
X: &CompressedGroup,
c: &Scalar,
gens_n: &MultiCommitGens,
z1: &Scalar,
z2: &Scalar,
) -> bool {
let lhs = (P.decompress().unwrap() + c * X.decompress().unwrap()).compress();
let rhs = z1.commit(z2, gens_n).compress();
lhs == rhs
}
pub fn verify(
&self,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
X: &CompressedGroup,
Y: &CompressedGroup,
Z: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
transcript.append_protocol_name(ProductProof::protocol_name());
X.append_to_transcript(b"X", transcript);
Y.append_to_transcript(b"Y", transcript);
Z.append_to_transcript(b"Z", transcript);
self.alpha.append_to_transcript(b"alpha", transcript);
self.beta.append_to_transcript(b"beta", transcript);
self.delta.append_to_transcript(b"delta", transcript);
let z1 = self.z[0];
let z2 = self.z[1];
let z3 = self.z[2];
let z4 = self.z[3];
let z5 = self.z[4];
let c = transcript.challenge_scalar(b"c");
if ProductProof::check_equality(&self.alpha, X, &c, gens_n, &z1, &z2)
&& ProductProof::check_equality(&self.beta, Y, &c, gens_n, &z3, &z4)
&& ProductProof::check_equality(
&self.delta,
Z,
&c,
&MultiCommitGens {
n: 1,
G: vec![X.unpack()?],
h: gens_n.h,
},
&z3,
&z5,
)
{
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct DotProductProof {
pub delta: CompressedGroup,
pub beta: CompressedGroup,
pub z: Vec<Scalar>,
pub z_delta: Scalar,
pub z_beta: Scalar,
}
impl DotProductProof {
fn protocol_name() -> &'static [u8] {
b"dot product proof"
}
pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
assert_eq!(a.len(), b.len());
(0..a.len()).map(|i| a[i] * b[i]).sum()
}
pub fn prove(
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
x_vec: &[Scalar],
blind_x: &Scalar,
a_vec: &[Scalar],
y: &Scalar,
blind_y: &Scalar,
) -> (DotProductProof, CompressedGroup, CompressedGroup) {
transcript.append_protocol_name(DotProductProof::protocol_name());
let n = x_vec.len();
assert_eq!(x_vec.len(), a_vec.len());
assert_eq!(gens_n.n, a_vec.len());
assert_eq!(gens_1.n, 1);
// produce randomness for the proofs
let d_vec = random_tape.random_vector(b"d_vec", n);
let r_delta = random_tape.random_scalar(b"r_delta");
let r_beta = random_tape.random_scalar(b"r_beta");
let Cx = x_vec.commit(blind_x, gens_n).compress();
Cx.append_to_transcript(b"Cx", transcript);
let Cy = y.commit(blind_y, gens_1).compress();
Cy.append_to_transcript(b"Cy", transcript);
a_vec.append_to_transcript(b"a", transcript);
let delta = d_vec.commit(&r_delta, gens_n).compress();
delta.append_to_transcript(b"delta", transcript);
let dotproduct_a_d = DotProductProof::compute_dotproduct(a_vec, &d_vec);
let beta = dotproduct_a_d.commit(&r_beta, gens_1).compress();
beta.append_to_transcript(b"beta", transcript);
let c = transcript.challenge_scalar(b"c");
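// Sigma-protocol responses; the verifier checks
// c*Cx + delta == Com(z, z_delta) and c*Cy + beta == Com(<z, a>, z_beta)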
let z = (0..d_vec.len())
.map(|i| c * x_vec[i] + d_vec[i])
.collect::<Vec<Scalar>>();
let z_delta = c * blind_x + r_delta;
let z_beta = c * blind_y + r_beta;
(
DotProductProof {
delta,
beta,
z,
z_delta,
z_beta,
},
Cx,
Cy,
)
}
pub fn verify(
&self,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
a: &[Scalar],
Cx: &CompressedGroup,
Cy: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
assert_eq!(gens_n.n, a.len());
assert_eq!(gens_1.n, 1);
transcript.append_protocol_name(DotProductProof::protocol_name());
Cx.append_to_transcript(b"Cx", transcript);
Cy.append_to_transcript(b"Cy", transcript);
a.append_to_transcript(b"a", transcript);
self.delta.append_to_transcript(b"delta", transcript);
self.beta.append_to_transcript(b"beta", transcript);
let c = transcript.challenge_scalar(b"c");
let mut result =
c * Cx.unpack()? + self.delta.unpack()? == self.z.commit(&self.z_delta, gens_n);
let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, a);
result &= c * Cy.unpack()? + self.beta.unpack()? == dotproduct_z_a.commit(&self.z_beta, gens_1);
if result {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
pub struct DotProductProofGens {
n: usize,
pub gens_n: MultiCommitGens,
pub gens_1: MultiCommitGens,
}
impl DotProductProofGens {
pub fn new(n: usize, label: &[u8]) -> Self {
let (gens_n, gens_1) = MultiCommitGens::new(n + 1, label).split_at(n);
DotProductProofGens { n, gens_n, gens_1 }
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct DotProductProofLog {
pub bullet_reduction_proof: BulletReductionProof,
pub delta: CompressedGroup,
pub beta: CompressedGroup,
pub z1: Scalar,
pub z2: Scalar,
}
impl DotProductProofLog {
fn protocol_name() -> &'static [u8] {
b"dot product proof (log)"
}
pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
assert_eq!(a.len(), b.len());
(0..a.len()).map(|i| a[i] * b[i]).sum()
}
pub fn prove(
gens: &DotProductProofGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
x_vec: &[Scalar],
blind_x: &Scalar,
a_vec: &[Scalar],
y: &Scalar,
blind_y: &Scalar,
) -> (DotProductProofLog, CompressedGroup, CompressedGroup) {
transcript.append_protocol_name(DotProductProofLog::protocol_name());
let n = x_vec.len();
assert_eq!(x_vec.len(), a_vec.len());
assert_eq!(gens.n, n);
// produce randomness for generating a proof
let d = random_tape.random_scalar(b"d");
let r_delta = random_tape.random_scalar(b"r_delta");
let r_beta = random_tape.random_scalar(b"r_delta");
let blinds_vec = {
let v1 = random_tape.random_vector(b"blinds_vec_1", 2 * n.log_2());
let v2 = random_tape.random_vector(b"blinds_vec_2", 2 * n.log_2());
(0..v1.len())
.map(|i| (v1[i], v2[i]))
.collect::<Vec<(Scalar, Scalar)>>()
};
let Cx = x_vec.commit(blind_x, &gens.gens_n).compress();
Cx.append_to_transcript(b"Cx", transcript);
let Cy = y.commit(blind_y, &gens.gens_1).compress();
Cy.append_to_transcript(b"Cy", transcript);
a_vec.append_to_transcript(b"a", transcript);
// sample a random base and scale the generator used for
// the output of the inner product
let r = transcript.challenge_scalar(b"r");
let gens_1_scaled = gens.gens_1.scale(&r);
let blind_Gamma = blind_x + r * blind_y;
let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) =
BulletReductionProof::prove(
transcript,
&gens_1_scaled.G[0],
&gens.gens_n.G,
&gens.gens_n.h,
x_vec,
a_vec,
&blind_Gamma,
&blinds_vec,
);
let y_hat = x_hat * a_hat;
let delta = {
let gens_hat = MultiCommitGens {
n: 1,
G: vec![g_hat],
h: gens.gens_1.h,
};
d.commit(&r_delta, &gens_hat).compress()
};
delta.append_to_transcript(b"delta", transcript);
let beta = d.commit(&r_beta, &gens_1_scaled).compress();
beta.append_to_transcript(b"beta", transcript);
let c = transcript.challenge_scalar(b"c");
let z1 = d + c * y_hat;
let z2 = a_hat * (c * rhat_Gamma + r_beta) + r_delta;
(
DotProductProofLog {
bullet_reduction_proof,
delta,
beta,
z1,
z2,
},
Cx,
Cy,
)
}
pub fn verify(
&self,
n: usize,
gens: &DotProductProofGens,
transcript: &mut Transcript,
a: &[Scalar],
Cx: &CompressedGroup,
Cy: &CompressedGroup,
) -> Result<(), ProofVerifyError> {
assert_eq!(gens.n, n);
assert_eq!(a.len(), n);
transcript.append_protocol_name(DotProductProofLog::protocol_name());
Cx.append_to_transcript(b"Cx", transcript);
Cy.append_to_transcript(b"Cy", transcript);
a.append_to_transcript(b"a", transcript);
// sample a random base and scale the generator used for
// the output of the inner product
let r = transcript.challenge_scalar(b"r");
let gens_1_scaled = gens.gens_1.scale(&r);
let Gamma = Cx.unpack()? + r * Cy.unpack()?;
let (g_hat, Gamma_hat, a_hat) =
self
.bullet_reduction_proof
.verify(n, a, transcript, &Gamma, &gens.gens_n.G)?;
self.delta.append_to_transcript(b"delta", transcript);
self.beta.append_to_transcript(b"beta", transcript);
let c = transcript.challenge_scalar(b"c");
let c_s = &c;
let beta_s = self.beta.unpack()?;
let a_hat_s = &a_hat;
let delta_s = self.delta.unpack()?;
let z1_s = &self.z1;
let z2_s = &self.z2;
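// final check, folding the reduced inner-product claim and the two Sigma
// commitments into one equation:
// (c*Gamma_hat + beta)*a_hat + delta == z1*(g_hat + a_hat*g') + z2*h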
let lhs = ((Gamma_hat * c_s + beta_s) * a_hat_s + delta_s).compress();
let rhs = ((g_hat + gens_1_scaled.G[0] * a_hat_s) * z1_s + gens_1_scaled.h * z2_s).compress();
if lhs == rhs {
Ok(())
} else {
Err(ProofVerifyError::InternalError)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use rand_core::OsRng;
#[test]
fn check_knowledgeproof() {
let mut csprng: OsRng = OsRng;
let gens_1 = MultiCommitGens::new(1, b"test-knowledgeproof");
let x = Scalar::random(&mut csprng);
let r = Scalar::random(&mut csprng);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, committed_value) =
KnowledgeProof::prove(&gens_1, &mut prover_transcript, &mut random_tape, &x, &r);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(&gens_1, &mut verifier_transcript, &committed_value)
.is_ok());
}
#[test]
fn check_equalityproof() {
let mut csprng: OsRng = OsRng;
let gens_1 = MultiCommitGens::new(1, b"test-equalityproof");
let v1 = Scalar::random(&mut csprng);
let v2 = v1;
let s1 = Scalar::random(&mut csprng);
let s2 = Scalar::random(&mut csprng);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, C1, C2) = EqualityProof::prove(
&gens_1,
&mut prover_transcript,
&mut random_tape,
&v1,
&s1,
&v2,
&s2,
);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(&gens_1, &mut verifier_transcript, &C1, &C2)
.is_ok());
}
#[test]
fn check_productproof() {
let mut csprng: OsRng = OsRng;
let gens_1 = MultiCommitGens::new(1, b"test-productproof");
let x = Scalar::random(&mut csprng);
let rX = Scalar::random(&mut csprng);
let y = Scalar::random(&mut csprng);
let rY = Scalar::random(&mut csprng);
let z = x * y;
let rZ = Scalar::random(&mut csprng);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, X, Y, Z) = ProductProof::prove(
&gens_1,
&mut prover_transcript,
&mut random_tape,
&x,
&rX,
&y,
&rY,
&z,
&rZ,
);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(&gens_1, &mut verifier_transcript, &X, &Y, &Z)
.is_ok());
}
#[test]
fn check_dotproductproof() {
let mut csprng: OsRng = OsRng;
let n = 1024;
let gens_1 = MultiCommitGens::new(1, b"test-two");
let gens_1024 = MultiCommitGens::new(n, b"test-1024");
let mut x: Vec<Scalar> = Vec::new();
let mut a: Vec<Scalar> = Vec::new();
for _ in 0..n {
x.push(Scalar::random(&mut csprng));
a.push(Scalar::random(&mut csprng));
}
let y = DotProductProofLog::compute_dotproduct(&x, &a);
let r_x = Scalar::random(&mut csprng);
let r_y = Scalar::random(&mut csprng);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, Cx, Cy) = DotProductProof::prove(
&gens_1,
&gens_1024,
&mut prover_transcript,
&mut random_tape,
&x,
&r_x,
&a,
&y,
&r_y,
);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(&gens_1, &gens_1024, &mut verifier_transcript, &a, &Cx, &Cy)
.is_ok());
}
#[test]
fn check_dotproductproof_log() {
let mut csprng: OsRng = OsRng;
let n = 1024;
let gens = DotProductProofGens::new(n, b"test-1024");
let x: Vec<Scalar> = (0..n).map(|_i| Scalar::random(&mut csprng)).collect();
let a: Vec<Scalar> = (0..n).map(|_i| Scalar::random(&mut csprng)).collect();
let y = DotProductProof::compute_dotproduct(&x, &a);
let r_x = Scalar::random(&mut csprng);
let r_y = Scalar::random(&mut csprng);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, Cx, Cy) = DotProductProofLog::prove(
&gens,
&mut prover_transcript,
&mut random_tape,
&x,
&r_x,
&a,
&y,
&r_y,
);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(n, &gens, &mut verifier_transcript, &a, &Cx, &Cy)
.is_ok());
}
}

View File

@@ -0,0 +1,486 @@
#![allow(dead_code)]
use super::dense_mlpoly::DensePolynomial;
use super::dense_mlpoly::EqPolynomial;
use super::math::Math;
use super::scalar::Scalar;
use super::sumcheck::SumcheckInstanceProof;
use super::transcript::ProofTranscript;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
#[derive(Debug)]
pub struct ProductCircuit {
left_vec: Vec<DensePolynomial>,
right_vec: Vec<DensePolynomial>,
}
impl ProductCircuit {
fn compute_layer(
inp_left: &DensePolynomial,
inp_right: &DensePolynomial,
) -> (DensePolynomial, DensePolynomial) {
let len = inp_left.len() + inp_right.len();
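// each layer halves the size: the first len/4 products become the next
// layer's left half and the remaining len/4 its right half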
let outp_left = (0..len / 4)
.map(|i| inp_left[i] * inp_right[i])
.collect::<Vec<Scalar>>();
let outp_right = (len / 4..len / 2)
.map(|i| inp_left[i] * inp_right[i])
.collect::<Vec<Scalar>>();
(
DensePolynomial::new(outp_left),
DensePolynomial::new(outp_right),
)
}
pub fn new(poly: &DensePolynomial) -> Self {
let mut left_vec: Vec<DensePolynomial> = Vec::new();
let mut right_vec: Vec<DensePolynomial> = Vec::new();
let num_layers = poly.len().log_2();
let (outp_left, outp_right) = poly.split(poly.len() / 2);
left_vec.push(outp_left);
right_vec.push(outp_right);
for i in 0..num_layers - 1 {
let (outp_left, outp_right) = ProductCircuit::compute_layer(&left_vec[i], &right_vec[i]);
left_vec.push(outp_left);
right_vec.push(outp_right);
}
ProductCircuit {
left_vec,
right_vec,
}
}
pub fn evaluate(&self) -> Scalar {
let len = self.left_vec.len();
assert_eq!(self.left_vec[len - 1].get_num_vars(), 0);
assert_eq!(self.right_vec[len - 1].get_num_vars(), 0);
self.left_vec[len - 1][0] * self.right_vec[len - 1][0]
}
}
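// Illustrative note (not in the original source): for poly = [a, b, c, d],
// `new` stores layers ([a, b], [c, d]) and ([a*c], [b*d]), and `evaluate`
// returns (a*c) * (b*d), i.e. the product of all entries of poly.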
pub struct DotProductCircuit {
left: DensePolynomial,
right: DensePolynomial,
weight: DensePolynomial,
}
impl DotProductCircuit {
pub fn new(left: DensePolynomial, right: DensePolynomial, weight: DensePolynomial) -> Self {
assert_eq!(left.len(), right.len());
assert_eq!(left.len(), weight.len());
DotProductCircuit {
left,
right,
weight,
}
}
pub fn evaluate(&self) -> Scalar {
(0..self.left.len())
.map(|i| self.left[i] * self.right[i] * self.weight[i])
.sum()
}
pub fn split(&mut self) -> (DotProductCircuit, DotProductCircuit) {
let idx = self.left.len() / 2;
assert_eq!(idx * 2, self.left.len());
let (l1, l2) = self.left.split(idx);
let (r1, r2) = self.right.split(idx);
let (w1, w2) = self.weight.split(idx);
(
DotProductCircuit {
left: l1,
right: r1,
weight: w1,
},
DotProductCircuit {
left: l2,
right: r2,
weight: w2,
},
)
}
}
#[allow(dead_code)]
#[derive(Debug, Serialize, Deserialize)]
pub struct LayerProof {
pub proof: SumcheckInstanceProof,
pub claims: Vec<Scalar>,
}
#[allow(dead_code)]
impl LayerProof {
pub fn verify(
&self,
claim: Scalar,
num_rounds: usize,
degree_bound: usize,
transcript: &mut Transcript,
) -> (Scalar, Vec<Scalar>) {
self
.proof
.verify(claim, num_rounds, degree_bound, transcript)
.unwrap()
}
}
#[allow(dead_code)]
#[derive(Debug, Serialize, Deserialize)]
pub struct LayerProofBatched {
pub proof: SumcheckInstanceProof,
pub claims_prod_left: Vec<Scalar>,
pub claims_prod_right: Vec<Scalar>,
}
#[allow(dead_code)]
impl LayerProofBatched {
pub fn verify(
&self,
claim: Scalar,
num_rounds: usize,
degree_bound: usize,
transcript: &mut Transcript,
) -> (Scalar, Vec<Scalar>) {
self
.proof
.verify(claim, num_rounds, degree_bound, transcript)
.unwrap()
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ProductCircuitEvalProof {
proof: Vec<LayerProof>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ProductCircuitEvalProofBatched {
proof: Vec<LayerProofBatched>,
claims_dotp: (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>),
}
impl ProductCircuitEvalProof {
#![allow(dead_code)]
pub fn prove(
circuit: &mut ProductCircuit,
transcript: &mut Transcript,
) -> (Self, Scalar, Vec<Scalar>) {
let mut proof: Vec<LayerProof> = Vec::new();
let num_layers = circuit.left_vec.len();
let mut claim = circuit.evaluate();
let mut rand = Vec::new();
for layer_id in (0..num_layers).rev() {
let len = circuit.left_vec[layer_id].len() + circuit.right_vec[layer_id].len();
let mut poly_C = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
assert_eq!(poly_C.len(), len / 2);
let num_rounds_prod = poly_C.len().log_2();
let comb_func_prod = |poly_A_comp: &Scalar,
poly_B_comp: &Scalar,
poly_C_comp: &Scalar|
-> Scalar { poly_A_comp * poly_B_comp * poly_C_comp };
let (proof_prod, rand_prod, claims_prod) = SumcheckInstanceProof::prove_cubic(
&claim,
num_rounds_prod,
&mut circuit.left_vec[layer_id],
&mut circuit.right_vec[layer_id],
&mut poly_C,
comb_func_prod,
transcript,
);
transcript.append_scalar(b"claim_prod_left", &claims_prod[0]);
transcript.append_scalar(b"claim_prod_right", &claims_prod[1]);
// produce a random challenge
let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
claim = claims_prod[0] + r_layer * (claims_prod[1] - claims_prod[0]);
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
proof.push(LayerProof {
proof: proof_prod,
claims: claims_prod[0..claims_prod.len() - 1].to_vec(),
});
}
(ProductCircuitEvalProof { proof }, claim, rand)
}
pub fn verify(
&self,
eval: Scalar,
len: usize,
transcript: &mut Transcript,
) -> (Scalar, Vec<Scalar>) {
let num_layers = len.log_2();
let mut claim = eval;
let mut rand: Vec<Scalar> = Vec::new();
assert_eq!(self.proof.len(), num_layers);
for (num_rounds, i) in (0..num_layers).enumerate() {
let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);
let claims_prod = &self.proof[i].claims;
transcript.append_scalar(b"claim_prod_left", &claims_prod[0]);
transcript.append_scalar(b"claim_prod_right", &claims_prod[1]);
assert_eq!(rand.len(), rand_prod.len());
let eq: Scalar = (0..rand.len())
.map(|i| {
rand[i] * rand_prod[i] + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i])
})
.product();
assert_eq!(claims_prod[0] * claims_prod[1] * eq, claim_last);
// produce a random challenge
let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
claim = (Scalar::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1];
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
}
(claim, rand)
}
}
impl ProductCircuitEvalProofBatched {
pub fn prove(
prod_circuit_vec: &mut Vec<&mut ProductCircuit>,
dotp_circuit_vec: &mut Vec<&mut DotProductCircuit>,
transcript: &mut Transcript,
) -> (Self, Vec<Scalar>) {
assert!(!prod_circuit_vec.is_empty());
let mut claims_dotp_final = (Vec::new(), Vec::new(), Vec::new());
let mut proof_layers: Vec<LayerProofBatched> = Vec::new();
let num_layers = prod_circuit_vec[0].left_vec.len();
let mut claims_to_verify = (0..prod_circuit_vec.len())
.map(|i| prod_circuit_vec[i].evaluate())
.collect::<Vec<Scalar>>();
let mut rand = Vec::new();
for layer_id in (0..num_layers).rev() {
// prepare the parallel instances that share poly_C first
let len = prod_circuit_vec[0].left_vec[layer_id].len()
+ prod_circuit_vec[0].right_vec[layer_id].len();
let mut poly_C_par = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
assert_eq!(poly_C_par.len(), len / 2);
let num_rounds_prod = poly_C_par.len().log_2();
let comb_func_prod = |poly_A_comp: &Scalar,
poly_B_comp: &Scalar,
poly_C_comp: &Scalar|
-> Scalar { poly_A_comp * poly_B_comp * poly_C_comp };
let mut poly_A_batched_par: Vec<&mut DensePolynomial> = Vec::new();
let mut poly_B_batched_par: Vec<&mut DensePolynomial> = Vec::new();
for prod_circuit in prod_circuit_vec.iter_mut() {
poly_A_batched_par.push(&mut prod_circuit.left_vec[layer_id]);
poly_B_batched_par.push(&mut prod_circuit.right_vec[layer_id])
}
let poly_vec_par = (
&mut poly_A_batched_par,
&mut poly_B_batched_par,
&mut poly_C_par,
);
// prepare sequential instances that don't share poly_C
let mut poly_A_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
let mut poly_B_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
let mut poly_C_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
if layer_id == 0 && !dotp_circuit_vec.is_empty() {
// add additional claims
for item in dotp_circuit_vec.iter() {
claims_to_verify.push(item.evaluate());
assert_eq!(len / 2, item.left.len());
assert_eq!(len / 2, item.right.len());
assert_eq!(len / 2, item.weight.len());
}
for dotp_circuit in dotp_circuit_vec.iter_mut() {
poly_A_batched_seq.push(&mut dotp_circuit.left);
poly_B_batched_seq.push(&mut dotp_circuit.right);
poly_C_batched_seq.push(&mut dotp_circuit.weight);
}
}
let poly_vec_seq = (
&mut poly_A_batched_seq,
&mut poly_B_batched_seq,
&mut poly_C_batched_seq,
);
// produce a fresh set of coeffs and a joint claim
let coeff_vec =
transcript.challenge_vector(b"rand_coeffs_next_layer", claims_to_verify.len());
let claim = (0..claims_to_verify.len())
.map(|i| claims_to_verify[i] * coeff_vec[i])
.sum();
let (proof, rand_prod, claims_prod, claims_dotp) = SumcheckInstanceProof::prove_cubic_batched(
&claim,
num_rounds_prod,
poly_vec_par,
poly_vec_seq,
&coeff_vec,
comb_func_prod,
transcript,
);
let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod;
for i in 0..prod_circuit_vec.len() {
transcript.append_scalar(b"claim_prod_left", &claims_prod_left[i]);
transcript.append_scalar(b"claim_prod_right", &claims_prod_right[i]);
}
if layer_id == 0 && !dotp_circuit_vec.is_empty() {
let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = claims_dotp;
for i in 0..dotp_circuit_vec.len() {
transcript.append_scalar(b"claim_dotp_left", &claims_dotp_left[i]);
transcript.append_scalar(b"claim_dotp_right", &claims_dotp_right[i]);
transcript.append_scalar(b"claim_dotp_weight", &claims_dotp_weight[i]);
}
claims_dotp_final = (claims_dotp_left, claims_dotp_right, claims_dotp_weight);
}
// produce a random challenge to condense two claims into a single claim
let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
claims_to_verify = (0..prod_circuit_vec.len())
.map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
.collect::<Vec<Scalar>>();
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
proof_layers.push(LayerProofBatched {
proof,
claims_prod_left,
claims_prod_right,
});
}
(
ProductCircuitEvalProofBatched {
proof: proof_layers,
claims_dotp: claims_dotp_final,
},
rand,
)
}
pub fn verify(
&self,
claims_prod_vec: &[Scalar],
claims_dotp_vec: &[Scalar],
len: usize,
transcript: &mut Transcript,
) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
let num_layers = len.log_2();
let mut rand: Vec<Scalar> = Vec::new();
assert_eq!(self.proof.len(), num_layers);
let mut claims_to_verify = claims_prod_vec.to_owned();
let mut claims_to_verify_dotp: Vec<Scalar> = Vec::new();
for (num_rounds, i) in (0..num_layers).enumerate() {
if i == num_layers - 1 {
claims_to_verify.extend(claims_dotp_vec);
}
// produce random coefficients, one for each instance
let coeff_vec =
transcript.challenge_vector(b"rand_coeffs_next_layer", claims_to_verify.len());
// produce a joint claim
let claim = (0..claims_to_verify.len())
.map(|i| claims_to_verify[i] * coeff_vec[i])
.sum();
let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);
let claims_prod_left = &self.proof[i].claims_prod_left;
let claims_prod_right = &self.proof[i].claims_prod_right;
assert_eq!(claims_prod_left.len(), claims_prod_vec.len());
assert_eq!(claims_prod_right.len(), claims_prod_vec.len());
for i in 0..claims_prod_vec.len() {
transcript.append_scalar(b"claim_prod_left", &claims_prod_left[i]);
transcript.append_scalar(b"claim_prod_right", &claims_prod_right[i]);
}
assert_eq!(rand.len(), rand_prod.len());
let eq: Scalar = (0..rand.len())
.map(|i| {
rand[i] * rand_prod[i] + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i])
})
.product();
let mut claim_expected: Scalar = (0..claims_prod_vec.len())
.map(|i| coeff_vec[i] * (claims_prod_left[i] * claims_prod_right[i] * eq))
.sum();
// add claims from the dotp instances
if i == num_layers - 1 {
let num_prod_instances = claims_prod_vec.len();
let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp;
for i in 0..claims_dotp_left.len() {
transcript.append_scalar(b"claim_dotp_left", &claims_dotp_left[i]);
transcript.append_scalar(b"claim_dotp_right", &claims_dotp_right[i]);
transcript.append_scalar(b"claim_dotp_weight", &claims_dotp_weight[i]);
claim_expected += coeff_vec[i + num_prod_instances]
* claims_dotp_left[i]
* claims_dotp_right[i]
* claims_dotp_weight[i];
}
}
assert_eq!(claim_expected, claim_last);
// produce a random challenge
let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
claims_to_verify = (0..claims_prod_left.len())
.map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
.collect::<Vec<Scalar>>();
// add claims to verify for dotp circuit
if i == num_layers - 1 {
let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp;
for i in 0..claims_dotp_vec.len() / 2 {
// combine left claims
let claim_left = claims_dotp_left[2 * i]
+ r_layer * (claims_dotp_left[2 * i + 1] - claims_dotp_left[2 * i]);
let claim_right = claims_dotp_right[2 * i]
+ r_layer * (claims_dotp_right[2 * i + 1] - claims_dotp_right[2 * i]);
let claim_weight = claims_dotp_weight[2 * i]
+ r_layer * (claims_dotp_weight[2 * i + 1] - claims_dotp_weight[2 * i]);
claims_to_verify_dotp.push(claim_left);
claims_to_verify_dotp.push(claim_right);
claims_to_verify_dotp.push(claim_weight);
}
}
let mut ext = vec![r_layer];
ext.extend(rand_prod);
rand = ext;
}
(claims_to_verify, claims_to_verify_dotp, rand)
}
}

View File

@@ -0,0 +1,367 @@
use crate::transcript::AppendToTranscript;
use super::dense_mlpoly::DensePolynomial;
use super::errors::ProofVerifyError;
use super::math::Math;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::sparse_mlpoly::{
MultiSparseMatPolynomialAsDense, SparseMatEntry, SparseMatPolyCommitment,
SparseMatPolyCommitmentGens, SparseMatPolyEvalProof, SparseMatPolynomial,
};
use super::timer::Timer;
use flate2::{write::ZlibEncoder, Compression};
use merlin::Transcript;
use rand_core::OsRng;
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize)]
pub struct R1CSInstance {
num_cons: usize,
num_vars: usize,
num_inputs: usize,
A: SparseMatPolynomial,
B: SparseMatPolynomial,
C: SparseMatPolynomial,
}
pub struct R1CSCommitmentGens {
gens: SparseMatPolyCommitmentGens,
}
impl R1CSCommitmentGens {
pub fn new(
label: &'static [u8],
num_cons: usize,
num_vars: usize,
num_inputs: usize,
num_nz_entries: usize,
) -> R1CSCommitmentGens {
assert!(num_inputs < num_vars);
let num_poly_vars_x = num_cons.log_2();
let num_poly_vars_y = (2 * num_vars).log_2();
let gens =
SparseMatPolyCommitmentGens::new(label, num_poly_vars_x, num_poly_vars_y, num_nz_entries, 3);
R1CSCommitmentGens { gens }
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct R1CSCommitment {
num_cons: usize,
num_vars: usize,
num_inputs: usize,
comm: SparseMatPolyCommitment,
}
impl AppendToTranscript for R1CSCommitment {
fn append_to_transcript(&self, _label: &'static [u8], transcript: &mut Transcript) {
transcript.append_u64(b"num_cons", self.num_cons as u64);
transcript.append_u64(b"num_vars", self.num_vars as u64);
transcript.append_u64(b"num_inputs", self.num_inputs as u64);
self.comm.append_to_transcript(b"comm", transcript);
}
}
pub struct R1CSDecommitment {
dense: MultiSparseMatPolynomialAsDense,
}
impl R1CSCommitment {
pub fn get_num_cons(&self) -> usize {
self.num_cons
}
pub fn get_num_vars(&self) -> usize {
self.num_vars
}
pub fn get_num_inputs(&self) -> usize {
self.num_inputs
}
}
impl R1CSInstance {
pub fn new(
num_cons: usize,
num_vars: usize,
num_inputs: usize,
A: &[(usize, usize, Scalar)],
B: &[(usize, usize, Scalar)],
C: &[(usize, usize, Scalar)],
) -> R1CSInstance {
Timer::print(&format!("number_of_constraints {}", num_cons));
Timer::print(&format!("number_of_variables {}", num_vars));
Timer::print(&format!("number_of_inputs {}", num_inputs));
Timer::print(&format!("number_non-zero_entries_A {}", A.len()));
Timer::print(&format!("number_non-zero_entries_B {}", B.len()));
Timer::print(&format!("number_non-zero_entries_C {}", C.len()));
// check that num_cons is a power of 2
assert_eq!(num_cons.next_power_of_two(), num_cons);
// check that num_vars is a power of 2
assert_eq!(num_vars.next_power_of_two(), num_vars);
// check that num_inputs + 1 <= num_vars
assert!(num_inputs < num_vars);
// no errors, so create polynomials
let num_poly_vars_x = num_cons.log_2();
let num_poly_vars_y = (2 * num_vars).log_2();
let mat_A = (0..A.len())
.map(|i| SparseMatEntry::new(A[i].0, A[i].1, A[i].2))
.collect::<Vec<SparseMatEntry>>();
let mat_B = (0..B.len())
.map(|i| SparseMatEntry::new(B[i].0, B[i].1, B[i].2))
.collect::<Vec<SparseMatEntry>>();
let mat_C = (0..C.len())
.map(|i| SparseMatEntry::new(C[i].0, C[i].1, C[i].2))
.collect::<Vec<SparseMatEntry>>();
let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_A);
let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_B);
let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_C);
R1CSInstance {
num_cons,
num_vars,
num_inputs,
A: poly_A,
B: poly_B,
C: poly_C,
}
}
pub fn get_num_vars(&self) -> usize {
self.num_vars
}
pub fn get_num_cons(&self) -> usize {
self.num_cons
}
pub fn get_num_inputs(&self) -> usize {
self.num_inputs
}
pub fn get_digest(&self) -> Vec<u8> {
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
bincode::serialize_into(&mut encoder, &self).unwrap();
encoder.finish().unwrap()
}
pub fn produce_synthetic_r1cs(
num_cons: usize,
num_vars: usize,
num_inputs: usize,
) -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
Timer::print(&format!("number_of_constraints {}", num_cons));
Timer::print(&format!("number_of_variables {}", num_vars));
Timer::print(&format!("number_of_inputs {}", num_inputs));
let mut csprng: OsRng = OsRng;
// assert num_cons and num_vars are powers of 2
assert_eq!((num_cons.log_2()).pow2(), num_cons);
assert_eq!((num_vars.log_2()).pow2(), num_vars);
// num_inputs + 1 <= num_vars
assert!(num_inputs < num_vars);
// z is organized as [vars,1,io]
let size_z = num_vars + num_inputs + 1;
// produce a random satisfying assignment
let Z = {
let mut Z: Vec<Scalar> = (0..size_z)
.map(|_i| Scalar::random(&mut csprng))
.collect::<Vec<Scalar>>();
Z[num_vars] = Scalar::one(); // set the constant term to 1
Z
};
// three sparse matrices
let mut A: Vec<SparseMatEntry> = Vec::new();
let mut B: Vec<SparseMatEntry> = Vec::new();
let mut C: Vec<SparseMatEntry> = Vec::new();
let one = Scalar::one();
for i in 0..num_cons {
let A_idx = i % size_z;
let B_idx = (i + 2) % size_z;
A.push(SparseMatEntry::new(i, A_idx, one));
B.push(SparseMatEntry::new(i, B_idx, one));
let AB_val = Z[A_idx] * Z[B_idx];
let C_idx = (i + 3) % size_z;
let C_val = Z[C_idx];
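// choose the C entry so the i-th constraint is satisfied by construction:
// if Z[C_idx] == 0, place Az*Bz on the constant-one column; otherwise scale
// the entry so that C_entry * Z[C_idx] == Az * Bz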
if C_val == Scalar::zero() {
C.push(SparseMatEntry::new(i, num_vars, AB_val));
} else {
C.push(SparseMatEntry::new(
i,
C_idx,
AB_val * C_val.invert().unwrap(),
));
}
}
Timer::print(&format!("number_non-zero_entries_A {}", A.len()));
Timer::print(&format!("number_non-zero_entries_B {}", B.len()));
Timer::print(&format!("number_non-zero_entries_C {}", C.len()));
let num_poly_vars_x = num_cons.log_2();
let num_poly_vars_y = (2 * num_vars).log_2();
let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, A);
let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, B);
let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, C);
let inst = R1CSInstance {
num_cons,
num_vars,
num_inputs,
A: poly_A,
B: poly_B,
C: poly_C,
};
assert!(inst.is_sat(&Z[..num_vars], &Z[num_vars + 1..]));
(inst, Z[..num_vars].to_vec(), Z[num_vars + 1..].to_vec())
}
pub fn is_sat(&self, vars: &[Scalar], input: &[Scalar]) -> bool {
assert_eq!(vars.len(), self.num_vars);
assert_eq!(input.len(), self.num_inputs);
let z = {
let mut z = vars.to_vec();
z.extend(&vec![Scalar::one()]);
z.extend(input);
z
};
// verify if Az * Bz - Cz = [0...]
let Az = self
.A
.multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
let Bz = self
.B
.multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
let Cz = self
.C
.multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
assert_eq!(Az.len(), self.num_cons);
assert_eq!(Bz.len(), self.num_cons);
assert_eq!(Cz.len(), self.num_cons);
let res: usize = (0..self.num_cons)
.map(|i| usize::from(Az[i] * Bz[i] != Cz[i]))
.sum();
res == 0
}
pub fn multiply_vec(
&self,
num_rows: usize,
num_cols: usize,
z: &[Scalar],
) -> (DensePolynomial, DensePolynomial, DensePolynomial) {
assert_eq!(num_rows, self.num_cons);
assert_eq!(z.len(), num_cols);
assert!(num_cols > self.num_vars);
(
DensePolynomial::new(self.A.multiply_vec(num_rows, num_cols, z)),
DensePolynomial::new(self.B.multiply_vec(num_rows, num_cols, z)),
DensePolynomial::new(self.C.multiply_vec(num_rows, num_cols, z)),
)
}
pub fn compute_eval_table_sparse(
&self,
num_rows: usize,
num_cols: usize,
evals: &[Scalar],
) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
assert_eq!(num_rows, self.num_cons);
assert!(num_cols > self.num_vars);
let evals_A = self.A.compute_eval_table_sparse(evals, num_rows, num_cols);
let evals_B = self.B.compute_eval_table_sparse(evals, num_rows, num_cols);
let evals_C = self.C.compute_eval_table_sparse(evals, num_rows, num_cols);
(evals_A, evals_B, evals_C)
}
pub fn evaluate(&self, rx: &[Scalar], ry: &[Scalar]) -> (Scalar, Scalar, Scalar) {
let evals = SparseMatPolynomial::multi_evaluate(&[&self.A, &self.B, &self.C], rx, ry);
(evals[0], evals[1], evals[2])
}
pub fn commit(&self, gens: &R1CSCommitmentGens) -> (R1CSCommitment, R1CSDecommitment) {
let (comm, dense) = SparseMatPolynomial::multi_commit(&[&self.A, &self.B, &self.C], &gens.gens);
let r1cs_comm = R1CSCommitment {
num_cons: self.num_cons,
num_vars: self.num_vars,
num_inputs: self.num_inputs,
comm,
};
let r1cs_decomm = R1CSDecommitment { dense };
(r1cs_comm, r1cs_decomm)
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct R1CSEvalProof {
proof: SparseMatPolyEvalProof,
}
impl R1CSEvalProof {
pub fn prove(
decomm: &R1CSDecommitment,
rx: &[Scalar], // point at which the polynomial is evaluated
ry: &[Scalar],
evals: &(Scalar, Scalar, Scalar),
gens: &R1CSCommitmentGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> R1CSEvalProof {
let timer = Timer::new("R1CSEvalProof::prove");
let proof = SparseMatPolyEvalProof::prove(
&decomm.dense,
rx,
ry,
&[evals.0, evals.1, evals.2],
&gens.gens,
transcript,
random_tape,
);
timer.stop();
R1CSEvalProof { proof }
}
pub fn verify(
&self,
comm: &R1CSCommitment,
rx: &[Scalar], // point at which the R1CS matrix polynomials are evaluated
ry: &[Scalar],
evals: &(Scalar, Scalar, Scalar),
gens: &R1CSCommitmentGens,
transcript: &mut Transcript,
) -> Result<(), ProofVerifyError> {
self.proof.verify(
&comm.comm,
rx,
ry,
&[evals.0, evals.1, evals.2],
&gens.gens,
transcript,
)
}
}

View File

@@ -0,0 +1,608 @@
#![allow(clippy::too_many_arguments)]
use super::commitments::{Commitments, MultiCommitGens};
use super::dense_mlpoly::{
DensePolynomial, EqPolynomial, PolyCommitment, PolyCommitmentGens, PolyEvalProof,
};
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::math::Math;
use super::nizk::{EqualityProof, KnowledgeProof, ProductProof};
use super::r1csinstance::R1CSInstance;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::sparse_mlpoly::{SparsePolyEntry, SparsePolynomial};
use super::sumcheck::ZKSumcheckInstanceProof;
use super::timer::Timer;
use super::transcript::{AppendToTranscript, ProofTranscript};
use crate::group::DecompressEncodedPoint;
use core::iter;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug)]
pub struct R1CSProof {
pub comm_vars: PolyCommitment,
pub sc_proof_phase1: ZKSumcheckInstanceProof,
pub claims_phase2: (
CompressedGroup,
CompressedGroup,
CompressedGroup,
CompressedGroup,
),
pub pok_claims_phase2: (KnowledgeProof, ProductProof),
pub proof_eq_sc_phase1: EqualityProof,
pub sc_proof_phase2: ZKSumcheckInstanceProof,
pub comm_vars_at_ry: CompressedGroup,
pub proof_eval_vars_at_ry: PolyEvalProof,
pub proof_eq_sc_phase2: EqualityProof,
}
pub struct R1CSSumcheckGens {
pub gens_1: MultiCommitGens,
pub gens_3: MultiCommitGens,
pub gens_4: MultiCommitGens,
}
// TODO: fix passing gens_1_ref
impl R1CSSumcheckGens {
pub fn new(label: &'static [u8], gens_1_ref: &MultiCommitGens) -> Self {
let gens_1 = gens_1_ref.clone();
let gens_3 = MultiCommitGens::new(3, label);
let gens_4 = MultiCommitGens::new(4, label);
R1CSSumcheckGens {
gens_1,
gens_3,
gens_4,
}
}
}
pub struct R1CSGens {
pub gens_sc: R1CSSumcheckGens,
pub gens_pc: PolyCommitmentGens,
}
impl R1CSGens {
pub fn new(label: &'static [u8], _num_cons: usize, num_vars: usize) -> Self {
let num_poly_vars = num_vars.log_2();
let gens_pc = PolyCommitmentGens::new(num_poly_vars, label);
let gens_sc = R1CSSumcheckGens::new(label, &gens_pc.gens.gens_1);
R1CSGens { gens_sc, gens_pc }
}
}
impl R1CSProof {
fn prove_phase_one(
num_rounds: usize,
evals_tau: &mut DensePolynomial,
evals_Az: &mut DensePolynomial,
evals_Bz: &mut DensePolynomial,
evals_Cz: &mut DensePolynomial,
gens: &R1CSSumcheckGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> (ZKSumcheckInstanceProof, Vec<Scalar>, Vec<Scalar>, Scalar) {
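// G(x) = eq(tau, x) * (Az(x) * Bz(x) - Cz(x)); this sums to zero over the
// boolean hypercube iff the R1CS instance is satisfied.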
let comb_func = |poly_A_comp: &Scalar,
poly_B_comp: &Scalar,
poly_C_comp: &Scalar,
poly_D_comp: &Scalar|
-> Scalar { poly_A_comp * (poly_B_comp * poly_C_comp - poly_D_comp) };
let (sc_proof_phase_one, r, claims, blind_claim_postsc) =
ZKSumcheckInstanceProof::prove_cubic_with_additive_term(
&Scalar::zero(), // claim is zero
&Scalar::zero(), // blind for claim is also zero
num_rounds,
evals_tau,
evals_Az,
evals_Bz,
evals_Cz,
comb_func,
&gens.gens_1,
&gens.gens_4,
transcript,
random_tape,
);
(sc_proof_phase_one, r, claims, blind_claim_postsc)
}
fn prove_phase_two(
num_rounds: usize,
claim: &Scalar,
blind_claim: &Scalar,
evals_z: &mut DensePolynomial,
evals_ABC: &mut DensePolynomial,
gens: &R1CSSumcheckGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> (ZKSumcheckInstanceProof, Vec<Scalar>, Vec<Scalar>, Scalar) {
let comb_func =
|poly_A_comp: &Scalar, poly_B_comp: &Scalar| -> Scalar { poly_A_comp * poly_B_comp };
let (sc_proof_phase_two, r, claims, blind_claim_postsc) = ZKSumcheckInstanceProof::prove_quad(
claim,
blind_claim,
num_rounds,
evals_z,
evals_ABC,
comb_func,
&gens.gens_1,
&gens.gens_3,
transcript,
random_tape,
);
(sc_proof_phase_two, r, claims, blind_claim_postsc)
}
fn protocol_name() -> &'static [u8] {
b"R1CS proof"
}
pub fn prove(
inst: &R1CSInstance,
vars: Vec<Scalar>,
input: &[Scalar],
gens: &R1CSGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> (R1CSProof, Vec<Scalar>, Vec<Scalar>) {
let timer_prove = Timer::new("R1CSProof::prove");
transcript.append_protocol_name(R1CSProof::protocol_name());
// we currently require |inputs| + 1 to be at most the number of vars
assert!(input.len() < vars.len());
input.append_to_transcript(b"input", transcript);
let timer_commit = Timer::new("polycommit");
let (poly_vars, comm_vars, blinds_vars) = {
// create a multilinear polynomial using the supplied assignment for variables
let poly_vars = DensePolynomial::new(vars.clone());
// produce a commitment to the satisfying assignment
let (comm_vars, blinds_vars) = poly_vars.commit(&gens.gens_pc, Some(random_tape));
// add the commitment to the prover's transcript
comm_vars.append_to_transcript(b"poly_commitment", transcript);
(poly_vars, comm_vars, blinds_vars)
};
timer_commit.stop();
let timer_sc_proof_phase1 = Timer::new("prove_sc_phase_one");
// append input to variables to create a single vector z
let z = {
let num_inputs = input.len();
let num_vars = vars.len();
let mut z = vars;
z.push(Scalar::one()); // add constant term in z
z.extend(input);
z.extend(&vec![Scalar::zero(); num_vars - num_inputs - 1]); // we will pad with zeros
z
};
// derive the verifier's challenge tau
let (num_rounds_x, num_rounds_y) = (inst.get_num_cons().log_2(), z.len().log_2());
let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
// compute the initial evaluation table for R(\tau, x)
let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau).evals());
let (mut poly_Az, mut poly_Bz, mut poly_Cz) =
inst.multiply_vec(inst.get_num_cons(), z.len(), &z);
let (sc_proof_phase1, rx, _claims_phase1, blind_claim_postsc1) = R1CSProof::prove_phase_one(
num_rounds_x,
&mut poly_tau,
&mut poly_Az,
&mut poly_Bz,
&mut poly_Cz,
&gens.gens_sc,
transcript,
random_tape,
);
assert_eq!(poly_tau.len(), 1);
assert_eq!(poly_Az.len(), 1);
assert_eq!(poly_Bz.len(), 1);
assert_eq!(poly_Cz.len(), 1);
timer_sc_proof_phase1.stop();
let (tau_claim, Az_claim, Bz_claim, Cz_claim) =
(&poly_tau[0], &poly_Az[0], &poly_Bz[0], &poly_Cz[0]);
let (Az_blind, Bz_blind, Cz_blind, prod_Az_Bz_blind) = (
random_tape.random_scalar(b"Az_blind"),
random_tape.random_scalar(b"Bz_blind"),
random_tape.random_scalar(b"Cz_blind"),
random_tape.random_scalar(b"prod_Az_Bz_blind"),
);
let (pok_Cz_claim, comm_Cz_claim) = {
KnowledgeProof::prove(
&gens.gens_sc.gens_1,
transcript,
random_tape,
Cz_claim,
&Cz_blind,
)
};
let (proof_prod, comm_Az_claim, comm_Bz_claim, comm_prod_Az_Bz_claims) = {
let prod = Az_claim * Bz_claim;
ProductProof::prove(
&gens.gens_sc.gens_1,
transcript,
random_tape,
Az_claim,
&Az_blind,
Bz_claim,
&Bz_blind,
&prod,
&prod_Az_Bz_blind,
)
};
comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript);
comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript);
comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript);
comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript);
// prove the final step of sum-check #1
let taus_bound_rx = tau_claim;
let blind_expected_claim_postsc1 = taus_bound_rx * (prod_Az_Bz_blind - Cz_blind);
let claim_post_phase1 = (Az_claim * Bz_claim - Cz_claim) * taus_bound_rx;
let (proof_eq_sc_phase1, _C1, _C2) = EqualityProof::prove(
&gens.gens_sc.gens_1,
transcript,
random_tape,
&claim_post_phase1,
&blind_expected_claim_postsc1,
&claim_post_phase1,
&blind_claim_postsc1,
);
let timer_sc_proof_phase2 = Timer::new("prove_sc_phase_two");
// combine the three claims into a single claim
let r_A = transcript.challenge_scalar(b"challenege_Az");
let r_B = transcript.challenge_scalar(b"challenege_Bz");
let r_C = transcript.challenge_scalar(b"challenege_Cz");
let claim_phase2 = r_A * Az_claim + r_B * Bz_claim + r_C * Cz_claim;
let blind_claim_phase2 = r_A * Az_blind + r_B * Bz_blind + r_C * Cz_blind;
let evals_ABC = {
// compute the evaluation table for eq(rx, x)
let evals_rx = EqPolynomial::new(rx.clone()).evals();
let (evals_A, evals_B, evals_C) =
inst.compute_eval_table_sparse(inst.get_num_cons(), z.len(), &evals_rx);
assert_eq!(evals_A.len(), evals_B.len());
assert_eq!(evals_A.len(), evals_C.len());
(0..evals_A.len())
.map(|i| r_A * evals_A[i] + r_B * evals_B[i] + r_C * evals_C[i])
.collect::<Vec<Scalar>>()
};
// another instance of the sum-check protocol
let (sc_proof_phase2, ry, claims_phase2, blind_claim_postsc2) = R1CSProof::prove_phase_two(
num_rounds_y,
&claim_phase2,
&blind_claim_phase2,
&mut DensePolynomial::new(z),
&mut DensePolynomial::new(evals_ABC),
&gens.gens_sc,
transcript,
random_tape,
);
timer_sc_proof_phase2.stop();
let timer_polyeval = Timer::new("polyeval");
let eval_vars_at_ry = poly_vars.evaluate(&ry[1..]);
let blind_eval = random_tape.random_scalar(b"blind_eval");
let (proof_eval_vars_at_ry, comm_vars_at_ry) = PolyEvalProof::prove(
&poly_vars,
Some(&blinds_vars),
&ry[1..],
&eval_vars_at_ry,
Some(&blind_eval),
&gens.gens_pc,
transcript,
random_tape,
);
timer_polyeval.stop();
// prove the final step of sum-check #2
let blind_eval_Z_at_ry = (Scalar::one() - ry[0]) * blind_eval;
let blind_expected_claim_postsc2 = claims_phase2[1] * blind_eval_Z_at_ry;
let claim_post_phase2 = claims_phase2[0] * claims_phase2[1];
let (proof_eq_sc_phase2, _C1, _C2) = EqualityProof::prove(
&gens.gens_pc.gens.gens_1,
transcript,
random_tape,
&claim_post_phase2,
&blind_expected_claim_postsc2,
&claim_post_phase2,
&blind_claim_postsc2,
);
timer_prove.stop();
(
R1CSProof {
comm_vars,
sc_proof_phase1,
claims_phase2: (
comm_Az_claim,
comm_Bz_claim,
comm_Cz_claim,
comm_prod_Az_Bz_claims,
),
pok_claims_phase2: (pok_Cz_claim, proof_prod),
proof_eq_sc_phase1,
sc_proof_phase2,
comm_vars_at_ry,
proof_eval_vars_at_ry,
proof_eq_sc_phase2,
},
rx,
ry,
)
}
pub fn verify(
&self,
num_vars: usize,
num_cons: usize,
input: &[Scalar],
evals: &(Scalar, Scalar, Scalar),
transcript: &mut Transcript,
gens: &R1CSGens,
) -> Result<(Vec<Scalar>, Vec<Scalar>), ProofVerifyError> {
transcript.append_protocol_name(R1CSProof::protocol_name());
input.append_to_transcript(b"input", transcript);
let n = num_vars;
// add the commitment to the verifier's transcript
self
.comm_vars
.append_to_transcript(b"poly_commitment", transcript);
let (num_rounds_x, num_rounds_y) = (num_cons.log_2(), (2 * num_vars).log_2());
// derive the verifier's challenge tau
let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
// verify the first sum-check instance
let claim_phase1 = Scalar::zero()
.commit(&Scalar::zero(), &gens.gens_sc.gens_1)
.compress();
let (comm_claim_post_phase1, rx) = self.sc_proof_phase1.verify(
&claim_phase1,
num_rounds_x,
3,
&gens.gens_sc.gens_1,
&gens.gens_sc.gens_4,
transcript,
)?;
// perform the intermediate sum-check test with claimed Az, Bz, and Cz
let (comm_Az_claim, comm_Bz_claim, comm_Cz_claim, comm_prod_Az_Bz_claims) = &self.claims_phase2;
let (pok_Cz_claim, proof_prod) = &self.pok_claims_phase2;
pok_Cz_claim.verify(&gens.gens_sc.gens_1, transcript, comm_Cz_claim)?;
proof_prod.verify(
&gens.gens_sc.gens_1,
transcript,
comm_Az_claim,
comm_Bz_claim,
comm_prod_Az_Bz_claims,
)?;
comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript);
comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript);
comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript);
comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript);
let taus_bound_rx: Scalar = (0..rx.len())
.map(|i| rx[i] * tau[i] + (Scalar::one() - rx[i]) * (Scalar::one() - tau[i]))
.product();
let expected_claim_post_phase1 = (taus_bound_rx
* (comm_prod_Az_Bz_claims.decompress().unwrap() - comm_Cz_claim.decompress().unwrap()))
.compress();
// verify proof that expected_claim_post_phase1 == claim_post_phase1
self.proof_eq_sc_phase1.verify(
&gens.gens_sc.gens_1,
transcript,
&expected_claim_post_phase1,
&comm_claim_post_phase1,
)?;
// derive three public challenges and then derive a joint claim
let r_A = transcript.challenge_scalar(b"challenege_Az");
let r_B = transcript.challenge_scalar(b"challenege_Bz");
let r_C = transcript.challenge_scalar(b"challenege_Cz");
// r_A * comm_Az_claim + r_B * comm_Bz_claim + r_C * comm_Cz_claim;
let comm_claim_phase2 = GroupElement::vartime_multiscalar_mul(
iter::once(r_A)
.chain(iter::once(r_B))
.chain(iter::once(r_C))
.collect(),
iter::once(&comm_Az_claim)
.chain(iter::once(&comm_Bz_claim))
.chain(iter::once(&comm_Cz_claim))
.map(|pt| pt.decompress().unwrap())
.collect(),
)
.compress();
// verify the joint claim with a sum-check protocol
let (comm_claim_post_phase2, ry) = self.sc_proof_phase2.verify(
&comm_claim_phase2,
num_rounds_y,
2,
&gens.gens_sc.gens_1,
&gens.gens_sc.gens_3,
transcript,
)?;
// verify Z(ry) proof against the initial commitment
self.proof_eval_vars_at_ry.verify(
&gens.gens_pc,
transcript,
&ry[1..],
&self.comm_vars_at_ry,
&self.comm_vars,
)?;
let poly_input_eval = {
// constant term
let mut input_as_sparse_poly_entries = vec![SparsePolyEntry::new(0, Scalar::one())];
//remaining inputs
input_as_sparse_poly_entries.extend(
(0..input.len())
.map(|i| SparsePolyEntry::new(i + 1, input[i]))
.collect::<Vec<SparsePolyEntry>>(),
);
SparsePolynomial::new(n.log_2(), input_as_sparse_poly_entries).evaluate(&ry[1..])
};
// compute commitment to eval_Z_at_ry = (Scalar::one() - ry[0]) * self.eval_vars_at_ry + ry[0] * poly_input_eval
let comm_eval_Z_at_ry = GroupElement::vartime_multiscalar_mul(
iter::once(Scalar::one() - ry[0])
.chain(iter::once(ry[0]))
.collect(),
iter::once(self.comm_vars_at_ry.decompress().unwrap())
.chain(iter::once(
poly_input_eval.commit(&Scalar::zero(), &gens.gens_pc.gens.gens_1),
))
.collect(),
);
// perform the final check in the second sum-check protocol
let (eval_A_r, eval_B_r, eval_C_r) = evals;
let expected_claim_post_phase2 =
((r_A * eval_A_r + r_B * eval_B_r + r_C * eval_C_r) * comm_eval_Z_at_ry).compress();
// verify proof that expected_claim_post_phase1 == claim_post_phase1
self.proof_eq_sc_phase2.verify(
&gens.gens_sc.gens_1,
transcript,
&expected_claim_post_phase2,
&comm_claim_post_phase2,
)?;
Ok((rx, ry))
}
}
#[cfg(test)]
mod tests {
use super::*;
use rand_core::OsRng;
fn produce_tiny_r1cs() -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
// three constraints over five variables Z1, Z2, Z3, Z4, and Z5
// padded to powers of two
let num_cons = 128;
let num_vars = 256;
let num_inputs = 2;
// encode the above constraints into three matrices
let mut A: Vec<(usize, usize, Scalar)> = Vec::new();
let mut B: Vec<(usize, usize, Scalar)> = Vec::new();
let mut C: Vec<(usize, usize, Scalar)> = Vec::new();
let one = Scalar::one();
// constraint 0 entries
// (Z1 + Z2) * I0 - Z3 = 0;
A.push((0, 0, one));
A.push((0, 1, one));
B.push((0, num_vars + 1, one));
C.push((0, 2, one));
// constraint 1 entries
// (Z1 + I1) * (Z3) - Z4 = 0
A.push((1, 0, one));
A.push((1, num_vars + 2, one));
B.push((1, 2, one));
C.push((1, 3, one));
// constraint 2 entries
// Z5 * 1 - 0 = 0
A.push((2, 4, one));
B.push((2, num_vars, one));
let inst = R1CSInstance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
// compute a satisfying assignment
let mut csprng: OsRng = OsRng;
let i0 = Scalar::random(&mut csprng);
let i1 = Scalar::random(&mut csprng);
let z1 = Scalar::random(&mut csprng);
let z2 = Scalar::random(&mut csprng);
let z3 = (z1 + z2) * i0; // constraint 0: (Z1 + Z2) * I0 - Z3 = 0
let z4 = (z1 + i1) * z3; // constraint 1: (Z1 + I1) * (Z3) - Z4 = 0
let z5 = Scalar::zero(); // constraint 2: Z5 * 1 - 0 = 0
let mut vars = vec![Scalar::zero(); num_vars];
vars[0] = z1;
vars[1] = z2;
vars[2] = z3;
vars[3] = z4;
vars[4] = z5;
let mut input = vec![Scalar::zero(); num_inputs];
input[0] = i0;
input[1] = i1;
(inst, vars, input)
}
#[test]
fn test_tiny_r1cs() {
let (inst, vars, input) = tests::produce_tiny_r1cs();
let is_sat = inst.is_sat(&vars, &input);
assert!(is_sat);
}
#[test]
fn test_synthetic_r1cs() {
let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(1024, 1024, 10);
let is_sat = inst.is_sat(&vars, &input);
assert!(is_sat);
}
#[test]
pub fn check_r1cs_proof() {
let num_vars = 1024;
let num_cons = num_vars;
let num_inputs = 10;
let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
let gens = R1CSGens::new(b"test-m", num_cons, num_vars);
let mut random_tape = RandomTape::new(b"proof");
let mut prover_transcript = Transcript::new(b"example");
let (proof, rx, ry) = R1CSProof::prove(
&inst,
vars,
&input,
&gens,
&mut prover_transcript,
&mut random_tape,
);
let inst_evals = inst.evaluate(&rx, &ry);
let mut verifier_transcript = Transcript::new(b"example");
assert!(proof
.verify(
inst.get_num_vars(),
inst.get_num_cons(),
&input,
&inst_evals,
&mut verifier_transcript,
&gens,
)
.is_ok());
}
}


@@ -0,0 +1,27 @@
use super::scalar::Scalar;
use super::transcript::ProofTranscript;
use merlin::Transcript;
use rand_core::OsRng;
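// A "random tape": a transcript seeded with fresh OS randomness, from which
// the prover draws blinding factors as Fiat-Shamir challenges.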
pub struct RandomTape {
tape: Transcript,
}
impl RandomTape {
pub fn new(name: &'static [u8]) -> Self {
let tape = {
let mut rng = OsRng::default();
let mut tape = Transcript::new(name);
tape.append_scalar(b"init_randomness", &Scalar::random(&mut rng));
tape
};
Self { tape }
}
pub fn random_scalar(&mut self, label: &'static [u8]) -> Scalar {
self.tape.challenge_scalar(label)
}
pub fn random_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar> {
self.tape.challenge_vector(label, len)
}
}


@@ -0,0 +1,46 @@
use secq256k1::elliptic_curve::ops::Reduce;
use secq256k1::U256;
mod scalar;
pub type Scalar = scalar::Scalar;
pub type ScalarBytes = secq256k1::Scalar;
pub trait ScalarFromPrimitives {
fn to_scalar(self) -> Scalar;
}
impl ScalarFromPrimitives for usize {
#[inline]
fn to_scalar(self) -> Scalar {
(0..self).map(|_i| Scalar::one()).sum()
}
}
impl ScalarFromPrimitives for bool {
#[inline]
fn to_scalar(self) -> Scalar {
if self {
Scalar::one()
} else {
Scalar::zero()
}
}
}
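// Converts Spartan scalars into the secq256k1 scalar type by reducing the
// little-endian byte representation into the target field ("decompress" in the Spartan naming).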
pub trait ScalarBytesFromScalar {
fn decompress_scalar(s: &Scalar) -> ScalarBytes;
fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes>;
}
impl ScalarBytesFromScalar for Scalar {
fn decompress_scalar(s: &Scalar) -> ScalarBytes {
ScalarBytes::from_uint_reduced(U256::from_le_slice(&s.to_bytes()))
}
fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes> {
(0..s.len())
.map(|i| Scalar::decompress_scalar(&s[i]))
.collect::<Vec<ScalarBytes>>()
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,778 @@
#![allow(clippy::too_many_arguments)]
#![allow(clippy::type_complexity)]
use super::commitments::{Commitments, MultiCommitGens};
use super::dense_mlpoly::DensePolynomial;
use super::errors::ProofVerifyError;
use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul};
use super::nizk::DotProductProof;
use super::random::RandomTape;
use super::scalar::Scalar;
use super::transcript::{AppendToTranscript, ProofTranscript};
use super::unipoly::{CompressedUniPoly, UniPoly};
use crate::group::DecompressEncodedPoint;
use core::iter;
use itertools::izip;
use merlin::Transcript;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug)]
pub struct SumcheckInstanceProof {
compressed_polys: Vec<CompressedUniPoly>,
}
impl SumcheckInstanceProof {
pub fn new(compressed_polys: Vec<CompressedUniPoly>) -> SumcheckInstanceProof {
SumcheckInstanceProof { compressed_polys }
}
pub fn verify(
&self,
claim: Scalar,
num_rounds: usize,
degree_bound: usize,
transcript: &mut Transcript,
) -> Result<(Scalar, Vec<Scalar>), ProofVerifyError> {
let mut e = claim;
let mut r: Vec<Scalar> = Vec::new();
// verify that there is a univariate polynomial for each round
assert_eq!(self.compressed_polys.len(), num_rounds);
for i in 0..self.compressed_polys.len() {
let poly = self.compressed_polys[i].decompress(&e);
// verify degree bound
assert_eq!(poly.degree(), degree_bound);
// check if G_k(0) + G_k(1) = e
assert_eq!(poly.eval_at_zero() + poly.eval_at_one(), e);
// append the prover's message to the transcript
poly.append_to_transcript(b"poly", transcript);
//derive the verifier's challenge for the next round
let r_i = transcript.challenge_scalar(b"challenge_nextround");
r.push(r_i);
// evaluate the claimed degree-ell polynomial at r_i
e = poly.evaluate(&r_i);
}
Ok((e, r))
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ZKSumcheckInstanceProof {
pub comm_polys: Vec<CompressedGroup>,
pub comm_evals: Vec<CompressedGroup>,
pub proofs: Vec<DotProductProof>,
}
impl ZKSumcheckInstanceProof {
pub fn new(
comm_polys: Vec<CompressedGroup>,
comm_evals: Vec<CompressedGroup>,
proofs: Vec<DotProductProof>,
) -> Self {
ZKSumcheckInstanceProof {
comm_polys,
comm_evals,
proofs,
}
}
pub fn verify(
&self,
comm_claim: &CompressedGroup,
num_rounds: usize,
degree_bound: usize,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) -> Result<(CompressedGroup, Vec<Scalar>), ProofVerifyError> {
// verify degree bound
assert_eq!(gens_n.n, degree_bound + 1);
// verify that there is a univariate polynomial for each round
assert_eq!(self.comm_polys.len(), num_rounds);
assert_eq!(self.comm_evals.len(), num_rounds);
let mut r: Vec<Scalar> = Vec::new();
for i in 0..self.comm_polys.len() {
let comm_poly = &self.comm_polys[i];
// append the prover's polynomial to the transcript
comm_poly.append_to_transcript(b"comm_poly", transcript);
//derive the verifier's challenge for the next round
let r_i = transcript.challenge_scalar(b"challenge_nextround");
// verify the proof of sum-check and evals
let res = {
let comm_claim_per_round = if i == 0 {
comm_claim
} else {
&self.comm_evals[i - 1]
};
let comm_eval = &self.comm_evals[i];
// add two claims to transcript
comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
comm_eval.append_to_transcript(b"comm_eval", transcript);
// produce two weights
let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);
// compute a weighted sum of the RHS
let comm_target = GroupElement::vartime_multiscalar_mul(
w.clone(),
iter::once(&comm_claim_per_round)
.chain(iter::once(&comm_eval))
.map(|pt| pt.decompress().unwrap())
.collect(),
)
.compress();
let a = {
// the vector to use to decommit for sum-check test
let a_sc = {
let mut a = vec![Scalar::one(); degree_bound + 1];
a[0] += Scalar::one();
a
};
// the vector to use to decommit for evaluation
let a_eval = {
let mut a = vec![Scalar::one(); degree_bound + 1];
for j in 1..a.len() {
a[j] = a[j - 1] * r_i;
}
a
};
// take weighted sum of the two vectors using w
assert_eq!(a_sc.len(), a_eval.len());
(0..a_sc.len())
.map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
.collect::<Vec<Scalar>>()
};
self.proofs[i]
.verify(
gens_1,
gens_n,
transcript,
&a,
&self.comm_polys[i],
&comm_target,
)
.is_ok()
};
if !res {
return Err(ProofVerifyError::InternalError);
}
r.push(r_i);
}
Ok((self.comm_evals[self.comm_evals.len() - 1], r))
}
}
impl SumcheckInstanceProof {
pub fn prove_cubic<F>(
claim: &Scalar,
num_rounds: usize,
poly_A: &mut DensePolynomial,
poly_B: &mut DensePolynomial,
poly_C: &mut DensePolynomial,
comb_func: F,
transcript: &mut Transcript,
) -> (Self, Vec<Scalar>, Vec<Scalar>)
where
F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar,
{
let mut e = *claim;
let mut r: Vec<Scalar> = Vec::new();
let mut cubic_polys: Vec<CompressedUniPoly> = Vec::new();
for _j in 0..num_rounds {
let mut eval_point_0 = Scalar::zero();
let mut eval_point_2 = Scalar::zero();
let mut eval_point_3 = Scalar::zero();
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);
// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
}
let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3];
let poly = UniPoly::from_evals(&evals);
// append the prover's message to the transcript
poly.append_to_transcript(b"poly", transcript);
//derive the verifier's challenge for the next round
let r_j = transcript.challenge_scalar(b"challenge_nextround");
r.push(r_j);
// bound all tables to the verifier's challenge
poly_A.bound_poly_var_top(&r_j);
poly_B.bound_poly_var_top(&r_j);
poly_C.bound_poly_var_top(&r_j);
e = poly.evaluate(&r_j);
cubic_polys.push(poly.compress());
}
(
SumcheckInstanceProof::new(cubic_polys),
r,
vec![poly_A[0], poly_B[0], poly_C[0]],
)
}
pub fn prove_cubic_batched<F>(
claim: &Scalar,
num_rounds: usize,
poly_vec_par: (
&mut Vec<&mut DensePolynomial>,
&mut Vec<&mut DensePolynomial>,
&mut DensePolynomial,
),
poly_vec_seq: (
&mut Vec<&mut DensePolynomial>,
&mut Vec<&mut DensePolynomial>,
&mut Vec<&mut DensePolynomial>,
),
coeffs: &[Scalar],
comb_func: F,
transcript: &mut Transcript,
) -> (
Self,
Vec<Scalar>,
(Vec<Scalar>, Vec<Scalar>, Scalar),
(Vec<Scalar>, Vec<Scalar>, Vec<Scalar>),
)
where
F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar,
{
let (poly_A_vec_par, poly_B_vec_par, poly_C_par) = poly_vec_par;
let (poly_A_vec_seq, poly_B_vec_seq, poly_C_vec_seq) = poly_vec_seq;
let mut e = *claim;
let mut r: Vec<Scalar> = Vec::new();
let mut cubic_polys: Vec<CompressedUniPoly> = Vec::new();
for _j in 0..num_rounds {
let mut evals: Vec<(Scalar, Scalar, Scalar)> = Vec::new();
for (poly_A, poly_B) in poly_A_vec_par.iter().zip(poly_B_vec_par.iter()) {
let mut eval_point_0 = Scalar::zero();
let mut eval_point_2 = Scalar::zero();
let mut eval_point_3 = Scalar::zero();
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C_par[i]);
// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_par[len + i] + poly_C_par[len + i] - poly_C_par[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C_par[len + i] - poly_C_par[i];
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
}
evals.push((eval_point_0, eval_point_2, eval_point_3));
}
for (poly_A, poly_B, poly_C) in izip!(
poly_A_vec_seq.iter(),
poly_B_vec_seq.iter(),
poly_C_vec_seq.iter()
) {
let mut eval_point_0 = Scalar::zero();
let mut eval_point_2 = Scalar::zero();
let mut eval_point_3 = Scalar::zero();
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]);
// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
);
}
evals.push((eval_point_0, eval_point_2, eval_point_3));
}
let evals_combined_0 = (0..evals.len()).map(|i| evals[i].0 * coeffs[i]).sum();
let evals_combined_2 = (0..evals.len()).map(|i| evals[i].1 * coeffs[i]).sum();
let evals_combined_3 = (0..evals.len()).map(|i| evals[i].2 * coeffs[i]).sum();
let evals = vec![
evals_combined_0,
e - evals_combined_0,
evals_combined_2,
evals_combined_3,
];
let poly = UniPoly::from_evals(&evals);
// append the prover's message to the transcript
poly.append_to_transcript(b"poly", transcript);
//derive the verifier's challenge for the next round
let r_j = transcript.challenge_scalar(b"challenge_nextround");
r.push(r_j);
// bound all tables to the verifier's challenge
for (poly_A, poly_B) in poly_A_vec_par.iter_mut().zip(poly_B_vec_par.iter_mut()) {
poly_A.bound_poly_var_top(&r_j);
poly_B.bound_poly_var_top(&r_j);
}
poly_C_par.bound_poly_var_top(&r_j);
for (poly_A, poly_B, poly_C) in izip!(
poly_A_vec_seq.iter_mut(),
poly_B_vec_seq.iter_mut(),
poly_C_vec_seq.iter_mut()
) {
poly_A.bound_poly_var_top(&r_j);
poly_B.bound_poly_var_top(&r_j);
poly_C.bound_poly_var_top(&r_j);
}
e = poly.evaluate(&r_j);
cubic_polys.push(poly.compress());
}
let poly_A_par_final = (0..poly_A_vec_par.len())
.map(|i| poly_A_vec_par[i][0])
.collect();
let poly_B_par_final = (0..poly_B_vec_par.len())
.map(|i| poly_B_vec_par[i][0])
.collect();
let claims_prod = (poly_A_par_final, poly_B_par_final, poly_C_par[0]);
let poly_A_seq_final = (0..poly_A_vec_seq.len())
.map(|i| poly_A_vec_seq[i][0])
.collect();
let poly_B_seq_final = (0..poly_B_vec_seq.len())
.map(|i| poly_B_vec_seq[i][0])
.collect();
let poly_C_seq_final = (0..poly_C_vec_seq.len())
.map(|i| poly_C_vec_seq[i][0])
.collect();
let claims_dotp = (poly_A_seq_final, poly_B_seq_final, poly_C_seq_final);
(
SumcheckInstanceProof::new(cubic_polys),
r,
claims_prod,
claims_dotp,
)
}
}
impl ZKSumcheckInstanceProof {
pub fn prove_quad<F>(
claim: &Scalar,
blind_claim: &Scalar,
num_rounds: usize,
poly_A: &mut DensePolynomial,
poly_B: &mut DensePolynomial,
comb_func: F,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> (Self, Vec<Scalar>, Vec<Scalar>, Scalar)
where
F: Fn(&Scalar, &Scalar) -> Scalar,
{
let (blinds_poly, blinds_evals) = (
random_tape.random_vector(b"blinds_poly", num_rounds),
random_tape.random_vector(b"blinds_evals", num_rounds),
);
let mut claim_per_round = *claim;
let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress();
let mut r: Vec<Scalar> = Vec::new();
let mut comm_polys: Vec<CompressedGroup> = Vec::new();
let mut comm_evals: Vec<CompressedGroup> = Vec::new();
let mut proofs: Vec<DotProductProof> = Vec::new();
for j in 0..num_rounds {
let (poly, comm_poly) = {
let mut eval_point_0 = Scalar::zero();
let mut eval_point_2 = Scalar::zero();
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 += comb_func(&poly_A[i], &poly_B[i]);
// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
eval_point_2 += comb_func(&poly_A_bound_point, &poly_B_bound_point);
}
let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2];
let poly = UniPoly::from_evals(&evals);
let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress();
(poly, comm_poly)
};
// append the prover's message to the transcript
comm_poly.append_to_transcript(b"comm_poly", transcript);
comm_polys.push(comm_poly);
//derive the verifier's challenge for the next round
let r_j = transcript.challenge_scalar(b"challenge_nextround");
// bound all tables to the verifier's challenge
poly_A.bound_poly_var_top(&r_j);
poly_B.bound_poly_var_top(&r_j);
// produce a proof of sum-check and of evaluation
let (proof, claim_next_round, comm_claim_next_round) = {
let eval = poly.evaluate(&r_j);
let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress();
// we need to prove the following under homomorphic commitments:
// (1) poly(0) + poly(1) = claim_per_round
// (2) poly(r_j) = eval
// Our technique is to leverage dot product proofs:
// (1) we can prove: <poly_in_coeffs_form, (2, 1, 1, 1)> = claim_per_round
// (2) we can prove: <poly_in_coeffs_form, (1, r_j, r^2_j, ..) = eval
// for efficiency we batch them using random weights
// add two claims to transcript
comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
comm_eval.append_to_transcript(b"comm_eval", transcript);
// produce two weights
let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);
// compute a weighted sum of the RHS
let target = w[0] * claim_per_round + w[1] * eval;
let comm_target = GroupElement::vartime_multiscalar_mul(
w.clone(),
iter::once(&comm_claim_per_round)
.chain(iter::once(&comm_eval))
.map(|pt| pt.decompress().unwrap())
.collect(),
)
.compress();
let blind = {
let blind_sc = if j == 0 {
blind_claim
} else {
&blinds_evals[j - 1]
};
let blind_eval = &blinds_evals[j];
w[0] * blind_sc + w[1] * blind_eval
};
assert_eq!(target.commit(&blind, gens_1).compress(), comm_target);
let a = {
// the vector to use to decommit for sum-check test
let a_sc = {
let mut a = vec![Scalar::one(); poly.degree() + 1];
a[0] += Scalar::one();
a
};
// the vector to use to decommit for evaluation
let a_eval = {
let mut a = vec![Scalar::one(); poly.degree() + 1];
for j in 1..a.len() {
a[j] = a[j - 1] * r_j;
}
a
};
// take weighted sum of the two vectors using w
assert_eq!(a_sc.len(), a_eval.len());
(0..a_sc.len())
.map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
.collect::<Vec<Scalar>>()
};
let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove(
gens_1,
gens_n,
transcript,
random_tape,
&poly.as_vec(),
&blinds_poly[j],
&a,
&target,
&blind,
);
(proof, eval, comm_eval)
};
claim_per_round = claim_next_round;
comm_claim_per_round = comm_claim_next_round;
proofs.push(proof);
r.push(r_j);
comm_evals.push(comm_claim_per_round);
}
(
ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs),
r,
vec![poly_A[0], poly_B[0]],
blinds_evals[num_rounds - 1],
)
}
pub fn prove_cubic_with_additive_term<F>(
claim: &Scalar,
blind_claim: &Scalar,
num_rounds: usize,
poly_A: &mut DensePolynomial,
poly_B: &mut DensePolynomial,
poly_C: &mut DensePolynomial,
poly_D: &mut DensePolynomial,
comb_func: F,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
random_tape: &mut RandomTape,
) -> (Self, Vec<Scalar>, Vec<Scalar>, Scalar)
where
F: Fn(&Scalar, &Scalar, &Scalar, &Scalar) -> Scalar,
{
let (blinds_poly, blinds_evals) = (
random_tape.random_vector(b"blinds_poly", num_rounds),
random_tape.random_vector(b"blinds_evals", num_rounds),
);
let mut claim_per_round = *claim;
let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress();
let mut r: Vec<Scalar> = Vec::new();
let mut comm_polys: Vec<CompressedGroup> = Vec::new();
let mut comm_evals: Vec<CompressedGroup> = Vec::new();
let mut proofs: Vec<DotProductProof> = Vec::new();
for j in 0..num_rounds {
let (poly, comm_poly) = {
let mut eval_point_0 = Scalar::zero();
let mut eval_point_2 = Scalar::zero();
let mut eval_point_3 = Scalar::zero();
let len = poly_A.len() / 2;
for i in 0..len {
// eval 0: bound_func is A(low)
eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]);
// eval 2: bound_func is -A(low) + 2*A(high)
let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i];
eval_point_2 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
&poly_D_bound_point,
);
// eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i];
eval_point_3 += comb_func(
&poly_A_bound_point,
&poly_B_bound_point,
&poly_C_bound_point,
&poly_D_bound_point,
);
}
let evals = vec![
eval_point_0,
claim_per_round - eval_point_0,
eval_point_2,
eval_point_3,
];
let poly = UniPoly::from_evals(&evals);
let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress();
(poly, comm_poly)
};
// append the prover's message to the transcript
comm_poly.append_to_transcript(b"comm_poly", transcript);
comm_polys.push(comm_poly);
//derive the verifier's challenge for the next round
let r_j = transcript.challenge_scalar(b"challenge_nextround");
// bound all tables to the verifier's challenge
poly_A.bound_poly_var_top(&r_j);
poly_B.bound_poly_var_top(&r_j);
poly_C.bound_poly_var_top(&r_j);
poly_D.bound_poly_var_top(&r_j);
// produce a proof of sum-check and of evaluation
let (proof, claim_next_round, comm_claim_next_round) = {
let eval = poly.evaluate(&r_j);
let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress();
// we need to prove the following under homomorphic commitments:
// (1) poly(0) + poly(1) = claim_per_round
// (2) poly(r_j) = eval
// Our technique is to leverage dot product proofs:
// (1) we can prove: <poly_in_coeffs_form, (2, 1, 1, 1)> = claim_per_round
// (2) we can prove: <poly_in_coeffs_form, (1, r_j, r^2_j, ..) = eval
// for efficiency we batch them using random weights
// add two claims to transcript
comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript);
comm_eval.append_to_transcript(b"comm_eval", transcript);
// produce two weights
let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2);
// compute a weighted sum of the RHS
let target = w[0] * claim_per_round + w[1] * eval;
let comm_target = GroupElement::vartime_multiscalar_mul(
w.clone(),
iter::once(&comm_claim_per_round)
.chain(iter::once(&comm_eval))
.map(|pt| pt.decompress().unwrap())
.collect::<Vec<GroupElement>>(),
)
.compress();
let blind = {
let blind_sc = if j == 0 {
blind_claim
} else {
&blinds_evals[j - 1]
};
let blind_eval = &blinds_evals[j];
w[0] * blind_sc + w[1] * blind_eval
};
assert_eq!(target.commit(&blind, gens_1).compress(), comm_target);
let a = {
// the vector to use to decommit for sum-check test
let a_sc = {
let mut a = vec![Scalar::one(); poly.degree() + 1];
a[0] += Scalar::one();
a
};
// the vector to use to decommit for evaluation
let a_eval = {
let mut a = vec![Scalar::one(); poly.degree() + 1];
for j in 1..a.len() {
a[j] = a[j - 1] * r_j;
}
a
};
// take weighted sum of the two vectors using w
assert_eq!(a_sc.len(), a_eval.len());
(0..a_sc.len())
.map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
.collect::<Vec<Scalar>>()
};
let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove(
gens_1,
gens_n,
transcript,
random_tape,
&poly.as_vec(),
&blinds_poly[j],
&a,
&target,
&blind,
);
(proof, eval, comm_eval)
};
proofs.push(proof);
claim_per_round = claim_next_round;
comm_claim_per_round = comm_claim_next_round;
r.push(r_j);
comm_evals.push(comm_claim_per_round);
}
(
ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs),
r,
vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]],
blinds_evals[num_rounds - 1],
)
}
}


@@ -0,0 +1,88 @@
#[cfg(feature = "profile")]
use colored::Colorize;
#[cfg(feature = "profile")]
use core::sync::atomic::AtomicUsize;
#[cfg(feature = "profile")]
use core::sync::atomic::Ordering;
#[cfg(feature = "profile")]
use std::time::Instant;
#[cfg(feature = "profile")]
pub static CALL_DEPTH: AtomicUsize = AtomicUsize::new(0);
#[cfg(feature = "profile")]
pub struct Timer {
label: String,
timer: Instant,
}
#[cfg(feature = "profile")]
impl Timer {
#[inline(always)]
pub fn new(label: &str) -> Self {
let timer = Instant::now();
CALL_DEPTH.fetch_add(1, Ordering::Relaxed);
let star = "* ";
println!(
"{:indent$}{}{}",
"",
star,
label.yellow().bold(),
indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed)
);
Self {
label: label.to_string(),
timer,
}
}
#[inline(always)]
pub fn stop(&self) {
let duration = self.timer.elapsed();
let star = "* ";
println!(
"{:indent$}{}{} {:?}",
"",
star,
self.label.blue().bold(),
duration,
indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed)
);
CALL_DEPTH.fetch_sub(1, Ordering::Relaxed);
}
#[inline(always)]
pub fn print(msg: &str) {
CALL_DEPTH.fetch_add(1, Ordering::Relaxed);
let star = "* ";
println!(
"{:indent$}{}{}",
"",
star,
msg.to_string().green().bold(),
indent = 2 * CALL_DEPTH.fetch_add(0, Ordering::Relaxed)
);
CALL_DEPTH.fetch_sub(1, Ordering::Relaxed);
}
}
#[cfg(not(feature = "profile"))]
pub struct Timer {
_label: String,
}
#[cfg(not(feature = "profile"))]
impl Timer {
#[inline(always)]
pub fn new(label: &str) -> Self {
Self {
_label: label.to_string(),
}
}
#[inline(always)]
pub fn stop(&self) {}
#[inline(always)]
pub fn print(_msg: &str) {}
}


@@ -0,0 +1,63 @@
use super::group::CompressedGroup;
use super::scalar::Scalar;
pub use merlin::Transcript;
pub trait ProofTranscript {
fn append_protocol_name(&mut self, protocol_name: &'static [u8]);
fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar);
fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup);
fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar;
fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar>;
}
impl ProofTranscript for Transcript {
fn append_protocol_name(&mut self, protocol_name: &'static [u8]) {
self.append_message(b"protocol-name", protocol_name);
}
fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar) {
self.append_message(label, &scalar.to_bytes());
}
fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup) {
self.append_message(label, point.as_bytes());
}
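// Challenges are derived by squeezing 64 bytes and reducing them mod the field
// order ("wide" reduction), keeping the result statistically close to uniform.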
fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar {
let mut buf = [0u8; 64];
self.challenge_bytes(label, &mut buf);
Scalar::from_bytes_wide(&buf)
}
fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar> {
(0..len)
.map(|_i| self.challenge_scalar(label))
.collect::<Vec<Scalar>>()
}
}
pub trait AppendToTranscript {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript);
}
impl AppendToTranscript for Scalar {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_scalar(label, self);
}
}
impl AppendToTranscript for [Scalar] {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_message(label, b"begin_append_vector");
for item in self {
transcript.append_scalar(label, item);
}
transcript.append_message(label, b"end_append_vector");
}
}
impl AppendToTranscript for CompressedGroup {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_point(label, self);
}
}


@@ -0,0 +1,182 @@
use super::commitments::{Commitments, MultiCommitGens};
use super::group::GroupElement;
use super::scalar::{Scalar, ScalarFromPrimitives};
use super::transcript::{AppendToTranscript, ProofTranscript};
use merlin::Transcript;
use serde::{Deserialize, Serialize};
// ax^2 + bx + c stored as vec![c,b,a]
// ax^3 + bx^2 + cx + d stored as vec![d,c,b,a]
#[derive(Debug)]
pub struct UniPoly {
coeffs: Vec<Scalar>,
}
// ax^2 + bx + c stored as vec![c,a]
// ax^3 + bx^2 + cx + d stored as vec![d,b,a]
#[derive(Serialize, Deserialize, Debug)]
pub struct CompressedUniPoly {
coeffs_except_linear_term: Vec<Scalar>,
}
impl UniPoly {
pub fn from_evals(evals: &[Scalar]) -> Self {
// we only support degree-2 or degree-3 univariate polynomials
assert!(evals.len() == 3 || evals.len() == 4);
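// recover coefficients by interpolating the evaluations at X = 0, 1, 2 (and 3 for the cubic case)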
let coeffs = if evals.len() == 3 {
// ax^2 + bx + c
let two_inv = (2_usize).to_scalar().invert().unwrap();
let c = evals[0];
let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
let b = evals[1] - c - a;
vec![c, b, a]
} else {
// ax^3 + bx^2 + cx + d
let two_inv = (2_usize).to_scalar().invert().unwrap();
let six_inv = (6_usize).to_scalar().invert().unwrap();
let d = evals[0];
let a = six_inv
* (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1] - evals[0]);
let b = two_inv
* (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1]
+ evals[2]
+ evals[2]
+ evals[2]
+ evals[2]
- evals[3]);
let c = evals[1] - d - a - b;
vec![d, c, b, a]
};
UniPoly { coeffs }
}
pub fn degree(&self) -> usize {
self.coeffs.len() - 1
}
pub fn as_vec(&self) -> Vec<Scalar> {
self.coeffs.clone()
}
pub fn eval_at_zero(&self) -> Scalar {
self.coeffs[0]
}
pub fn eval_at_one(&self) -> Scalar {
(0..self.coeffs.len()).map(|i| self.coeffs[i]).sum()
}
pub fn evaluate(&self, r: &Scalar) -> Scalar {
let mut eval = self.coeffs[0];
let mut power = *r;
for i in 1..self.coeffs.len() {
eval += power * self.coeffs[i];
power *= r;
}
eval
}
pub fn compress(&self) -> CompressedUniPoly {
let coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat();
assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len());
CompressedUniPoly {
coeffs_except_linear_term,
}
}
pub fn commit(&self, gens: &MultiCommitGens, blind: &Scalar) -> GroupElement {
self.coeffs.commit(blind, gens)
}
}
impl CompressedUniPoly {
// we require eval(0) + eval(1) = hint, so we can solve for the linear term as:
// linear_term = hint - 2 * constant_term - deg2 term - deg3 term
pub fn decompress(&self, hint: &Scalar) -> UniPoly {
let mut linear_term =
hint - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
for i in 1..self.coeffs_except_linear_term.len() {
linear_term -= self.coeffs_except_linear_term[i];
}
let mut coeffs = vec![self.coeffs_except_linear_term[0], linear_term];
coeffs.extend(&self.coeffs_except_linear_term[1..]);
assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len());
UniPoly { coeffs }
}
}
impl AppendToTranscript for UniPoly {
fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
transcript.append_message(label, b"UniPoly_begin");
for i in 0..self.coeffs.len() {
transcript.append_scalar(b"coeff", &self.coeffs[i]);
}
transcript.append_message(label, b"UniPoly_end");
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_from_evals_quad() {
// polynomial is 2x^2 + 3x + 1
let e0 = Scalar::one();
let e1 = (6_usize).to_scalar();
let e2 = (15_usize).to_scalar();
let evals = vec![e0, e1, e2];
let poly = UniPoly::from_evals(&evals);
assert_eq!(poly.eval_at_zero(), e0);
assert_eq!(poly.eval_at_one(), e1);
assert_eq!(poly.coeffs.len(), 3);
assert_eq!(poly.coeffs[0], Scalar::one());
assert_eq!(poly.coeffs[1], (3_usize).to_scalar());
assert_eq!(poly.coeffs[2], (2_usize).to_scalar());
let hint = e0 + e1;
let compressed_poly = poly.compress();
let decompressed_poly = compressed_poly.decompress(&hint);
for i in 0..decompressed_poly.coeffs.len() {
assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
}
let e3 = (28_usize).to_scalar();
assert_eq!(poly.evaluate(&(3_usize).to_scalar()), e3);
}
#[test]
fn test_from_evals_cubic() {
// polynomial is x^3 + 2x^2 + 3x + 1
let e0 = Scalar::one();
let e1 = (7_usize).to_scalar();
let e2 = (23_usize).to_scalar();
let e3 = (55_usize).to_scalar();
let evals = vec![e0, e1, e2, e3];
let poly = UniPoly::from_evals(&evals);
assert_eq!(poly.eval_at_zero(), e0);
assert_eq!(poly.eval_at_one(), e1);
assert_eq!(poly.coeffs.len(), 4);
assert_eq!(poly.coeffs[0], Scalar::one());
assert_eq!(poly.coeffs[1], (3_usize).to_scalar());
assert_eq!(poly.coeffs[2], (2_usize).to_scalar());
assert_eq!(poly.coeffs[3], (1_usize).to_scalar());
let hint = e0 + e1;
let compressed_poly = poly.compress();
let decompressed_poly = compressed_poly.decompress(&hint);
for i in 0..decompressed_poly.coeffs.len() {
assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
}
let e4 = (109_usize).to_scalar();
assert_eq!(poly.evaluate(&(4_usize).to_scalar()), e4);
}
}


@@ -0,0 +1,22 @@
[package]
name = "circuit_reader"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bincode = "1.3.3"
secq256k1 = { path = "../secq256k1" }
spartan = { path = "../Spartan-secq" }
ff = "0.12.0"
byteorder = "1.4.3"
group = "0.12.0"
itertools = "0.9.0"
[[bin]]
name = "gen_spartan_inst"
path = "src/bin/gen_spartan_inst.rs"


@@ -0,0 +1,24 @@
#![allow(non_snake_case)]
use bincode;
use circuit_reader::load_as_spartan_inst;
use std::env::{args, current_dir};
use std::fs::File;
use std::io::Write;
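// Usage: gen_spartan_inst <circom_r1cs_path> <output_path> <num_pub_inputs>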
fn main() {
let circom_r1cs_path = args().nth(1).unwrap();
let output_path = args().nth(2).unwrap();
let num_pub_inputs = args().nth(3).unwrap().parse::<usize>().unwrap();
let root = current_dir().unwrap();
let circom_r1cs_path = root.join(circom_r1cs_path);
let spartan_inst = load_as_spartan_inst(circom_r1cs_path, num_pub_inputs);
let spartan_inst_bytes = bincode::serialize(&spartan_inst).unwrap();
File::create(root.join(output_path.clone()))
.unwrap()
.write_all(spartan_inst_bytes.as_slice())
.unwrap();
println!("Wrote Spartan circuit to {}", output_path);
}


@@ -1,41 +1,15 @@
#![allow(non_snake_case)]
use bincode;
mod circom_reader;
use circom_reader::{load_r1cs_from_bin_file, R1CS};
use ff::PrimeField;
use libspartan::Instance;
use secq256k1::AffinePoint;
use secq256k1::FieldBytes;
use spartan_wasm::circom_reader::{load_r1cs_from_bin_file, R1CS};
use std::env::{args, current_dir};
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
fn main() {
let circuit_path = args().nth(1).unwrap();
let output_path = args().nth(2).unwrap();
let num_pub_inputs = args().nth(3).unwrap().parse::<usize>().unwrap();
let root = current_dir().unwrap();
let circuit_path = root.join(circuit_path);
let spartan_inst = load_as_spartan_inst(circuit_path, num_pub_inputs);
let spartan_inst_bytes = bincode::serialize(&spartan_inst).unwrap();
File::create(root.join(output_path.clone()))
.unwrap()
.write_all(spartan_inst_bytes.as_slice())
.unwrap();
println!("Wrote Spartan circuit to {}", output_path);
}
pub fn load_as_spartan_inst(circuit_file: PathBuf, num_pub_inputs: usize) -> Instance {
let root = current_dir().unwrap();
let circuit_file = root.join(circuit_file);
let (r1cs, _) = load_r1cs_from_bin_file::<AffinePoint>(&circuit_file);
let spartan_inst = convert_to_spartan_r1cs(&r1cs, num_pub_inputs);
spartan_inst
}


@@ -1,6 +1,6 @@
{
"name": "circuits",
"version": "1.0.0",
"name": "@personaelabs/spartan-ecdsa-circuits",
"version": "0.1.0",
"main": "index.js",
"license": "MIT",
"dependencies": {
@@ -18,4 +18,4 @@
"ts-jest": "^29.0.3",
"typescript": "^4.9.4"
}
}
}


@@ -1,3 +1,5 @@
pragma circom 2.1.2;
function ROUND_KEYS() {
return [
15180568604901803243989155929934437997245952775071395385994322939386074967328,
@@ -213,4 +215,4 @@ function MDS_MATRIX() {
70274477372358662369456035572054501601454406272695978931839980644925236550307
]
];
}
}


@@ -0,0 +1,12 @@
[package]
name = "hoplite"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
secq256k1 = { path = "../secq256k1" }
spartan = { path = "../Spartan-secq" }
sha3 = { version = "0.8.2" }
secpq_curves = { git = "https://github.com/DanTehrani/secpq_curves.git" }


@@ -0,0 +1,8 @@
# Hoplite
Hoplite is a Spartan reference implementation designed to be the spec for the Halo2 Spartan verification circuit. [Srinath's Spartan implementation](https://github.com/microsoft/Spartan) uses stateful classes, making it difficult to conceptualize the verification process in terms of circuit constraints. To better understand the verification process, it is helpful to re-implement the verification in a circuit-like coding style. For example:
- The verification should be stateless (i.e. should employ functional programming)
- The R1CS matrices should be hard-coded into the circuit
Additionally, this reference implementation should include thorough documentation to facilitate collaboration and audits.

packages/hoplite/spartan.md Normal file

@@ -0,0 +1,212 @@
# Spartan: Full Protocol Description
_This doc is a work in progress; we don't recommend reading it yet._
_Reference implementation: [Hoplite](https://github.com/personaelabs/Hoplite)_
## Public Setup
- Compute the Pedersen commitment generators using [hash-to-curve](https://github.com/personaelabs/spartan-ecdsa/blob/main/packages/secq256k1/src/hashtocurve.rs).
## Building blocks
$F_p$: The finite field used in the protocol.
### Pedersen commitment
Commitment
Multi-commitments
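A minimal sketch of the scheme assumed throughout this doc (written additively; $g$, $g_1, \ldots, g_n$, and $h$ are independent generators derived in the setup above):
$$\mathrm{com}(v, r) = v \cdot g + r \cdot h$$
$$\mathrm{multicom}(\vec{v}, r) = \sum_{i=1}^{n} v_i \cdot g_i + r \cdot h$$
Both are hiding via the blind $r$ and additively homomorphic, which the target-commitment checks below rely on.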
### proof-of-dot-prod
TBD
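A sketch of the statement being proven (matching `DotProductProof` in the Spartan code; the full protocol is still TBD): given a public vector $\vec{a}$ and commitments $C_1 = \mathrm{multicom}(\vec{x}, \rho_1)$, $C_2 = \mathrm{com}(y, \rho_2)$, the prover shows knowledge of openings such that $\langle \vec{x}, \vec{a} \rangle = y$.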
### proof-of-equality
TBD
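A sketch of the statement (matching `EqualityProof` in the Spartan code; full protocol TBD): given $C_1 = \mathrm{com}(v, \rho_1)$ and $C_2 = \mathrm{com}(v, \rho_2)$, the prover shows both commit to the same value; equivalently, that $C_1 - C_2$ is a commitment to $0$.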
### proof-of-opening
TBD
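A sketch of the statement (matching `KnowledgeProof` in the Spartan code; full protocol TBD): given $C = \mathrm{com}(v, \rho)$, the prover shows knowledge of an opening $(v, \rho)$, Schnorr-style.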
### Closed form evaluation of a multilinear polynomial
$$
\widetilde{Z}(r_y) = (1 - r_y[0]) \cdot \widetilde{w}(r_y[1..]) + r_y[0] \cdot \widetilde{(io, 1)}(r_y[1..])
$$
For example, with $r_y = (2, 3, 4)$, $\widetilde{w}(x_1, x_2) = x_1 + 2x_2$, and $\widetilde{(io, 1)}(x_1, x_2) = x_1 + 2x_2 + 3$:
$$
\widetilde{Z}(r_y) = (1 - 2) \cdot (3 + 2 \cdot 4) + 2 \cdot (3 + 2 \cdot 4 + 3) = -1 \cdot 11 + 2 \cdot 14 = 17
$$
$z = (io, 1, w)$
### zk-sum-check
The details of the zk-sum-check protocol aren't provided in the Spartan paper (it only mentions that it uses methods from prior constructions). The following is a description of the zk-sum-check protocol used in the [original Spartan implementation](https://github.com/microsoft/Spartan).
_Required prior knowledge: [The sum-check protocol](https://zkproof.org/2020/03/16/sum-checkprotocol/)_
**Notations**
- $g$: The polynomial whose sum is proven. We assume that $g$ is a multilinear polynomial (i.e. degree 1 in each variable) for simplicity.
- $H$: The sum of evaluations of $g$ over the boolean hypercube.
- $m$: The number of variables in $g$.
- $s$: $\lfloor \log_2 m \rfloor$
The protocol consists of $m$ rounds.
### Prover: First round
In the first round, the prover computes
$$g_1(X) = \sum_{(x_2, \ldots, x_m) \in \{0, 1\}^{m-1}} g(X, x_2, \ldots, x_m)$$
In the standard sum-check protocol $g_1$ is sent to the verifier and the verifier checks
$$g_1(0) + g_1(1) \stackrel{?}{=} H$$
and
$$g_1(r_1) \stackrel{?}{=} \sum_{(x_2, \ldots, x_m) \in \{0, 1\}^{m-1}} g(r_1, x_2, \ldots, x_m)$$
where $r_1$ is a challenge.
The evaluation of $g$ in the second check is proven in the successive sum-check protocol.
In zk-sum-check, we instead provide proofs of the evaluations $g_1(0)$, $g_1(1)$, and $g_1(r_1)$ without revealing the coefficients of $g_1$, using proof-of-dot-prod. For efficiency, we combine the evaluations into a single proof as follows.
First, since we assume $g$ is a multilinear polynomial, we can write
$$g_1(X) = p_1X + p_0$$
where $p_0, p_1 \in F_p$. $p_1$ is the slope and $p_0$ is the y-intercept.
Before running proof-of-dot-prod, the prover must send commitments
$$C_{g1} = \mathrm{multicom}((p_1, p_0), r_{g1})$$
$$C_{eval} = \mathrm{com}(g_1(r_1), r_{eval})$$
$$C_{sum} = \mathrm{com}(g_1(0) + g_1(1), r_{sum})$$
to the verifier.
The prover computes the weighted sum of $g_1(0) + g_1(1)$ and $g_1(r_1)$ using weights $w_0, w_1 \in F_p$ sent from the verifier:
$$(g_1(0) + g_1(1)) * w_0 + g_1(r_1) * w_1$$
$$= p_1w_0 + 2p_0w_0 + p_1w_1r_1 + p_0w_1$$
$$= p_1(w_0 + r_1w_1) + p_0(2w_0 + w_1)$$
Thus, we use proof-of-dot-prod to prove
$$(w_0 + r_1w_1, 2w_0 + w_1) \cdot (p_1, p_0) = (g_1(0) + g_1(1)) * w_0 + g_1(r_1) * w_1$$
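A minimal sketch of how the combined decommit vector is formed, mirroring the `a_sc` / `a_eval` construction in `ZKSumcheckInstanceProof::prove_quad` above (`u64` stands in for the field type `Scalar`, and modular reduction is elided; the function name is illustrative):
```rust
// Round polynomial g_1(X) = p_0 + p_1*X, stored constant-term-first as [p_0, p_1].
// <[p_0, p_1], a_sc>   = 2*p_0 + p_1    = g_1(0) + g_1(1)
// <[p_0, p_1], a_eval> = p_0 + p_1*r_1  = g_1(r_1)
fn combined_decommit_vector(w0: u64, w1: u64, r1: u64) -> Vec<u64> {
    let a_sc = [2, 1];
    let a_eval = [1, r1];
    // the weighted vector is (2*w_0 + w_1, w_0 + r_1*w_1)
    (0..2).map(|i| w0 * a_sc[i] + w1 * a_eval[i]).collect()
}
```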
Now we proceed to the rest of the rounds
### Prover: Rest of the rounds
The rest of the rounds proceed similarly to the first round, except that the prover proves the evaluations of the polynomial
$$g_i(X) = \sum_{(x_{i+1}, \ldots, x_m) \in \{0, 1\}^{m-i}} g(r_1, \ldots, r_{i-1}, X, x_{i+1}, \ldots, x_m)$$
### Prover: Last round
In the standard sum-check protocol, the verifier queries $g(r_1, \ldots, r_m)$ using the oracle for $g$ and checks that the result equals $g_m(r_m)$. In Spartan's version of zk-sum-check, the prover instead provides a proof of the evaluation of $g(r_1, \ldots, r_m)$ **by running another zk-sum-check**. The details of this second zk-sum-check protocol are described later in this doc.
### Verification
The verifier receives
- Claimed sum $H$
- proof-of-dot-products $\{dp_1, dp_2, ... dp_m\}$
Recall that the dot-product relation is
$$(w_0 + r_1w_1, 2w_0 + w_1) \cdot (p_1, p_0) = (g_1(0) + g_1(1)) * w_0 + g_1(r_1) * w_1$$
The verifier has access to $r_1, w_0, w_1$ and the commitments $C_{g1}, C_{sum}, C_{eval}$.
The verifier computes the **target commitment**
$$C_t = C_{sum} \cdot w_0 + C_{eval} \cdot w_1$$
and checks the dot product proof
$$
TBD
$$
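(Per `DotProductProof::verify` in the code above, this check takes the public weighted vector $(w_0 + r_1w_1,\ 2w_0 + w_1)$, the commitment $C_{g1}$ to the coefficients, and the target commitment $C_t$.)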
## Main Protocol
Now we'll see how Spartan (_SpartanNIZK to be precise!_) uses the above building blocks to construct an NIZK for R1CS satisfiability.
---
**Below this is especially WIP! A lot of incomplete stuff!**
1. $P$: Commit to the witness polynomial: $C = PC.commit(pp, \bar{w}, S)$, and send $C$ to the verifier.
2. $V$: Randomly sample a challenge $\tau \in \mathbb{F}^{\log m}$ to query $g$, and send $\tau$ to the prover.
3. Let $T_1 = 0$.
4. $V$: Sample $r_x \in \mathbb{F}^{u_1}$.
5. Define
$$G_{io,\tau}(x) = \Big(\sum_{y \in \{0, 1\}^s} \widetilde{A}(x, y)\widetilde{Z}(y) \cdot \sum_{y \in \{0, 1\}^s}\widetilde{B}(x, y)\widetilde{Z}(y) - \sum_{y \in \{0, 1\}^s}\widetilde{C}(x, y)\widetilde{Z}(y)\Big)\widetilde{eq}(x, \tau)$$
$\sum_{x \in \{0, 1\}^s} G_{io,\tau}(x) = 0$ for a random $\tau$ (with high probability) iff all the constraints are satisfied.
- Run sum-check on $G_{io,\tau}(x)$.
- At the last step of the sum-check, where the verifier queries $G_{io,\tau}(x)$, we use the following sub-protocol.
Define
- $\bar{A}(x) = \sum_{y \in \{0, 1\}^s} \widetilde{A}(x, y)\widetilde{Z}(y)$
- $\bar{B}(x) = \sum_{y \in \{0, 1\}^s} \widetilde{B}(x, y)\widetilde{Z}(y)$
- $\bar{C}(x) = \sum_{y \in \{0, 1\}^s} \widetilde{C}(x, y)\widetilde{Z}(y)$
- $M_{r_x}(y) = r_A \cdot \widetilde{A}(r_x, y)\widetilde{Z}(y) + r_B \cdot \widetilde{B}(r_x, y)\widetilde{Z}(y) + r_C \cdot \widetilde{C}(r_x, y)\widetilde{Z}(y)$
Verify that $\bar{A}(x) \cdot \bar{B}(x) - \bar{C}(x) = 0$ for every $x \in \{0, 1\}^s$.
Run the sum-check protocol to verify $M_{r_x}(y)$
- $P$
- Send evaluations $v_A = \bar{A}(r_x), v_B = \bar{B}(r_x), v_C = \bar{C}(r_x)$ to the verifier.
- Send the opening $v_Z = Z(r_x)$ to the verifier
- $V$
- Check $(v_A \cdot v_B - v_C) * \widetilde{eq}(r_x, \tau) = e_x$
The last part of the second sum-check protocol
- $v_1 = \widetilde{A}(r_x, r_y)$
- $v_2 = \widetilde{B}(r_x, r_y)$
- $v_3 = \widetilde{C}(r_x, r_y)$
- check that $(r_A * v_1 + r_B * v_2 + r_C * v_3) * v_Z = e_y$
In the last round, the verifier needs to query $g(x)$. We will construct a protocol that is specific to Spartan that allows us to query $g(x)$ in zero-knowledge.
### The second zk-sum-check
Instead of constructing a generic method to evaluate $g(X)$ in zero-knowledge, we focus on the $g(X)$ that is specific to Spartan. Recall that we want to prove the sum of
$$G_{io,\tau}(x) = \left(\sum_{y \in \{0, 1\}^s} \widetilde{A}(x, y)\widetilde{Z}(y) \cdot \sum_{y \in \{0, 1\}^s}\widetilde{B}(x, y)\widetilde{Z}(y) - \sum_{y \in \{0, 1\}^s}\widetilde{C}(x, y)\widetilde{Z}(y)\right)\widetilde{eq}(x, \tau)$$
Each term of $\widetilde{F}(x)$ is in a form that is suitable for applying the sum-check protocol. Assuming for now that we can check the validity of each term (i.e., each sum of $\widetilde{A}(x, y)\widetilde{Z}(y)$, $\widetilde{B}(x, y)\widetilde{Z}(y)$ and $\widetilde{C}(x, y)\widetilde{Z}(y)$), we can check the relation between the sums as follows.
Define
- $\bar{A}(x) = \sum_{y \in \{0, 1\}^s} \widetilde{A}(x, y)\widetilde{Z}(y)$
- $\bar{B}(x) = \sum_{y \in \{0, 1\}^s} \widetilde{B}(x, y)\widetilde{Z}(y)$
- $\bar{C}(x) = \sum_{y \in \{0, 1\}^s} \widetilde{C}(x, y)\widetilde{Z}(y)$
Now, recall that we want to evaluate $G_{io,\tau}(x)$ only at the last round of the zk-sum-check, over all the round challenges $r_x = \{r_1, r_2, \ldots, r_m\}$.
Hence the prover can provide the evaluations $v_A, v_B$ and $v_C$ to the verifier.
$$v_A = \bar{A}(r_x), v_B = \bar{B}(r_x), v_C = \bar{C}(r_x)$$
The verifier checks that the evaluation of $G_{io,\tau}$ is equal to the evaluation $g_m(r_m)$
$$g_m(r_m) \stackrel{?}{=} (v_A \cdot v_B - v_C)\widetilde{eq}(r_x, \tau)$$
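In code, this last check is plain field arithmetic; a minimal sketch using the `eq_eval` helper from `packages/hoplite`, with the inputs assumed to come from the transcript and the prover:

```rust
use hoplite::{eq_eval, Fq};

/// g_m(r_m) =? (v_A * v_B - v_C) * eq(r_x, tau)
fn final_check(g_m_at_rm: Fq, v_a: Fq, v_b: Fq, v_c: Fq, r_x: &[Fq], tau: &[Fq]) -> bool {
    g_m_at_rm == (v_a * v_b - v_c) * eq_eval(r_x, tau)
}
```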
The verifier also needs to check the validity of $\bar{A}(x), \bar{B}(x), \bar{C}(x)$.
This is where the second zk-sum-check comes in.
We could check each term individually, but for efficiency, we use a random linear combination of the three terms.
The verifier samples challenges $r_A, r_B, r_C \in_R F_p$ to compute the random linear combination
$$\widetilde{M}_{r_x}(y) = r_A \widetilde{A}(r_x, y)\widetilde{Z}(y) + r_B\widetilde{B}(r_x, y)\widetilde{Z}(y) + r_C\widetilde{C}(r_x, y)\widetilde{Z}(y)$$
Its sum over the hypercube is $r_A \bar{A}(r_x) + r_B\bar{B}(r_x) + r_C\bar{C}(r_x)$, and its evaluation at the final round challenges $r_y$ is
$$\widetilde{M}_{r_x}(r_y) = (r_A\widetilde{A}(r_x, r_y) + r_B\widetilde{B}(r_x, r_y) + r_C\widetilde{C}(r_x, r_y))\widetilde{Z}(r_y)$$
At the end of the second zk-sum-check, the verifier needs to evaluate $\widetilde{Z}(r_y)$. In order to evaluate it without knowing the coefficients, we use the proof-log-of-dot-prod protocol. Note that the prover has to commit to $Z(x)$ at the beginning, so it cannot just come up with a $Z(x)$ that passes the final check of the second zk-sum-check.
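A minimal sketch of the commitment-side binding used for this evaluation, mirroring the `C_LZ` computation in `poly_evaluation_proof::verify` later in this diff; `comm_poly` and the eq-tensor weights `l` are assumed given:

```rust
use hoplite::Fq;
use secpq_curves::{group::Group, Secq256k1};

/// Homomorphically bind the left half of the eq tensor to the committed
/// rows of Z's evaluation table: C_LZ = sum_i l[i] * comm_poly[i].
fn bind_rows(comm_poly: &[Secq256k1], l: &[Fq]) -> Secq256k1 {
    let mut c_lz = Secq256k1::identity();
    for (c_i, l_i) in comm_poly.iter().zip(l.iter()) {
        c_lz += *c_i * *l_i;
    }
    c_lz
}
```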

View File

@@ -0,0 +1,395 @@
use crate::{Fp, Fq};
use libspartan::{
dense_mlpoly::{PolyCommitment, PolyEvalProof},
group::CompressedGroup,
nizk::{BulletReductionProof, DotProductProof, EqualityProof, KnowledgeProof, ProductProof},
scalar::Scalar,
sumcheck::ZKSumcheckInstanceProof,
};
use secpq_curves::{
group::{prime::PrimeCurveAffine, Curve},
CurveAffine, Secq256k1, Secq256k1Affine,
};
use secq256k1::{
affine::Group,
elliptic_curve::{
subtle::{Choice, ConditionallySelectable, ConstantTimeEq},
Field, PrimeField,
},
};
use std::option::Option;
// ############################
// `CV` stands for `Circuit Value`.
// ############################
#[derive(Debug)]
pub struct CVSumCheckProof {
pub comm_polys: Vec<Option<Secq256k1>>,
pub comm_evals: Vec<Option<Secq256k1>>,
pub proofs: Vec<CVDotProdProof>,
}
impl CVSumCheckProof {
pub fn without_witness(num_rounds: usize, poly_degree: usize) -> Self {
Self {
comm_polys: vec![None; num_rounds],
comm_evals: vec![None; num_rounds],
// We pass poly_degree + 1 because we're counting the degree 0 term as well.
proofs: vec![CVDotProdProof::without_witness(poly_degree + 1); num_rounds],
}
}
}
pub struct CVBulletReductionProof {
pub L_vec: Vec<Option<Secq256k1>>,
pub R_vec: Vec<Option<Secq256k1>>,
}
impl CVBulletReductionProof {
fn without_witness(vec_len: usize) -> Self {
assert!(vec_len % 2 == 0, "vec_len must be even");
Self {
L_vec: vec![None; vec_len / 2],
R_vec: vec![None; vec_len / 2],
}
}
}
#[derive(Debug, Clone)]
pub struct CVDotProdProof {
pub delta: Option<Secq256k1>,
pub beta: Option<Secq256k1>,
pub z: Vec<Option<Fq>>,
pub z_delta: Option<Fq>,
pub z_beta: Option<Fq>,
}
impl CVDotProdProof {
fn without_witness(vec_len: usize) -> Self {
Self {
delta: None,
beta: None,
z: vec![None; vec_len],
z_delta: None,
z_beta: None,
}
}
}
pub struct CVEqualityProof {
pub alpha: Option<Secq256k1>,
pub z: Option<Fq>,
}
impl Default for CVEqualityProof {
fn default() -> Self {
Self {
alpha: None,
z: None,
}
}
}
pub struct CVKnowledgeProof {
pub alpha: Option<Secq256k1>,
pub z1: Option<Fq>,
pub z2: Option<Fq>,
}
impl Default for CVKnowledgeProof {
fn default() -> Self {
Self {
alpha: None,
z1: None,
z2: None,
}
}
}
pub struct CVProductProof {
pub alpha: Option<Secq256k1>,
pub beta: Option<Secq256k1>,
pub delta: Option<Secq256k1>,
pub z: [Option<Fq>; 5],
}
impl Default for CVProductProof {
fn default() -> Self {
Self {
alpha: None,
beta: None,
delta: None,
z: [None; 5],
}
}
}
pub struct CVDotProductProofLog {
pub bullet_reduction_proof: CVBulletReductionProof,
pub delta: Option<Secq256k1>,
pub beta: Option<Secq256k1>,
pub z1: Option<Fq>,
pub z2: Option<Fq>,
}
impl CVDotProductProofLog {
fn without_witness(vec_len: usize) -> Self {
Self {
bullet_reduction_proof: CVBulletReductionProof::without_witness(vec_len),
delta: None,
beta: None,
z1: None,
z2: None,
}
}
}
pub struct CVPolyEvalProof {
pub proof: CVDotProductProofLog,
}
impl CVPolyEvalProof {
pub fn without_witness(vec_len: usize) -> Self {
Self {
proof: CVDotProductProofLog::without_witness(vec_len),
}
}
}
pub struct CVPolyCommitment {
pub C: Vec<Option<Secq256k1>>,
}
impl CVPolyCommitment {
pub fn without_witness(vec_len: usize) -> Self {
let C = vec![None; vec_len];
Self { C }
}
}
// Convert the types defined in the `secq256k1` crate
// to the types defined in the `secpq_curves` crate.
// This conversion is necessary because,
// `libspartan` uses `secq256k1` for curve/field operations
// whereas halo2 uses `secpq_curves`
// In general, we need to do the following two conversions
// `CompressedGroup` -> `Secq256k1`
// `Scalar` -> `Fq`
pub trait ToCircuitVal<V> {
fn to_circuit_val(&self) -> V;
}
pub trait FromCircuitVal<V> {
fn from_circuit_val(v: &V) -> Self;
}
impl FromCircuitVal<Secq256k1> for CompressedGroup {
fn from_circuit_val(point: &Secq256k1) -> CompressedGroup {
if point.is_identity().into() {
return CompressedGroup::identity();
}
let coords = point.to_affine().coordinates().unwrap();
let mut x = coords.x().to_bytes();
let mut y = coords.y().to_bytes();
x.reverse();
y.reverse();
let result = CompressedGroup::from_affine_coordinates(&x.into(), &y.into(), true);
result
}
}
impl ToCircuitVal<Fq> for Scalar {
fn to_circuit_val(&self) -> Fq {
let bytes = self.to_bytes();
Fq::from_bytes(&bytes).unwrap()
}
}
impl ToCircuitVal<CVEqualityProof> for EqualityProof {
fn to_circuit_val(&self) -> CVEqualityProof {
let alpha = Some(self.alpha.to_circuit_val());
let z = Some(self.z.to_circuit_val());
CVEqualityProof { alpha, z }
}
}
impl ToCircuitVal<CVKnowledgeProof> for KnowledgeProof {
fn to_circuit_val(&self) -> CVKnowledgeProof {
let alpha = Some(self.alpha.to_circuit_val());
let z1 = Some(self.z1.to_circuit_val());
let z2 = Some(self.z2.to_circuit_val());
CVKnowledgeProof { alpha, z1, z2 }
}
}
impl ToCircuitVal<CVProductProof> for ProductProof {
fn to_circuit_val(&self) -> CVProductProof {
let alpha = Some(self.alpha.to_circuit_val());
let beta = Some(self.beta.to_circuit_val());
let delta = Some(self.delta.to_circuit_val());
let z: [Option<Fq>; 5] = self
.z
.iter()
.map(|z_i| Some(z_i.to_circuit_val()))
.collect::<Vec<Option<Fq>>>()
.try_into()
.unwrap();
CVProductProof {
alpha,
beta,
delta,
z,
}
}
}
impl ToCircuitVal<CVPolyEvalProof> for PolyEvalProof {
fn to_circuit_val(&self) -> CVPolyEvalProof {
let dotprod_proof_log = &self.proof;
let beta = Some(dotprod_proof_log.beta.to_circuit_val());
let delta = Some(dotprod_proof_log.delta.to_circuit_val());
let z1 = Some(dotprod_proof_log.z1.to_circuit_val());
let z2 = Some(dotprod_proof_log.z2.to_circuit_val());
let cv_bullet_reduction_proof = CVBulletReductionProof {
L_vec: dotprod_proof_log
.bullet_reduction_proof
.L_vec
.iter()
.map(|val| Some(val.compress().to_circuit_val()))
.collect::<Vec<Option<Secq256k1>>>()
.try_into()
.unwrap(),
R_vec: dotprod_proof_log
.bullet_reduction_proof
.R_vec
.iter()
.map(|val| Some(val.compress().to_circuit_val()))
.collect::<Vec<Option<Secq256k1>>>()
.try_into()
.unwrap(),
};
let cv_dotprod_proof_log = CVDotProductProofLog {
delta,
beta,
z1,
z2,
bullet_reduction_proof: cv_bullet_reduction_proof,
};
CVPolyEvalProof {
proof: cv_dotprod_proof_log,
}
}
}
impl ToCircuitVal<CVPolyCommitment> for PolyCommitment {
fn to_circuit_val(&self) -> CVPolyCommitment {
let C = self
.C
.iter()
.map(|c| Some(c.to_circuit_val()))
.collect::<Vec<Option<Secq256k1>>>()
.try_into()
.unwrap();
CVPolyCommitment { C }
}
}
impl ToCircuitVal<Secq256k1> for CompressedGroup {
fn to_circuit_val(&self) -> Secq256k1 {
if self.is_identity() {
return Secq256k1::identity();
}
let mut x_bytes: [u8; 32] = (*self.x().unwrap()).try_into().unwrap();
// x_bytes is in big-endian!
x_bytes.reverse();
let x = Fp::from_bytes(&x_bytes).unwrap();
let coords = self.coordinates();
let y_odd: Choice = match coords.tag() {
secq256k1::elliptic_curve::sec1::Tag::CompressedOddY => Choice::from(1),
secq256k1::elliptic_curve::sec1::Tag::CompressedEvenY => Choice::from(0),
_ => Choice::from(0),
};
let x3 = x.square() * x;
let b = Fp::from_raw([7, 0, 0, 0]);
let y = (x3 + b).sqrt();
let res = y
.map(|y| {
let y = Fp::conditional_select(&-y, &y, y.is_odd().ct_eq(&y_odd));
let p = Secq256k1Affine::from_xy(x, y).unwrap();
p.to_curve()
})
.unwrap();
res
}
}
impl ToCircuitVal<CVDotProdProof> for DotProductProof {
fn to_circuit_val(&self) -> CVDotProdProof {
CVDotProdProof {
delta: Some(self.delta.to_circuit_val()),
beta: Some(self.beta.to_circuit_val()),
z_beta: Some(self.z_beta.to_circuit_val()),
z_delta: Some(self.z_delta.to_circuit_val()),
z: self
.z
.iter()
.map(|z_i| Some(z_i.to_circuit_val()))
.collect::<Vec<Option<Fq>>>()
.try_into()
.unwrap(),
}
}
}
impl ToCircuitVal<CVSumCheckProof> for ZKSumcheckInstanceProof {
fn to_circuit_val(&self) -> CVSumCheckProof {
let mut proofs = vec![];
let mut comm_polys = vec![];
let mut comm_evals = vec![];
for i in 0..self.proofs.len() {
proofs.push(self.proofs[i].to_circuit_val());
comm_polys.push(Some(self.comm_polys[i].to_circuit_val()));
comm_evals.push(Some(self.comm_evals[i].to_circuit_val()));
}
CVSumCheckProof {
comm_polys,
comm_evals,
proofs,
}
}
}
impl ToCircuitVal<CVBulletReductionProof> for BulletReductionProof {
fn to_circuit_val(&self) -> CVBulletReductionProof {
let mut L_vec = vec![];
let mut R_vec = vec![];
for i in 0..self.L_vec.len() {
L_vec.push(Some(self.L_vec[i].to_circuit_val()));
R_vec.push(Some(self.R_vec[i].to_circuit_val()));
}
CVBulletReductionProof { L_vec, R_vec }
}
}

View File

@@ -0,0 +1,98 @@
use crate::Fq;
use secpq_curves::Secq256k1;
use secq256k1::{affine::Group, AffinePoint};
use sha3::{
digest::{ExtendableOutput, Input},
Shake256,
};
use std::{io::Read, ops::Mul};
use crate::circuit_vals::ToCircuitVal;
pub struct MultiCommitGens {
pub G: Vec<Secq256k1>,
pub h: Secq256k1,
}
impl Default for MultiCommitGens {
fn default() -> Self {
MultiCommitGens {
G: vec![],
h: Secq256k1::default(),
}
}
}
impl From<libspartan::commitments::MultiCommitGens> for MultiCommitGens {
fn from(gens: libspartan::commitments::MultiCommitGens) -> Self {
MultiCommitGens {
G: gens
.G
.iter()
.map(|g| g.compress().to_circuit_val())
.collect(),
h: gens.h.compress().to_circuit_val(),
}
}
}
impl MultiCommitGens {
pub fn new(n: usize, label: &[u8]) -> Self {
let mut shake = Shake256::default();
shake.input(label);
shake.input(AffinePoint::generator().compress().as_bytes());
let mut reader = shake.xof_result();
let mut gens: Vec<Secq256k1> = Vec::new();
let mut uniform_bytes = [0u8; 128];
for _ in 0..n + 1 {
reader.read_exact(&mut uniform_bytes).unwrap();
let gen = AffinePoint::from_uniform_bytes(&uniform_bytes).compress();
gens.push(gen.to_circuit_val());
}
MultiCommitGens {
G: gens[..n].to_vec(),
h: gens[n],
}
}
pub fn scale(&self, s: &Fq) -> MultiCommitGens {
MultiCommitGens {
h: self.h,
G: (0..self.G.len()).map(|i| self.G[i] * s).collect(),
}
}
}
pub trait Commitments {
fn commit(&self, blind: &Fq, gens: &MultiCommitGens) -> Secq256k1;
}
impl Commitments for Fq {
fn commit(&self, blind: &Fq, gens: &MultiCommitGens) -> Secq256k1 {
gens.G[0] * self + gens.h * blind
}
}
impl Commitments for Vec<Fq> {
fn commit(&self, blind: &Fq, gens: &MultiCommitGens) -> Secq256k1 {
let mut result = Secq256k1::identity();
for (i, val) in self.iter().enumerate() {
result += gens.G[i] * val;
}
result += gens.h * blind;
result
}
}
impl Commitments for [Fq] {
fn commit(&self, blind: &Fq, gens: &MultiCommitGens) -> Secq256k1 {
let mut result = Secq256k1::identity();
for (i, val) in self.iter().enumerate() {
result += gens.G[i] * val;
}
result += gens.h * blind;
result
}
}

View File

@@ -0,0 +1,73 @@
use crate::{
circuit_vals::{CVDotProdProof, FromCircuitVal},
commitments::Commitments,
utils::to_fq,
Fq, MultiCommitGens,
};
use libspartan::{
group::CompressedGroup,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
};
use secpq_curves::{group::Curve, Secq256k1};
// Utilities
pub fn dot_prod(x: &[Fq], a: &[Fq]) -> Fq {
let mut result = Fq::zero();
for (x, a) in x.iter().zip(a.iter()) {
result += *x * *a;
}
result
}
// https://eprint.iacr.org/2017/1132.pdf
// P.18, Figure 6, steps 4
pub fn verify(
tau: &Secq256k1,
a: &[Fq],
proof: &CVDotProdProof,
com_poly: &Secq256k1,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
transcript.append_protocol_name(b"dot product proof");
CompressedGroup::from_circuit_val(com_poly).append_to_transcript(b"Cx", transcript);
CompressedGroup::from_circuit_val(tau).append_to_transcript(b"Cy", transcript);
transcript.append_message(b"a", b"begin_append_vector");
for a_i in a {
transcript.append_message(b"a", &a_i.to_bytes());
}
transcript.append_message(b"a", b"end_append_vector");
CompressedGroup::from_circuit_val(&proof.delta.unwrap())
.append_to_transcript(b"delta", transcript);
CompressedGroup::from_circuit_val(&proof.beta.unwrap())
.append_to_transcript(b"beta", transcript);
let c = to_fq(&transcript.challenge_scalar(b"c"));
// (13)
let lhs = (com_poly * c) + proof.delta.unwrap();
let rhs = proof
.z
.iter()
.map(|z_i| z_i.unwrap())
.collect::<Vec<Fq>>()
.commit(&proof.z_delta.unwrap(), gens_n);
assert!(lhs == rhs, "dot prod verification failed (13)");
// (14)
let lhs = (tau * c) + proof.beta.unwrap();
let rhs = dot_prod(
&proof.z.iter().map(|z_i| z_i.unwrap()).collect::<Vec<Fq>>(),
a,
)
.commit(&proof.z_beta.unwrap(), gens_1);
assert!(lhs == rhs, "dot prod verification failed (14)");
}

319
packages/hoplite/src/lib.rs Normal file
View File

@@ -0,0 +1,319 @@
#![allow(non_snake_case)]
use crate::circuit_vals::{CVSumCheckProof, ToCircuitVal};
use commitments::{Commitments, MultiCommitGens};
pub use libspartan::scalar::Scalar;
use libspartan::{
group::DecompressEncodedPoint,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
Instance, NIZKGens, NIZK,
};
use secpq_curves::{group::Curve, Secq256k1};
pub mod circuit_vals;
pub mod commitments;
pub mod dotprod;
pub mod poly_evaluation_proof;
pub mod proof_bullet_reduce;
pub mod proof_log_of_dotprod;
pub mod proof_of_eq;
pub mod proof_of_opening;
pub mod proof_of_prod;
pub mod sumcheck;
pub mod utils;
use utils::eval_ml_poly;
pub type Fp = secpq_curves::Fq;
pub type Fq = secpq_curves::Fp;
pub fn eq_eval(t: &[Fq], x: &[Fq]) -> Fq {
let mut result = Fq::one();
for i in 0..t.len() {
result *= t[i] * x[i] + (Fq::one() - t[i]) * (Fq::one() - x[i]);
}
result
}
/**
* Verify a SpartanNIZK proof
*/
pub fn verify_nizk(
inst: &Instance,
input: &[libspartan::scalar::Scalar],
proof: &NIZK,
gens: &NIZKGens,
) {
// Append the domain parameters to the transcript
let mut transcript = Transcript::new(b"test_verify");
transcript.append_protocol_name(b"Spartan NIZK proof");
transcript.append_message(b"R1CSInstanceDigest", &inst.digest);
transcript.append_protocol_name(b"R1CS proof");
input.append_to_transcript(b"input", &mut transcript);
proof
.r1cs_sat_proof
.comm_vars
.append_to_transcript(b"poly_commitment", &mut transcript);
let tau: Vec<Fq> = transcript
.challenge_vector(
b"challenge_tau",
proof.r1cs_sat_proof.sc_proof_phase1.proofs.len(),
)
.iter()
.map(|tau_i| tau_i.to_circuit_val())
.collect();
// Convert the generators to circuit value representations
let gens_1: MultiCommitGens = gens.gens_r1cs_sat.gens_sc.gens_1.clone().into();
let gens_3: MultiCommitGens = gens.gens_r1cs_sat.gens_sc.gens_3.clone().into();
let gens_4: MultiCommitGens = gens.gens_r1cs_sat.gens_sc.gens_4.clone().into();
let gens_pc_gens = &gens.gens_r1cs_sat.gens_pc.gens;
let gens_pc_1: MultiCommitGens = gens_pc_gens.gens_1.clone().into();
let gens_pc_n: MultiCommitGens = gens_pc_gens.gens_n.clone().into();
let sc_proof_phase1: CVSumCheckProof = proof.r1cs_sat_proof.sc_proof_phase1.to_circuit_val();
// The expected sum of the phase 1 sum-check is zero
let phase1_expected_sum = Fq::zero().commit(&Fq::zero(), &gens_1);
// comm_claim_post_phase1: Commitment to the claimed evaluation of the final round polynomial over rx
let (comm_claim_post_phase1, rx) = sumcheck::verify(
3,
&phase1_expected_sum,
&sc_proof_phase1,
&gens_1,
&gens_4,
&mut transcript,
);
// Verify Az * Bz = Cz
let (comm_Az_claim, comm_Bz_claim, comm_Cz_claim, comm_prod_Az_Bz_claims) =
&proof.r1cs_sat_proof.claims_phase2;
// First, we verify that the prover knows the opening to comm_Cz_claim
let (pok_Cz_claim, proof_prod) = &proof.r1cs_sat_proof.pok_claims_phase2;
proof_of_opening::verify(
&comm_Cz_claim.to_circuit_val(),
&pok_Cz_claim.to_circuit_val(),
&gens_1,
&mut transcript,
);
// Second, we verify Az * Bz = "Commitment to the claimed prod"
proof_of_prod::verify(
&proof_prod.to_circuit_val(),
comm_Az_claim.to_circuit_val(),
comm_Bz_claim.to_circuit_val(),
comm_prod_Az_Bz_claims.to_circuit_val(),
&gens_1,
&mut transcript,
);
comm_Az_claim.append_to_transcript(b"comm_Az_claim", &mut transcript);
comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", &mut transcript);
comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", &mut transcript);
comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", &mut transcript);
// Verify the final query to the polynomial
// Now, we verify that
// (Az * Bz - Cz) * eq(tau, rx) = Commitment to the claimed evaluation of the final round polynomial over rx
// In the first sum-check, we verify that
// ((sum_y A(x, y) * Z(y)) * (sum_y B(x, y) * Z(y)) - sum_y C(x, y) * Z(y)) * eq(tau, x) sums to 0 over the hypercube
// So the final round polynomial's evaluation over rx should equal the
// evaluation of the above poly over rx
let eq_tau_rx = eq_eval(&tau, &rx);
let expected_claim_post_phase1 = (comm_prod_Az_Bz_claims.decompress().unwrap()
+ -comm_Cz_claim.decompress().unwrap())
.compress()
.to_circuit_val()
.to_affine()
* eq_tau_rx;
// Check the equality between the evaluation of the final round poly of the sum-check
// and the evaluation of the F(x) poly over rx
let proof_eq_sc_phase1 = &proof.r1cs_sat_proof.proof_eq_sc_phase1;
proof_of_eq::verify(
&expected_claim_post_phase1,
&comm_claim_post_phase1,
&proof_eq_sc_phase1.to_circuit_val(),
&gens_1,
&mut transcript,
);
// Verify that the commitments to Az, Bz and Cz are correct
let r_A = transcript.challenge_scalar(b"challenege_Az");
let r_B = transcript.challenge_scalar(b"challenege_Bz");
let r_C = transcript.challenge_scalar(b"challenege_Cz");
// M(r_y) = r_A * comm_Az_claim + r_B * comm_Bz_claim + r_C * comm_Cz_claim;
let comm_claim_phase2 = r_A * comm_Az_claim.decompress().unwrap()
+ r_B * comm_Bz_claim.decompress().unwrap()
+ r_C * comm_Cz_claim.decompress().unwrap();
// Verify the sum-check over M(x)
let sc_proof_phase2: CVSumCheckProof = proof.r1cs_sat_proof.sc_proof_phase2.to_circuit_val();
// comm_claim_post_phase2: Claimed evaluation of the final round polynomial over ry
let (comm_claim_post_phase2, ry) = sumcheck::verify(
2,
&comm_claim_phase2.compress().to_circuit_val(),
&sc_proof_phase2,
&gens_1,
&gens_3,
&mut transcript,
);
// Verify that the final round polynomial's evaluation over ry is equal to the
// evaluation of M(x) over ry.
// In order to do so, we need to get the evaluation of Z(X) over ry.
// We use proof_log of dot prod to verify that.
// comm_vars: Commitment to the evaluations of Z(X) over the boolean hypercube
let comm_vars = proof
.r1cs_sat_proof
.comm_vars
.C
.iter()
.map(|c_i| c_i.to_circuit_val())
.collect::<Vec<Secq256k1>>();
let poly_eval_proof = &proof.r1cs_sat_proof.proof_eval_vars_at_ry;
let comm_vars_at_ry = proof.r1cs_sat_proof.comm_vars_at_ry.to_circuit_val();
poly_evaluation_proof::verify(
&gens_pc_1,
&gens_pc_n,
&ry[1..],
&comm_vars_at_ry,
&comm_vars,
&poly_eval_proof.to_circuit_val(),
&mut transcript,
);
// Interpolate the input as a multilinear polynomial and evaluate at ry[1..]
let mut input_with_one: Vec<Fq> = vec![Fq::one()];
input_with_one.extend_from_slice(
&input
.iter()
.map(|x| x.to_circuit_val())
.collect::<Vec<Fq>>(),
);
let poly_input_eval = eval_ml_poly(&input_with_one, &ry[1..]);
let comm_poly_input_eval = poly_input_eval.commit(&Fq::zero(), &gens_pc_1);
// compute commitment to eval_Z_at_ry = (Scalar::one() - ry[0]) * self.eval_vars_at_ry + ry[0] * poly_input_eval
let comm_eval_Z_at_ry = comm_vars_at_ry * (Fq::one() - ry[0]) + comm_poly_input_eval * ry[0];
let (claimed_rx, claimed_ry) = &proof.r;
let inst_evals = inst.inst.evaluate(&claimed_rx, &claimed_ry);
let (eval_A_r, eval_B_r, eval_C_r) = inst_evals;
// Z(r_y) * (r_A * A(r_y) + r_B * B(r_y) + r_C * C(r_y))
let expected_claim_post_phase2 = comm_eval_Z_at_ry
* (r_A.to_circuit_val() * eval_A_r.to_circuit_val()
+ r_B.to_circuit_val() * eval_B_r.to_circuit_val()
+ r_C.to_circuit_val() * eval_C_r.to_circuit_val());
// Verify that the commitment to the evaluation of the final round polynomial
// is correct
proof_of_eq::verify(
&expected_claim_post_phase2,
&comm_claim_post_phase2,
&proof.r1cs_sat_proof.proof_eq_sc_phase2.to_circuit_val(),
&gens_1,
&mut transcript,
);
}
#[cfg(test)]
mod tests {
use super::*;
use libspartan::{InputsAssignment, Instance, NIZKGens, VarsAssignment};
#[test]
fn test_verify_nizk() {
// parameters of the R1CS instance
let num_cons = 2;
let num_vars = 5;
let num_inputs = 0;
// The constraint
// x ** 2 + y = ~out
// Constraints in R1CS format
// sym_1 = x * x
// ~out = sym_1 + y
// Variables
// 'y', 'x', 'sym_1', '~out', '~one'
let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new(); // <row, column, value>
let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
let one = Fq::one().to_bytes();
// sym_1 = x * x
A.push((0, 1, one));
B.push((0, 1, one));
C.push((0, 2, one));
// ~out = sym_1 + y
A.push((1, 0, one));
A.push((1, 2, one));
B.push((1, 4, one));
C.push((1, 3, one));
let vars = [
Fq::from(2).to_bytes(),
Fq::from(2).to_bytes(),
Fq::from(4).to_bytes(),
Fq::from(6).to_bytes(),
Fq::from(1).to_bytes(),
];
let inputs = vec![];
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
let assignment_vars = VarsAssignment::new(&vars).unwrap();
// Check if instance is satisfiable
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert!(res.unwrap(), "should be satisfied");
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
let mut prover_transcript = Transcript::new(b"test_verify");
let proof = NIZK::prove(
&inst,
assignment_vars,
&assignment_inputs,
&gens,
&mut prover_transcript,
);
let mut verifier_transcript = Transcript::new(b"test_verify");
// Just running the verification of the original implementation as a reference
let _result = proof.verify(&inst, &assignment_inputs, &mut verifier_transcript, &gens);
// In the phase 1 sum-check, com_eval uses gens_1 and the dot product uses gens_4;
// in the phase 2 sum-check, com_eval uses gens_1 and the dot product uses gens_3
verify_nizk(&inst, &assignment_inputs.assignment, &proof, &gens);
}
}

View File

@@ -0,0 +1,50 @@
use crate::circuit_vals::CVPolyEvalProof;
use crate::{commitments::MultiCommitGens, proof_log_of_dotprod, Fq};
use libspartan::math::Math;
use libspartan::transcript::{ProofTranscript, Transcript};
use secpq_curves::{group::Group, Secq256k1};
fn evals(r: &[Fq]) -> Vec<Fq> {
let ell = r.len();
let mut evals: Vec<Fq> = vec![Fq::one(); ell.pow2()];
let mut size = 1;
for j in 0..ell {
// in each iteration, we double the size of chis
size *= 2;
for i in (0..size).rev().step_by(2) {
// copy each element from the prior iteration twice
let scalar = evals[i / 2];
evals[i] = scalar * r[j];
evals[i - 1] = scalar - evals[i];
}
}
evals
}
pub fn verify(
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
r: &[Fq], // point at which the polynomial is evaluated
C_Zr: &Secq256k1, // commitment to \widetilde{Z}(r)
comm_poly: &[Secq256k1], // commitment to the evaluations of the polynomial over the boolean hypercube
proof: &CVPolyEvalProof,
transcript: &mut Transcript,
) {
transcript.append_protocol_name(b"polynomial evaluation proof");
// Evaluate the eq poly over the boolean hypercube bounded to r
let r_left = &r[0..(r.len() / 2)];
let r_right = &r[(r.len() / 2)..];
let L = evals(r_left);
let R = evals(r_right);
// C_LZ = <L, comm_poly>: bind the left half of the eq tensor to the row commitments
let mut C_LZ = Secq256k1::identity();
for i in 0..comm_poly.len() {
C_LZ += comm_poly[i] * L[i];
}
proof_log_of_dotprod::verify(gens_1, gens_n, &R, &C_LZ, C_Zr, &proof.proof, transcript);
}

View File

@@ -0,0 +1,85 @@
use crate::{
circuit_vals::{FromCircuitVal, ToCircuitVal},
Fq,
};
use libspartan::{
group::CompressedGroup,
scalar::Scalar,
transcript::{ProofTranscript, Transcript},
};
use secpq_curves::{
group::{Curve, Group},
Secq256k1,
};
pub fn verify(
upsilon: &Secq256k1, // The upsilon calculated in this func should equal this
a: &[Fq],
G: &[Secq256k1],
upsilon_L: &[Secq256k1],
upsilon_R: &[Secq256k1],
transcript: &mut Transcript,
) -> (Secq256k1, Fq, Secq256k1) {
// #####
// 1: Compute the verification scalars
// #####
// Compute challenges
let mut challenges = vec![];
for (L, R) in upsilon_L.iter().zip(upsilon_R.iter()) {
transcript.append_point(b"L", &CompressedGroup::from_circuit_val(L));
transcript.append_point(b"R", &CompressedGroup::from_circuit_val(R));
// CompressedGroup::from_circuit_val(R).append_to_transcript(b"R", transcript);
challenges.push(transcript.challenge_scalar(b"u"));
}
let mut challenges_inv = challenges.clone();
// 2. Compute the invert of the challenges
Scalar::batch_invert(&mut challenges_inv);
// 3. Compute the square of the challenges
let challenges_sq = challenges
.iter()
.map(|c| c.square())
.collect::<Vec<Scalar>>();
let challenges_inv_sq = challenges_inv
.iter()
.map(|c| c.square())
.collect::<Vec<Scalar>>();
let mut upsilon_hat = Secq256k1::identity();
upsilon_hat += upsilon;
let n = upsilon_L.len();
for i in 0..n {
upsilon_hat += upsilon_L[i] * challenges_sq[i].to_circuit_val()
+ upsilon_R[i] * challenges_inv_sq[i].to_circuit_val();
}
let mut a = &mut a.to_owned()[..];
let mut G = &mut G.to_owned()[..];
let mut n = G.len();
while n != 1 {
n /= 2;
let (a_L, a_R) = a.split_at_mut(n);
let (G_L, G_R) = G.split_at_mut(n);
for i in 0..n {
let u = challenges[challenges.len() - n / 2 - 1];
let u_inv = challenges_inv[challenges.len() - n / 2 - 1];
a_L[i] = a_L[i] * u_inv.to_circuit_val() + a_R[i] * u.to_circuit_val();
G_L[i] = G_L[i] * u_inv.to_circuit_val() + G_R[i] * u.to_circuit_val();
}
a = a_L;
G = G_L;
}
let a_hat = a[0];
let g_hat = G[0];
(upsilon_hat, a_hat, g_hat)
}

View File

@@ -0,0 +1,77 @@
use crate::{
circuit_vals::{CVBulletReductionProof, CVDotProductProofLog, FromCircuitVal, ToCircuitVal},
commitments::MultiCommitGens,
proof_bullet_reduce,
utils::to_fq,
Fq,
};
use libspartan::{
group::CompressedGroup,
nizk::DotProductProofLog,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
};
use secpq_curves::{group::Curve, Secq256k1};
// https://eprint.iacr.org/2017/1132.pdf
// P.19 proof_log-of-dot-prod
pub fn verify(
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
a: &[Fq],
Cx: &Secq256k1, // commitment to the coefficient vector x
Cy: &Secq256k1, // commitment to the evaluation y = <x, a>
proof: &CVDotProductProofLog,
transcript: &mut Transcript,
) {
transcript.append_protocol_name(b"dot product proof (log)");
CompressedGroup::from_circuit_val(Cx).append_to_transcript(b"Cx", transcript);
CompressedGroup::from_circuit_val(Cy).append_to_transcript(b"Cy", transcript);
transcript.append_message(b"a", b"begin_append_vector");
for a_i in a {
transcript.append_message(b"a", &a_i.to_bytes());
}
transcript.append_message(b"a", b"end_append_vector");
// sample a random base and scale the generator used for
// the output of the inner product
let r = to_fq(&transcript.challenge_scalar(b"r"));
let gens_1_scaled = gens_1.scale(&r);
// Upsilon
let Gamma = Cx + Cy * r;
let L_vec = proof
.bullet_reduction_proof
.L_vec
.iter()
.map(|L_i| L_i.unwrap())
.collect::<Vec<Secq256k1>>();
let upsilon_L = L_vec.as_slice();
let R_vec = &proof
.bullet_reduction_proof
.R_vec
.iter()
.map(|R_i| R_i.unwrap())
.collect::<Vec<Secq256k1>>();
let upsilon_R = R_vec.as_slice();
let (Gamma_hat, a_hat, g_hat) =
proof_bullet_reduce::verify(&Gamma, &a, &gens_n.G, upsilon_L, upsilon_R, transcript);
CompressedGroup::from_circuit_val(&proof.delta.unwrap())
.append_to_transcript(b"delta", transcript);
CompressedGroup::from_circuit_val(&proof.beta.unwrap())
.append_to_transcript(b"beta", transcript);
let c = to_fq(&transcript.challenge_scalar(b"c"));
let lhs = (Gamma_hat * c + proof.beta.unwrap()) * a_hat + proof.delta.unwrap();
let rhs = (g_hat + gens_1_scaled.G[0] * a_hat) * proof.z1.unwrap()
+ gens_1_scaled.h * proof.z2.unwrap();
assert!(rhs == lhs, "Proof (log) of dot prod verification failed");
}

View File

@@ -0,0 +1,34 @@
use crate::{
circuit_vals::{CVEqualityProof, FromCircuitVal},
commitments::MultiCommitGens,
utils::to_fq,
Fq,
};
use libspartan::{
group::CompressedGroup,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
};
use secpq_curves::Secq256k1;
// https://eprint.iacr.org/2017/1132.pdf
// P.17 proof-of-equality
pub fn verify(
C1: &Secq256k1,
C2: &Secq256k1,
proof: &CVEqualityProof,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
transcript.append_protocol_name(b"equality proof");
CompressedGroup::from_circuit_val(C1).append_to_transcript(b"C1", transcript);
CompressedGroup::from_circuit_val(C2).append_to_transcript(b"C2", transcript);
CompressedGroup::from_circuit_val(&proof.alpha.unwrap())
.append_to_transcript(b"alpha", transcript);
let lhs = gens_n.h * proof.z.unwrap();
let c = to_fq(&transcript.challenge_scalar(b"c"));
let rhs = (C1 - C2) * c + proof.alpha.unwrap();
assert!(rhs == lhs, "Proof of equality verification failed");
}

View File

@@ -0,0 +1,32 @@
use crate::{
circuit_vals::{CVKnowledgeProof, FromCircuitVal},
commitments::{Commitments, MultiCommitGens},
utils::to_fq,
Fq,
};
use libspartan::{
group::CompressedGroup,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
};
use secpq_curves::Secq256k1;
// https://eprint.iacr.org/2017/1132.pdf
// P.17 Knowledge of opening
pub fn verify(
C: &Secq256k1,
proof: &CVKnowledgeProof,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
transcript.append_protocol_name(b"knowledge proof");
let alpha = proof.alpha.unwrap();
CompressedGroup::from_circuit_val(C).append_to_transcript(b"C", transcript);
CompressedGroup::from_circuit_val(&alpha).append_to_transcript(b"alpha", transcript);
let c = to_fq(&transcript.challenge_scalar(b"c"));
let lhs = proof.z1.unwrap().commit(&proof.z2.unwrap(), gens_n);
let rhs = C * c + alpha;
assert!(lhs == rhs, "proof of opening verification failed");
}

View File

@@ -0,0 +1,72 @@
use crate::{
circuit_vals::CVProductProof,
commitments::{Commitments, MultiCommitGens},
utils::to_fq,
Fq,
};
use libspartan::{
group::CompressedGroup,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
};
use secpq_curves::Secq256k1;
use crate::circuit_vals::FromCircuitVal;
// https://eprint.iacr.org/2017/1132.pdf
// P.17 Figure 5
pub fn verify(
proof: &CVProductProof,
X: Secq256k1,
Y: Secq256k1,
Z: Secq256k1,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
let alpha = proof.alpha.unwrap();
let beta = proof.beta.unwrap();
let delta = proof.delta.unwrap();
let z: [Fq; 5] = proof
.z
.iter()
.map(|z_i| z_i.unwrap())
.collect::<Vec<Fq>>()
.try_into()
.unwrap();
transcript.append_protocol_name(b"product proof");
CompressedGroup::from_circuit_val(&X).append_to_transcript(b"X", transcript);
CompressedGroup::from_circuit_val(&Y).append_to_transcript(b"Y", transcript);
CompressedGroup::from_circuit_val(&Z).append_to_transcript(b"Z", transcript);
CompressedGroup::from_circuit_val(&alpha).append_to_transcript(b"alpha", transcript);
CompressedGroup::from_circuit_val(&beta).append_to_transcript(b"beta", transcript);
CompressedGroup::from_circuit_val(&delta).append_to_transcript(b"delta", transcript);
let c = to_fq(&transcript.challenge_scalar(b"c"));
let z1 = z[0];
let z2 = z[1];
let z3 = z[2];
let z4 = z[3];
let z5 = z[4];
// (7)
let lhs = alpha + X * c;
let rhs = z1.commit(&z2, gens_n);
assert!(lhs == rhs, "prod proof verification failed (7)");
// (8)
let lhs = beta + Y * c;
let rhs = z3.commit(&z4, gens_n);
assert!(lhs == rhs, "prod proof verification failed (8)");
// (9)
let lhs = delta + Z * c;
let gens_x = MultiCommitGens {
G: vec![X],
h: gens_n.h,
};
let rhs = z3.commit(&z5, &gens_x);
assert!(lhs == rhs, "prod proof verification failed (9)");
}

View File

@@ -0,0 +1,97 @@
use crate::{
circuit_vals::{CVDotProdProof, CVSumCheckProof, FromCircuitVal},
dotprod,
utils::to_fq,
Fq, MultiCommitGens,
};
use libspartan::{
group::CompressedGroup,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
};
use secpq_curves::{group::Curve, Secq256k1};
#[derive(Debug, Clone)]
pub struct RoundProof {
pub dotprod_proof: CVDotProdProof,
pub com_eval: Secq256k1,
}
// This function should be able to verify proofs generated by the above `prove` function
// and also the proofs generated by the original Spartan implementation
#[allow(dead_code)]
pub fn verify(
degree_bound: usize,
target_com: &Secq256k1,
proof: &CVSumCheckProof,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) -> (Secq256k1, Vec<Fq>) {
let mut r = vec![];
for (i, round_dotprod_proof) in proof.proofs.iter().enumerate() {
let com_poly = &proof.comm_polys[i].unwrap();
let com_poly_encoded = CompressedGroup::from_circuit_val(com_poly);
com_poly_encoded.append_to_transcript(b"comm_poly", transcript);
let com_eval = &proof.comm_evals[i].unwrap();
let r_i = to_fq(&transcript.challenge_scalar(b"challenge_nextround"));
r.push(r_i);
// The sum over (0, 1) is expected to be equal to the challenge evaluation of the prev round
let com_round_sum = if i == 0 {
*target_com
} else {
proof.comm_evals[i - 1].unwrap()
};
let com_round_sum_encoded = CompressedGroup::from_circuit_val(&com_round_sum);
com_round_sum_encoded.append_to_transcript(b"comm_claim_per_round", transcript);
CompressedGroup::from_circuit_val(&com_eval.clone())
.append_to_transcript(b"comm_eval", transcript);
let w_scalar = transcript.challenge_vector(b"combine_two_claims_to_one", 2);
let w = w_scalar.iter().map(|x| to_fq(x)).collect::<Vec<Fq>>();
let a = {
// the vector to use to decommit for sum-check test
let a_sc = {
let mut a = vec![Fq::one(); degree_bound + 1];
a[0] += Fq::one();
a
};
// the vector to use to decommit for evaluation
let a_eval = {
let mut a = vec![Fq::one(); degree_bound + 1];
for j in 1..a.len() {
a[j] = a[j - 1] * r_i;
}
a
};
// take weighted sum of the two vectors using w
assert_eq!(a_sc.len(), a_eval.len());
(0..a_sc.len())
.map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
.collect::<Vec<Fq>>()
};
let tau = com_round_sum * w[0] + com_eval * w[1];
// Check that the dot product proofs are valid
dotprod::verify(
&tau,
&a,
&round_dotprod_proof,
&com_poly,
&gens_1,
&gens_n,
transcript,
);
}
(proof.comm_evals[proof.comm_evals.len() - 1].unwrap(), r)
}

View File

@@ -0,0 +1,49 @@
use libspartan::math::Math;
use crate::{Fp, Fq};
pub fn hypercube(n: u32) -> Vec<Vec<u8>> {
let mut v = vec![];
for i in 0..(2u64.pow(n)) {
let mut row = vec![];
for j in 0..n {
row.push(((i >> j) & 1) as u8);
}
v.push(row);
}
v
}
pub fn to_fp(x: &libspartan::scalar::Scalar) -> Fp {
Fp::from_bytes(&x.to_bytes().into()).unwrap()
}
pub fn to_fq(x: &libspartan::scalar::Scalar) -> Fq {
Fq::from_bytes(&x.to_bytes().into()).unwrap()
}
fn compute_chi(e: &[Fq], x: &[Fq]) -> Fq {
let mut chi = Fq::one();
for i in 0..e.len() {
chi *= e[i] * x[i] + (Fq::one() - e[i]) * (Fq::one() - x[i]);
}
chi
}
pub fn eval_ml_poly(z: &[Fq], r: &[Fq]) -> Fq {
let mut eval = Fq::zero();
// compute chi
for i in 0..z.len() {
let i_bits: Vec<Fq> = i
.get_bits(r.len())
.iter()
.map(|b| if *b { Fq::one() } else { Fq::zero() })
.collect();
eval += compute_chi(&i_bits, r) * z[i];
}
eval
}

View File

@@ -0,0 +1,29 @@
[package]
name = "hoplite_circuit"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = ["dev-graph"]
dev-graph = ["halo2_proofs/dev-graph", "plotters"]
[dependencies]
halo2_proofs = { git = "https://github.com/privacy-scaling-explorations/halo2", tag = "v2023_01_20" }
halo2-base = { git = "https://github.com/axiom-crypto/halo2-lib.git", default-features = false, features = ["halo2-pse"] }
halo2-ecc = { git = "https://github.com/axiom-crypto/halo2-lib.git", default-features = false, features = ["halo2-pse"] }
num-bigint = { version = "0.4", features = ["rand"] }
secpq_curves = { git = "https://github.com/DanTehrani/secpq_curves.git" }
plotters = { version = "0.3.0", optional = true }
tabbycat = { version = "0.1", features = ["attributes"], optional = true }
spartan = { git = "https://github.com/DanTehrani/Spartan-secq", branch = "hoplite" }
secq256k1 = { git = "https://github.com/personaelabs/spartan-ecdsa", branch = "main" }
hoplite = { path = "../hoplite" }
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
circuit_reader = { path = "../circuit_reader" }
bincode = "1.2.1"
num-traits = "0.2.15"
[dev-dependencies]
ark-std = { version = "0.3.0", features = ["print-trace"] }

View File

@@ -0,0 +1,17 @@
{
"name": "node",
"version": "1.0.0",
"main": "node.bench.ts",
"license": "MIT",
"scripts": {
"prove": "ts-node ./src/prover.ts"
},
"dependencies": {
"@ethereumjs/util": "^8.0.3",
"@personaelabs/spartan-ecdsa": "*"
},
"devDependencies": {
"ts-node": "^10.9.1",
"typescript": "^4.9.4"
}
}

View File

@@ -0,0 +1,84 @@
import * as fs from "fs";
import {
MembershipProver,
Poseidon,
Tree,
MembershipVerifier
} from "@personaelabs/spartan-ecdsa";
import {
hashPersonalMessage,
ecsign,
ecrecover,
privateToPublic
} from "@ethereumjs/util";
import * as path from "path";
const prove = async () => {
const privKey = Buffer.from("".padStart(16, "🧙"), "utf16le");
const msg = Buffer.from("harry potter");
const msgHash = hashPersonalMessage(msg);
const { v, r, s } = ecsign(msgHash, privKey);
const pubKey = ecrecover(msgHash, v, r, s);
const sig = `0x${r.toString("hex")}${s.toString("hex")}${v.toString(16)}`;
// Init the Poseidon hash
const poseidon = new Poseidon();
await poseidon.initWasm();
const treeDepth = 20;
const tree = new Tree(treeDepth, poseidon);
// Get the prover public key hash
const proverPubkeyHash = poseidon.hashPubKey(pubKey);
// Insert prover public key hash into the tree
tree.insert(proverPubkeyHash);
// Insert other members into the tree
for (const member of ["🕵️", "🥷", "👩‍🔬"]) {
const pubKey = privateToPublic(
Buffer.from("".padStart(16, member), "utf16le")
);
tree.insert(poseidon.hashPubKey(pubKey));
}
// Compute the merkle proof
const index = tree.indexOf(proverPubkeyHash);
const merkleProof = tree.createProof(index);
const proverConfig = {
circuit: path.join(
__dirname,
"../../../circuits/build/pubkey_membership/pubkey_membership.circuit"
),
witnessGenWasm: path.join(
__dirname,
"../../../circuits/build/pubkey_membership/pubkey_membership_js/pubkey_membership.wasm"
),
enableProfiler: true
};
// Init the prover
const prover = new MembershipProver(proverConfig);
await prover.initWasm();
// Prove membership
const { proof, publicInput } = await prover.prove(sig, msgHash, merkleProof);
fs.writeFileSync("./proof.bin", proof);
fs.writeFileSync("./input.bin", publicInput.serialize());
const verifierConfig = {
circuit: proverConfig.circuit,
enableProfiler: true
};
// Init verifier
const verifier = new MembershipVerifier(verifierConfig);
await verifier.initWasm();
// Verify proof
await verifier.verify(proof, publicInput.serialize());
};
prove();

View File

@@ -0,0 +1,21 @@
{
"include": [
"./src/**/*",
],
"exclude": [
"./node_modules",
"./build"
],
"compilerOptions": {
"target": "ES6",
"module": "CommonJS",
"rootDir": "./src",
"moduleResolution": "node",
"allowJs": true,
"outDir": "./build",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": true,
"skipLibCheck": true
}
}

View File

@@ -0,0 +1,515 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
"@chainsafe/as-sha256@^0.3.1":
version "0.3.1"
resolved "https://registry.yarnpkg.com/@chainsafe/as-sha256/-/as-sha256-0.3.1.tgz#3639df0e1435cab03f4d9870cc3ac079e57a6fc9"
integrity sha512-hldFFYuf49ed7DAakWVXSJODuq3pzJEguD8tQ7h+sGkM18vja+OFoJI9krnGmgzyuZC2ETX0NOIcCTy31v2Mtg==
"@chainsafe/persistent-merkle-tree@^0.4.2":
version "0.4.2"
resolved "https://registry.yarnpkg.com/@chainsafe/persistent-merkle-tree/-/persistent-merkle-tree-0.4.2.tgz#4c9ee80cc57cd3be7208d98c40014ad38f36f7ff"
integrity sha512-lLO3ihKPngXLTus/L7WHKaw9PnNJWizlOF1H9NNzHP6Xvh82vzg9F2bzkXhYIFshMZ2gTCEz8tq6STe7r5NDfQ==
dependencies:
"@chainsafe/as-sha256" "^0.3.1"
"@chainsafe/ssz@0.9.4":
version "0.9.4"
resolved "https://registry.yarnpkg.com/@chainsafe/ssz/-/ssz-0.9.4.tgz#696a8db46d6975b600f8309ad3a12f7c0e310497"
integrity sha512-77Qtg2N1ayqs4Bg/wvnWfg5Bta7iy7IRh8XqXh7oNMeP2HBbBwx8m6yTpA8p0EHItWPEBkgZd5S5/LSlp3GXuQ==
dependencies:
"@chainsafe/as-sha256" "^0.3.1"
"@chainsafe/persistent-merkle-tree" "^0.4.2"
case "^1.6.3"
"@cspotcode/source-map-support@^0.8.0":
version "0.8.1"
resolved "https://registry.yarnpkg.com/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz#00629c35a688e05a88b1cda684fb9d5e73f000a1"
integrity sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==
dependencies:
"@jridgewell/trace-mapping" "0.3.9"
"@ethereumjs/rlp@^4.0.1":
version "4.0.1"
resolved "https://registry.yarnpkg.com/@ethereumjs/rlp/-/rlp-4.0.1.tgz#626fabfd9081baab3d0a3074b0c7ecaf674aaa41"
integrity sha512-tqsQiBQDQdmPWE1xkkBq4rlSW5QZpLOUJ5RJh2/9fug+q9tnUhuZoVLk7s0scUIKTOzEtR72DFBXI4WiZcMpvw==
"@ethereumjs/util@^8.0.3":
version "8.0.5"
resolved "https://registry.yarnpkg.com/@ethereumjs/util/-/util-8.0.5.tgz#b9088fc687cc13f0c1243d6133d145dfcf3fe446"
integrity sha512-259rXKK3b3D8HRVdRmlOEi6QFvwxdt304hhrEAmpZhsj7ufXEOTIc9JRZPMnXatKjECokdLNBcDOFBeBSzAIaw==
dependencies:
"@chainsafe/ssz" "0.9.4"
"@ethereumjs/rlp" "^4.0.1"
ethereum-cryptography "^1.1.2"
"@iden3/bigarray@0.0.2":
version "0.0.2"
resolved "https://registry.yarnpkg.com/@iden3/bigarray/-/bigarray-0.0.2.tgz#6fc4ba5be18daf8a26ee393f2fb62b80d98c05e9"
integrity sha512-Xzdyxqm1bOFF6pdIsiHLLl3HkSLjbhqJHVyqaTxXt3RqXBEnmsUmEW47H7VOi/ak7TdkRpNkxjyK5Zbkm+y52g==
"@iden3/binfileutils@0.0.11":
version "0.0.11"
resolved "https://registry.yarnpkg.com/@iden3/binfileutils/-/binfileutils-0.0.11.tgz#9ffbbcc1279f2b2182bb6dcff4eee8a5b2167911"
integrity sha512-LylnJoZ0CTdgErnKY8OxohvW4K+p6UHD3sxt+3P9AmMyBQjYR4IpoqoYZZ+9aMj89cmCQ21UvdhndAx04er3NA==
dependencies:
fastfile "0.0.20"
ffjavascript "^0.2.48"
"@jridgewell/resolve-uri@^3.0.3":
version "3.1.0"
resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz#2203b118c157721addfe69d47b70465463066d78"
integrity sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==
"@jridgewell/sourcemap-codec@^1.4.10":
version "1.4.14"
resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24"
integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==
"@jridgewell/trace-mapping@0.3.9":
version "0.3.9"
resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz#6534fd5933a53ba7cbf3a17615e273a0d1273ff9"
integrity sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==
dependencies:
"@jridgewell/resolve-uri" "^3.0.3"
"@jridgewell/sourcemap-codec" "^1.4.10"
"@noble/hashes@1.2.0", "@noble/hashes@~1.2.0":
version "1.2.0"
resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.2.0.tgz#a3150eeb09cc7ab207ebf6d7b9ad311a9bdbed12"
integrity sha512-FZfhjEDbT5GRswV3C6uvLPHMiVD6lQBmpoX5+eSiPaMTXte/IKqI5dykDxzZB/WBeK/CDuQRBWarPdi3FNY2zQ==
"@noble/secp256k1@1.7.1", "@noble/secp256k1@~1.7.0":
version "1.7.1"
resolved "https://registry.yarnpkg.com/@noble/secp256k1/-/secp256k1-1.7.1.tgz#b251c70f824ce3ca7f8dc3df08d58f005cc0507c"
integrity sha512-hOUk6AyBFmqVrv7k5WAw/LpszxVbj9gGN4JRkIX52fdFAj1UA61KXmZDvqVEm+pOyec3+fIeZB02LYa/pWOArw==
"@personaelabs/spartan-ecdsa@*":
version "1.0.2"
resolved "https://registry.yarnpkg.com/@personaelabs/spartan-ecdsa/-/spartan-ecdsa-1.0.2.tgz#22a858e1d7d5729a7198873557d3845ebd2af4f2"
integrity sha512-Wi5NykpV2slwDOSqy4DQ2Q1RicPR4U1W1S0MRZ8XMSYhND10GKIsWVcRqIB5VDh+ijbu1FfRcE8eT8Ov6TR+DA==
dependencies:
"@ethereumjs/util" "^8.0.3"
"@zk-kit/incremental-merkle-tree" "^1.0.0"
elliptic "^6.5.4"
snarkjs "^0.5.0"
"@scure/base@~1.1.0":
version "1.1.1"
resolved "https://registry.yarnpkg.com/@scure/base/-/base-1.1.1.tgz#ebb651ee52ff84f420097055f4bf46cfba403938"
integrity sha512-ZxOhsSyxYwLJj3pLZCefNitxsj093tb2vq90mp2txoYeBqbcjDjqFhyM8eUjq/uFm6zJ+mUuqxlS2FkuSY1MTA==
"@scure/bip32@1.1.5":
version "1.1.5"
resolved "https://registry.yarnpkg.com/@scure/bip32/-/bip32-1.1.5.tgz#d2ccae16dcc2e75bc1d75f5ef3c66a338d1ba300"
integrity sha512-XyNh1rB0SkEqd3tXcXMi+Xe1fvg+kUIcoRIEujP1Jgv7DqW2r9lg3Ah0NkFaCs9sTkQAQA8kw7xiRXzENi9Rtw==
dependencies:
"@noble/hashes" "~1.2.0"
"@noble/secp256k1" "~1.7.0"
"@scure/base" "~1.1.0"
"@scure/bip39@1.1.1":
version "1.1.1"
resolved "https://registry.yarnpkg.com/@scure/bip39/-/bip39-1.1.1.tgz#b54557b2e86214319405db819c4b6a370cf340c5"
integrity sha512-t+wDck2rVkh65Hmv280fYdVdY25J9YeEUIgn2LG1WM6gxFkGzcksoDiUkWVpVp3Oex9xGC68JU2dSbUfwZ2jPg==
dependencies:
"@noble/hashes" "~1.2.0"
"@scure/base" "~1.1.0"
"@tsconfig/node10@^1.0.7":
version "1.0.9"
resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.9.tgz#df4907fc07a886922637b15e02d4cebc4c0021b2"
integrity sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==
"@tsconfig/node12@^1.0.7":
version "1.0.11"
resolved "https://registry.yarnpkg.com/@tsconfig/node12/-/node12-1.0.11.tgz#ee3def1f27d9ed66dac6e46a295cffb0152e058d"
integrity sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==
"@tsconfig/node14@^1.0.0":
version "1.0.3"
resolved "https://registry.yarnpkg.com/@tsconfig/node14/-/node14-1.0.3.tgz#e4386316284f00b98435bf40f72f75a09dabf6c1"
integrity sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==
"@tsconfig/node16@^1.0.2":
version "1.0.3"
resolved "https://registry.yarnpkg.com/@tsconfig/node16/-/node16-1.0.3.tgz#472eaab5f15c1ffdd7f8628bd4c4f753995ec79e"
integrity sha512-yOlFc+7UtL/89t2ZhjPvvB/DeAr3r+Dq58IgzsFkOAvVC6NMJXmCGjbptdXdR9qsX7pKcTL+s87FtYREi2dEEQ==
"@zk-kit/incremental-merkle-tree@^1.0.0":
version "1.0.0"
resolved "https://registry.yarnpkg.com/@zk-kit/incremental-merkle-tree/-/incremental-merkle-tree-1.0.0.tgz#5a9ec2a2ebcb00972035b175c58906651ef6aa39"
integrity sha512-2iRLZfHnZ6wKE+oZN2CnpkKYCE5f5dpv6YRIwLDCz0xwJZrIMQ81AamFBdxPesQSYMMP0GkC0iv1rm6gxAL2Ow==
acorn-walk@^8.1.1:
version "8.2.0"
resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.2.0.tgz#741210f2e2426454508853a2f44d0ab83b7f69c1"
integrity sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==
acorn@^8.4.1:
version "8.8.2"
resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.8.2.tgz#1b2f25db02af965399b9776b0c2c391276d37c4a"
integrity sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==
ansi-styles@^4.1.0:
version "4.3.0"
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937"
integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==
dependencies:
color-convert "^2.0.1"
arg@^4.1.0:
version "4.1.3"
resolved "https://registry.yarnpkg.com/arg/-/arg-4.1.3.tgz#269fc7ad5b8e42cb63c896d5666017261c144089"
integrity sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==
async@^3.2.3:
version "3.2.4"
resolved "https://registry.yarnpkg.com/async/-/async-3.2.4.tgz#2d22e00f8cddeb5fde5dd33522b56d1cf569a81c"
integrity sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ==
b4a@^1.0.1:
version "1.6.1"
resolved "https://registry.yarnpkg.com/b4a/-/b4a-1.6.1.tgz#9effac93a469a868d024e16fd77162c653544cbd"
integrity sha512-AsKjNhz72yxteo/0EtQEiwkMUgk/tGmycXlbG4g3Ard2/ULtNLUykGOkeK0egmN27h0xMAhb76jYccW+XTBExA==
balanced-match@^1.0.0:
version "1.0.2"
resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
bfj@^7.0.2:
version "7.0.2"
resolved "https://registry.yarnpkg.com/bfj/-/bfj-7.0.2.tgz#1988ce76f3add9ac2913fd8ba47aad9e651bfbb2"
integrity sha512-+e/UqUzwmzJamNF50tBV6tZPTORow7gQ96iFow+8b562OdMpEK0BcJEq2OSPEDmAbSMBQ7PKZ87ubFkgxpYWgw==
dependencies:
bluebird "^3.5.5"
check-types "^11.1.1"
hoopy "^0.1.4"
tryer "^1.0.1"
blake2b-wasm@^2.4.0:
version "2.4.0"
resolved "https://registry.yarnpkg.com/blake2b-wasm/-/blake2b-wasm-2.4.0.tgz#9115649111edbbd87eb24ce7c04b427e4e2be5be"
integrity sha512-S1kwmW2ZhZFFFOghcx73+ZajEfKBqhP82JMssxtLVMxlaPea1p9uoLiUZ5WYyHn0KddwbLc+0vh4wR0KBNoT5w==
dependencies:
b4a "^1.0.1"
nanoassert "^2.0.0"
bluebird@^3.5.5:
version "3.7.2"
resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f"
integrity sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==
bn.js@^4.11.9:
version "4.12.0"
resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.12.0.tgz#775b3f278efbb9718eec7361f483fb36fbbfea88"
integrity sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==
brace-expansion@^1.1.7:
version "1.1.11"
resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd"
integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==
dependencies:
balanced-match "^1.0.0"
concat-map "0.0.1"
brace-expansion@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-2.0.1.tgz#1edc459e0f0c548486ecf9fc99f2221364b9a0ae"
integrity sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==
dependencies:
balanced-match "^1.0.0"
brorand@^1.1.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f"
integrity sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w==
case@^1.6.3:
version "1.6.3"
resolved "https://registry.yarnpkg.com/case/-/case-1.6.3.tgz#0a4386e3e9825351ca2e6216c60467ff5f1ea1c9"
integrity sha512-mzDSXIPaFwVDvZAHqZ9VlbyF4yyXRuX6IvB06WvPYkqJVO24kX1PPhv9bfpKNFZyxYFmmgo03HUiD8iklmJYRQ==
chalk@^4.0.2:
version "4.1.2"
resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01"
integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==
dependencies:
ansi-styles "^4.1.0"
supports-color "^7.1.0"
check-types@^11.1.1:
version "11.2.2"
resolved "https://registry.yarnpkg.com/check-types/-/check-types-11.2.2.tgz#7afc0b6a860d686885062f2dba888ba5710335b4"
integrity sha512-HBiYvXvn9Z70Z88XKjz3AEKd4HJhBXsa3j7xFnITAzoS8+q6eIGi8qDB8FKPBAjtuxjI/zFpwuiCb8oDtKOYrA==
circom_runtime@0.1.21:
version "0.1.21"
resolved "https://registry.yarnpkg.com/circom_runtime/-/circom_runtime-0.1.21.tgz#0ee93bb798b5afb8ecec30725ed14d94587a999b"
integrity sha512-qTkud630B/GK8y76hnOaaS1aNuF6prfV0dTrkeRsiJKnlP1ryQbP2FWLgDOPqn6aKyaPlam+Z+DTbBhkEzh8dA==
dependencies:
ffjavascript "0.2.56"
color-convert@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3"
integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==
dependencies:
color-name "~1.1.4"
color-name@~1.1.4:
version "1.1.4"
resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2"
integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
concat-map@0.0.1:
version "0.0.1"
resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==
create-require@^1.1.0:
version "1.1.1"
resolved "https://registry.yarnpkg.com/create-require/-/create-require-1.1.1.tgz#c1d7e8f1e5f6cfc9ff65f9cd352d37348756c333"
integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==
diff@^4.0.1:
version "4.0.2"
resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d"
integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==
ejs@^3.1.6:
version "3.1.8"
resolved "https://registry.yarnpkg.com/ejs/-/ejs-3.1.8.tgz#758d32910c78047585c7ef1f92f9ee041c1c190b"
integrity sha512-/sXZeMlhS0ArkfX2Aw780gJzXSMPnKjtspYZv+f3NiKLlubezAHDU5+9xz6gd3/NhG3txQCo6xlglmTS+oTGEQ==
dependencies:
jake "^10.8.5"
elliptic@^6.5.4:
version "6.5.4"
resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.4.tgz#da37cebd31e79a1367e941b592ed1fbebd58abbb"
integrity sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ==
dependencies:
bn.js "^4.11.9"
brorand "^1.1.0"
hash.js "^1.0.0"
hmac-drbg "^1.0.1"
inherits "^2.0.4"
minimalistic-assert "^1.0.1"
minimalistic-crypto-utils "^1.0.1"
ethereum-cryptography@^1.1.2:
version "1.2.0"
resolved "https://registry.yarnpkg.com/ethereum-cryptography/-/ethereum-cryptography-1.2.0.tgz#5ccfa183e85fdaf9f9b299a79430c044268c9b3a"
integrity sha512-6yFQC9b5ug6/17CQpCyE3k9eKBMdhyVjzUy1WkiuY/E4vj/SXDBbCw8QEIaXqf0Mf2SnY6RmpDcwlUmBSS0EJw==
dependencies:
"@noble/hashes" "1.2.0"
"@noble/secp256k1" "1.7.1"
"@scure/bip32" "1.1.5"
"@scure/bip39" "1.1.1"
fastfile@0.0.20:
version "0.0.20"
resolved "https://registry.yarnpkg.com/fastfile/-/fastfile-0.0.20.tgz#794a143d58cfda2e24c298e5ef619c748c8a1879"
integrity sha512-r5ZDbgImvVWCP0lA/cGNgQcZqR+aYdFx3u+CtJqUE510pBUVGMn4ulL/iRTI4tACTYsNJ736uzFxEBXesPAktA==
ffjavascript@0.2.56:
version "0.2.56"
resolved "https://registry.yarnpkg.com/ffjavascript/-/ffjavascript-0.2.56.tgz#3509f98fcbd3e44ea93cd23519071b76d6eae433"
integrity sha512-em6G5Lrj7ucIqj4TYEgyoHs/j99Urwwqa4+YxEVY2hggnpRimVj+noX5pZQTxI1pvtiekZI4rG65JBf0xraXrg==
dependencies:
wasmbuilder "0.0.16"
wasmcurves "0.2.0"
web-worker "^1.2.0"
ffjavascript@^0.2.48:
version "0.2.57"
resolved "https://registry.yarnpkg.com/ffjavascript/-/ffjavascript-0.2.57.tgz#ba1be96015b2688192e49f2f4de2cc5150fd8594"
integrity sha512-V+vxZ/zPNcthrWmqfe/1YGgqdkTamJeXiED0tsk7B84g40DKlrTdx47IqZuiygqAVG6zMw4qYuvXftIJWsmfKQ==
dependencies:
wasmbuilder "0.0.16"
wasmcurves "0.2.0"
web-worker "^1.2.0"
filelist@^1.0.1:
version "1.0.4"
resolved "https://registry.yarnpkg.com/filelist/-/filelist-1.0.4.tgz#f78978a1e944775ff9e62e744424f215e58352b5"
integrity sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==
dependencies:
minimatch "^5.0.1"
has-flag@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b"
integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==
hash.js@^1.0.0, hash.js@^1.0.3:
version "1.1.7"
resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.7.tgz#0babca538e8d4ee4a0f8988d68866537a003cf42"
integrity sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==
dependencies:
inherits "^2.0.3"
minimalistic-assert "^1.0.1"
hmac-drbg@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz#d2745701025a6c775a6c545793ed502fc0c649a1"
integrity sha512-Tti3gMqLdZfhOQY1Mzf/AanLiqh1WTiJgEj26ZuYQ9fbkLomzGchCws4FyrSd4VkpBfiNhaE1On+lOz894jvXg==
dependencies:
hash.js "^1.0.3"
minimalistic-assert "^1.0.0"
minimalistic-crypto-utils "^1.0.1"
hoopy@^0.1.4:
version "0.1.4"
resolved "https://registry.yarnpkg.com/hoopy/-/hoopy-0.1.4.tgz#609207d661100033a9a9402ad3dea677381c1b1d"
integrity sha512-HRcs+2mr52W0K+x8RzcLzuPPmVIKMSv97RGHy0Ea9y/mpcaK+xTrjICA04KAHi4GRzxliNqNJEFYWHghy3rSfQ==
inherits@^2.0.3, inherits@^2.0.4:
version "2.0.4"
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c"
integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
jake@^10.8.5:
version "10.8.5"
resolved "https://registry.yarnpkg.com/jake/-/jake-10.8.5.tgz#f2183d2c59382cb274226034543b9c03b8164c46"
integrity sha512-sVpxYeuAhWt0OTWITwT98oyV0GsXyMlXCF+3L1SuafBVUIr/uILGRB+NqwkzhgXKvoJpDIpQvqkUALgdmQsQxw==
dependencies:
async "^3.2.3"
chalk "^4.0.2"
filelist "^1.0.1"
minimatch "^3.0.4"
js-sha3@^0.8.0:
version "0.8.0"
resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.8.0.tgz#b9b7a5da73afad7dedd0f8c463954cbde6818840"
integrity sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q==
logplease@^1.2.15:
version "1.2.15"
resolved "https://registry.yarnpkg.com/logplease/-/logplease-1.2.15.tgz#3da442e93751a5992cc19010a826b08d0293c48a"
integrity sha512-jLlHnlsPSJjpwUfcNyUxXCl33AYg2cHhIf9QhGL2T4iPT0XPB+xP1LRKFPgIg1M/sg9kAJvy94w9CzBNrfnstA==
make-error@^1.1.1:
version "1.3.6"
resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2"
integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==
minimalistic-assert@^1.0.0, minimalistic-assert@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7"
integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==
minimalistic-crypto-utils@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz#f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a"
integrity sha512-JIYlbt6g8i5jKfJ3xz7rF0LXmv2TkDxBLUkiBeZ7bAx4GnnNMr8xFpGnOxn6GhTEHx3SjRrZEoU+j04prX1ktg==
minimatch@^3.0.4:
version "3.1.2"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b"
integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==
dependencies:
brace-expansion "^1.1.7"
minimatch@^5.0.1:
version "5.1.6"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-5.1.6.tgz#1cfcb8cf5522ea69952cd2af95ae09477f122a96"
integrity sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==
dependencies:
brace-expansion "^2.0.1"
nanoassert@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/nanoassert/-/nanoassert-2.0.0.tgz#a05f86de6c7a51618038a620f88878ed1e490c09"
integrity sha512-7vO7n28+aYO4J+8w96AzhmU8G+Y/xpPDJz/se19ICsqj/momRbb9mh9ZUtkoJ5X3nTnPdhEJyc0qnM6yAsHBaA==
r1csfile@0.0.41:
version "0.0.41"
resolved "https://registry.yarnpkg.com/r1csfile/-/r1csfile-0.0.41.tgz#e3d2709d36923156dd1fc2db9858987b30c92948"
integrity sha512-Q1WDF3u1vYeAwjHo4YuddkA8Aq0TulbKjmGm99+Atn13Lf5fTsMZBnBV9T741w8iSyPFG6Uh6sapQby77sREqA==
dependencies:
"@iden3/bigarray" "0.0.2"
"@iden3/binfileutils" "0.0.11"
fastfile "0.0.20"
ffjavascript "0.2.56"
snarkjs@^0.5.0:
version "0.5.0"
resolved "https://registry.yarnpkg.com/snarkjs/-/snarkjs-0.5.0.tgz#cf26bf1d3835eb16b4b330a438bad9824837d6b0"
integrity sha512-KWz8mZ2Y+6wvn6GGkQo6/ZlKwETdAGohd40Lzpwp5TUZCn6N6O4Az1SuX1rw/qREGL6Im+ycb19suCFE8/xaKA==
dependencies:
"@iden3/binfileutils" "0.0.11"
bfj "^7.0.2"
blake2b-wasm "^2.4.0"
circom_runtime "0.1.21"
ejs "^3.1.6"
fastfile "0.0.20"
ffjavascript "0.2.56"
js-sha3 "^0.8.0"
logplease "^1.2.15"
r1csfile "0.0.41"
supports-color@^7.1.0:
version "7.2.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da"
integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==
dependencies:
has-flag "^4.0.0"
tryer@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/tryer/-/tryer-1.0.1.tgz#f2c85406800b9b0f74c9f7465b81eaad241252f8"
integrity sha512-c3zayb8/kWWpycWYg87P71E1S1ZL6b6IJxfb5fvsUgsf0S2MVGaDhDXXjDMpdCpfWXqptc+4mXwmiy1ypXqRAA==
ts-node@^10.9.1:
version "10.9.1"
resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.9.1.tgz#e73de9102958af9e1f0b168a6ff320e25adcff4b"
integrity sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==
dependencies:
"@cspotcode/source-map-support" "^0.8.0"
"@tsconfig/node10" "^1.0.7"
"@tsconfig/node12" "^1.0.7"
"@tsconfig/node14" "^1.0.0"
"@tsconfig/node16" "^1.0.2"
acorn "^8.4.1"
acorn-walk "^8.1.1"
arg "^4.1.0"
create-require "^1.1.0"
diff "^4.0.1"
make-error "^1.1.1"
v8-compile-cache-lib "^3.0.1"
yn "3.1.1"
typescript@^4.9.4:
version "4.9.5"
resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.9.5.tgz#095979f9bcc0d09da324d58d03ce8f8374cbe65a"
integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==
v8-compile-cache-lib@^3.0.1:
version "3.0.1"
resolved "https://registry.yarnpkg.com/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz#6336e8d71965cb3d35a1bbb7868445a7c05264bf"
integrity sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==
wasmbuilder@0.0.16:
version "0.0.16"
resolved "https://registry.yarnpkg.com/wasmbuilder/-/wasmbuilder-0.0.16.tgz#f34c1f2c047d2f6e1065cbfec5603988f16d8549"
integrity sha512-Qx3lEFqaVvp1cEYW7Bfi+ebRJrOiwz2Ieu7ZG2l7YyeSJIok/reEQCQCuicj/Y32ITIJuGIM9xZQppGx5LrQdA==
wasmcurves@0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/wasmcurves/-/wasmcurves-0.2.0.tgz#ccfc5a7d3778b6e0768b82a9336c80054f9bc0cf"
integrity sha512-3e2rbxdujOwaod657gxgmdhZNn+i1qKdHO3Y/bK+8E7bV8ttV/fu5FO4/WLBACF375cK0QDLOP+65Na63qYuWA==
dependencies:
wasmbuilder "0.0.16"
web-worker@^1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/web-worker/-/web-worker-1.2.0.tgz#5d85a04a7fbc1e7db58f66595d7a3ac7c9c180da"
integrity sha512-PgF341avzqyx60neE9DD+XS26MMNMoUQRz9NOZwW32nPQrF6p77f1htcnjBSEV8BGMKZ16choqUG4hyI0Hx7mA==
yn@3.1.1:
version "3.1.1"
resolved "https://registry.yarnpkg.com/yn/-/yn-3.1.1.tgz#1e87401a09d767c1d5eab26a6e4c185182d2eb50"
integrity sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==

View File

@@ -0,0 +1,185 @@
use crate::{
chips::pedersen_commit::PedersenCommitChip,
transcript::HopliteTranscript,
{FpChip, Fq, FqChip},
};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::bigint::CRTInteger;
use halo2_ecc::ecc::{EcPoint, EccChip};
use halo2_ecc::fields::FieldChip;
use halo2_proofs::circuit::Value;
use hoplite::{
circuit_vals::{CVDotProdProof, ToCircuitVal},
commitments::MultiCommitGens,
};
use libspartan::transcript::{ProofTranscript, Transcript};
use super::{
secq256k1::Secq256k1Chip,
utils::{Assign, AssignArray},
};
#[derive(Clone, Debug)]
pub struct AssignedZKDotProdProof<'v, const DIMENSION: usize, F: PrimeField> {
pub delta: EcPoint<F, CRTInteger<'v, F>>,
pub beta: EcPoint<F, CRTInteger<'v, F>>,
pub z: [CRTInteger<'v, F>; DIMENSION],
pub z_delta: CRTInteger<'v, F>,
pub z_beta: CRTInteger<'v, F>,
}
impl<'v, const DIMENSION: usize, F: PrimeField>
Assign<'v, F, AssignedZKDotProdProof<'v, DIMENSION, F>> for CVDotProdProof<DIMENSION>
{
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedZKDotProdProof<'v, DIMENSION, F> {
let beta = self.beta.assign(ctx, secq_chip);
let delta = self.delta.assign(ctx, secq_chip);
let z: [CRTInteger<'v, F>; DIMENSION] = self
.z
.iter()
.map(|z_i| z_i.assign(ctx, secq_chip))
.collect::<Vec<CRTInteger<'v, F>>>()
.try_into()
.unwrap();
let z_beta = self.z_beta.assign(ctx, secq_chip);
let z_delta = self.z_delta.assign(ctx, secq_chip);
AssignedZKDotProdProof {
beta,
delta,
z,
z_beta,
z_delta,
}
}
}
#[derive(Clone)]
pub struct ZKDotProdChip<const DIMENSION: usize, F: PrimeField> {
pub ecc_chip: EccChip<F, FpChip<F>>,
pub fq_chip: FqChip<F>,
pub pedersen_chip: PedersenCommitChip<F>,
window_bits: usize,
}
impl<const DIMENSION: usize, F: PrimeField> ZKDotProdChip<DIMENSION, F> {
pub fn construct(
ecc_chip: EccChip<F, FpChip<F>>,
fq_chip: FqChip<F>,
pedersen_chip: PedersenCommitChip<F>,
) -> Self {
Self {
ecc_chip,
fq_chip,
pedersen_chip,
window_bits: 4,
}
}
fn dot_prod<'v>(
&self,
ctx: &mut Context<'v, F>,
a: &[CRTInteger<'v, F>],
b: &[CRTInteger<'v, F>],
) -> CRTInteger<'v, F> {
let mut sum = self
.fq_chip
.load_private(ctx, FqChip::<F>::fe_to_witness(&Value::known(Fq::zero())));
// sum = Σ a_i * b_i, carrying after each multiply-accumulate step
for i in 0..a.len() {
let prod_no_carry = self.fq_chip.mul_no_carry(ctx, &a[i], &b[i]);
let sum_no_carry = self.fq_chip.add_no_carry(ctx, &sum, &prod_no_carry);
sum = self.fq_chip.carry_mod(ctx, &sum_no_carry);
}
sum
}
pub fn verify<'v>(
&self,
ctx: &mut Context<'v, F>,
tau: &EcPoint<F, CRTInteger<'v, F>>,
a: [CRTInteger<'v, F>; DIMENSION],
com_poly: &EcPoint<F, CRTInteger<'v, F>>,
proof: &AssignedZKDotProdProof<'v, DIMENSION, F>,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
transcript.append_protocol_name(b"dot product proof");
transcript.append_circuit_point(b"Cx", com_poly.clone());
transcript.append_circuit_point(b"Cy", tau.clone());
transcript.append_message(b"a", b"begin_append_vector");
// TODO: Implement this in a trait
for a_i_val in &a {
let mut a_i = [0u8; 32];
a_i_val.clone().value.and_then(|val| {
// Serialize as 32 little-endian bytes, zero-padding at the high end.
let mut a_i_bytes = val.to_bytes_le().1;
a_i_bytes.resize(32, 0);
a_i = a_i_bytes.try_into().unwrap();
Value::known(val)
});
transcript.append_message(b"a", &a_i);
}
transcript.append_message(b"a", b"end_append_vector");
transcript.append_circuit_point(b"delta", (&proof.delta).clone());
transcript.append_circuit_point(b"beta", (&proof.beta).clone());
let max_bits = self.fq_chip.limb_bits;
let c = transcript.challenge_scalar(b"c");
let c = self.fq_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(c.to_circuit_val())),
);
// (13)
let epsilon_c = self.ecc_chip.scalar_mult(
ctx,
&com_poly,
&c.truncation.limbs,
max_bits,
self.window_bits,
);
// (epsilon * c) + delta
let lhs = self
.ecc_chip
.add_unequal(ctx, &epsilon_c, &proof.delta, true);
// com(z, z_delta)
let rhs = self
.pedersen_chip
.multi_commit(ctx, &proof.z, &proof.z_delta, &gens_n);
self.ecc_chip.assert_equal(ctx, &lhs, &rhs);
// (14)
let tau_c = self
.ecc_chip
.scalar_mult(ctx, &tau, &c.truncation.limbs, max_bits, 4);
// (tau * c) + beta
let lhs = self.ecc_chip.add_unequal(ctx, &tau_c, &proof.beta, true);
let a_dot_z = self.dot_prod(ctx, &a, &proof.z);
// com((a ・ z), z_beta)
let rhs = self
.pedersen_chip
.commit(ctx, &a_dot_z, &proof.z_beta, &gens_1);
self.ecc_chip.assert_equal(ctx, &lhs, &rhs);
}
}
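For reference, (13) and (14) are the two verifier equations of the zero-knowledge dot-product sigma protocol that this chip arithmetizes. A minimal off-circuit restatement, generic over `group::Group` (the `pedersen` helper below is a stand-in for the Pedersen chip, not an API of this crate; `G` could be instantiated with e.g. `secpq_curves::Secq256k1`):

use group::Group;

// Com(msg; blind) = sum_i msg_i * G_i + blind * h
fn pedersen<G: Group>(gens: &[G], h: G, msg: &[G::Scalar], blind: G::Scalar) -> G {
    gens.iter().zip(msg).fold(h * blind, |acc, (g, m)| acc + *g * *m)
}

fn dotprod_checks<G: Group>(
    c: G::Scalar,       // Fiat-Shamir challenge
    cx: G,              // com_poly above
    cy: G,              // tau above
    delta: G,
    beta: G,
    a: &[G::Scalar],    // public vector
    z: &[G::Scalar],
    z_delta: G::Scalar,
    z_beta: G::Scalar,
    gens_n: (&[G], G),  // (G_1..G_n, h)
    gens_1: (&[G], G),
) -> bool {
    // (13): c * Cx + delta == Com(z; z_delta)
    let eq13 = cx * c + delta == pedersen(gens_n.0, gens_n.1, z, z_delta);
    // (14): c * Cy + beta == Com(<a, z>; z_beta)
    let a_dot_z = a.iter().zip(z).fold(G::Scalar::from(0), |s, (x, y)| s + *x * *y);
    let eq14 = cy * c + beta == pedersen(gens_1.0, gens_1.1, &[a_dot_z], z_beta);
    eq13 && eq14
}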

View File

@@ -0,0 +1,31 @@
use crate::FpChip;
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::{bigint::CRTInteger, fields::FieldChip};
use num_bigint::BigUint;
use num_traits::Zero;
pub struct EvalMLPolyChip<F: PrimeField, const N_VARS: usize> {
pub fp_chip: FpChip<F>,
}
impl<'v, F: PrimeField, const N_VARS: usize> EvalMLPolyChip<F, N_VARS> {
pub fn construct(fp_chip: FpChip<F>) -> Self {
Self { fp_chip }
}
pub fn eval(
&self,
ctx: &mut Context<'v, F>,
coeffs: &[CRTInteger<'v, F>; N_VARS],
vals: &[CRTInteger<'v, F>; N_VARS],
) -> CRTInteger<'v, F> {
let mut acc = self.fp_chip.load_constant(ctx, BigUint::zero());
for (coeff, val) in coeffs.iter().zip(vals.iter()) {
let term = self.fp_chip.mul(ctx, coeff, val);
acc = self.fp_chip.add_no_carry(ctx, &term, &acc);
acc = self.fp_chip.carry_mod(ctx, &acc);
}
acc
}
}
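In equation form, `eval` constrains the modular dot product of its two input arrays:

$$\mathrm{acc} \;=\; \sum_{i=0}^{N_{\mathrm{vars}}-1} \mathrm{coeffs}_i \cdot \mathrm{vals}_i \pmod{p}$$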

View File

@@ -0,0 +1,12 @@
pub mod dotprod;
pub mod eval_poly;
pub mod pedersen_commit;
pub mod poly_eval_proof;
pub mod proof_bullet_reduce;
pub mod proof_log_of_dotprod;
pub mod proof_of_eq;
pub mod proof_of_opening;
pub mod proof_of_prod;
pub mod secq256k1;
pub mod sumcheck;
pub mod utils;

View File

@@ -0,0 +1,97 @@
use crate::FpChip;
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::bigint::CRTInteger;
use halo2_ecc::ecc::{fixed_base, EcPoint, EccChip};
use hoplite::commitments::MultiCommitGens;
use secpq_curves::group::Curve;
#[derive(Clone)]
pub struct PedersenCommitChip<F: PrimeField> {
pub ecc_chip: EccChip<F, FpChip<F>>,
pub fp_chip: FpChip<F>,
window_bits: usize,
}
impl<F: PrimeField> PedersenCommitChip<F> {
pub fn construct(ecc_chip: EccChip<F, FpChip<F>>, fp_chip: FpChip<F>) -> Self {
Self {
ecc_chip,
fp_chip,
window_bits: 4,
}
}
pub fn commit<'v>(
&self,
ctx: &mut Context<'v, F>,
x: &CRTInteger<'v, F>,
blinder: &CRTInteger<'v, F>,
gens: &MultiCommitGens,
) -> EcPoint<F, CRTInteger<'v, F>> {
let max_bits = self.fp_chip.limb_bits;
let gx = fixed_base::scalar_multiply(
&self.fp_chip,
ctx,
&gens.G[0].to_affine(),
&x.truncation.limbs,
max_bits,
self.window_bits,
);
let hb = fixed_base::scalar_multiply(
&self.fp_chip,
ctx,
&gens.h.to_affine(),
&blinder.truncation.limbs,
max_bits,
self.window_bits,
);
let com = self.ecc_chip.add_unequal(ctx, &gx, &hb, true);
com
}
pub fn multi_commit<'v>(
&self,
ctx: &mut Context<'v, F>,
x: &[CRTInteger<'v, F>],
blinder: &CRTInteger<'v, F>,
gens: &MultiCommitGens,
) -> EcPoint<F, CRTInteger<'v, F>> {
let max_bits = self.fp_chip.limb_bits;
let mut g_sum = fixed_base::scalar_multiply(
&self.fp_chip,
ctx,
&gens.G[0].to_affine(),
&x[0].truncation.limbs,
max_bits,
self.window_bits,
);
for (i, x_i) in x[1..].iter().enumerate() {
let g = fixed_base::scalar_multiply(
&self.fp_chip,
ctx,
&gens.G[i + 1].to_affine(),
&x_i.truncation.limbs,
max_bits,
self.window_bits,
);
g_sum = self.ecc_chip.add_unequal(ctx, &g_sum, &g, true);
}
let hb = fixed_base::scalar_multiply(
&self.fp_chip,
ctx,
&gens.h.to_affine(),
&blinder.truncation.limbs,
max_bits,
self.window_bits,
);
let com = self.ecc_chip.add_unequal(ctx, &g_sum, &hb, true);
com
}
}
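In equation form, over the `MultiCommitGens` generators $G_0,\dots,G_{n-1}$ and blinding generator $h$, the two methods compute the standard Pedersen commitments

$$\mathrm{commit}(x; r) = x\,G_0 + r\,h, \qquad \mathrm{multi\_commit}(\vec{x}; r) = \sum_{i} x_i\,G_i + r\,h.$$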

View File

@@ -0,0 +1,140 @@
use super::{
proof_log_of_dotprod::{AssignedDotProductProofLog, ProofLogOfDotProdChip},
utils::{Assign, AssignArray},
};
use crate::chips::{proof_bullet_reduce::AssignedBulletReductionProof, secq256k1::Secq256k1Chip};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::bigint::CRTInteger;
use halo2_ecc::ecc::EcPoint;
use hoplite::{circuit_vals::CVPolyEvalProof, commitments::MultiCommitGens};
use libspartan::transcript::{ProofTranscript, Transcript};
use secpq_curves::{
group::{Curve, Group},
Secq256k1,
};
pub trait AssignN<'v, F: PrimeField, const N: usize> {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedPolyEvalProof<'v, F, N>;
}
pub struct AssignedPolyEvalProof<'v, F: PrimeField, const N: usize> {
pub proof: AssignedDotProductProofLog<'v, F, N>,
}
impl<'v, F: PrimeField, const N: usize> AssignN<'v, F, N> for CVPolyEvalProof<N> {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedPolyEvalProof<'v, F, N> {
let z1 = self.proof.z1.assign(ctx, secq_chip);
let z2 = self.proof.z2.assign(ctx, secq_chip);
let beta = self.proof.beta.assign(ctx, secq_chip);
let delta = self.proof.delta.assign(ctx, secq_chip);
let L_vec = self
.proof
.bullet_reduction_proof
.L_vec
.assign(ctx, secq_chip);
let R_vec = self
.proof
.bullet_reduction_proof
.R_vec
.assign(ctx, secq_chip);
let bullet_reduction_proof = AssignedBulletReductionProof { L_vec, R_vec };
let proof = AssignedDotProductProofLog {
bullet_reduction_proof,
delta,
beta,
z1,
z2,
};
AssignedPolyEvalProof { proof }
}
}
pub struct PolyEvalProofChip<F: PrimeField, const N: usize, const N_HALF: usize> {
pub secq_chip: Secq256k1Chip<F>,
pub proof_log_dotprod_chip: ProofLogOfDotProdChip<F, N, N_HALF>,
pub window_bits: usize,
}
impl<'v, F: PrimeField, const N: usize, const N_HALF: usize> PolyEvalProofChip<F, N, N_HALF> {
pub fn construct(
secq_chip: Secq256k1Chip<F>,
proof_log_dotprod_chip: ProofLogOfDotProdChip<F, N, N_HALF>,
window_bits: usize,
) -> Self {
Self {
secq_chip,
proof_log_dotprod_chip,
window_bits,
}
}
pub fn verify(
&self,
ctx: &mut Context<'v, F>,
r: &[CRTInteger<'v, F>; N],
C_Zr: &EcPoint<F, CRTInteger<'v, F>>,
comm_polys: &[EcPoint<F, CRTInteger<'v, F>>; N],
proof: AssignedPolyEvalProof<'v, F, N_HALF>,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
let limbs_bits = self.secq_chip.ecc_chip.field_chip.limb_bits;
transcript.append_protocol_name(b"polynomial evaluation proof");
// Evaluate the eq poly over the boolean hypercube bounded to r
let r_left = &r[0..N / 2];
let r_right = &r[N / 2..];
// TODO: Implement the evals() constraint in-circuit
// (an off-circuit sketch of evals() is given after this impl):
// L = evals(r_left);
// R = evals(r_right);
// For now the raw halves of r are used as placeholders:
let L = r_left;
let R = r_right;
// L * r_left;
let mut C_LZ = self
.secq_chip
.ecc_chip
.assign_constant_point(ctx, Secq256k1::identity().to_affine());
for i in 0..comm_polys.len() {
let comm_poly_L = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&comm_polys[i],
&L[i].truncation.limbs,
limbs_bits,
self.window_bits,
);
C_LZ = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &comm_poly_L, &C_LZ, true);
}
self.proof_log_dotprod_chip.verify(
ctx,
R.try_into().unwrap(),
&C_LZ,
&C_Zr,
&proof.proof,
&gens_1,
&gens_n,
transcript,
);
}
}
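The `evals()` referenced in the TODO above is the vector of eq-basis evaluations $(\mathrm{eq}(r, x))_{x \in \{0,1\}^m}$, where $\mathrm{eq}(r, x) = \prod_i \big(r_i x_i + (1 - r_i)(1 - x_i)\big)$. A hedged off-circuit sketch (the in-circuit version still needs to be expressed as constraints); each round doubles the table, so the result has $2^m$ entries:

use ff::PrimeField;

fn evals<F: PrimeField>(r: &[F]) -> Vec<F> {
    let mut table = vec![F::from(1)];
    for r_i in r {
        let mut next = Vec::with_capacity(table.len() * 2);
        for e in &table {
            next.push(*e * (F::from(1) - r_i)); // x_i = 0 branch
            next.push(*e * r_i);                // x_i = 1 branch
        }
        table = next;
    }
    table
}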

View File

@@ -0,0 +1,177 @@
use super::utils::{Assign, AssignArray};
use crate::{chips::secq256k1::Secq256k1Chip, transcript::HopliteTranscript, Fq};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::ecc::EcPoint;
use halo2_ecc::fields::FieldChip;
use halo2_ecc::{bigint::CRTInteger, ecc::fixed_base};
use hoplite::{
circuit_vals::{CVProductProof, FromCircuitVal, ToCircuitVal},
commitments::MultiCommitGens,
};
use libspartan::{
group::CompressedGroup,
transcript::{ProofTranscript, Transcript},
};
use num_bigint::BigUint;
use num_traits::identities::Zero;
use secpq_curves::group::{Curve, Group};
use secpq_curves::Secq256k1;
use super::pedersen_commit::PedersenCommitChip;
#[derive(Clone)]
pub struct AssignedBulletReductionProof<'v, F: PrimeField, const N: usize> {
pub L_vec: [EcPoint<F, CRTInteger<'v, F>>; N],
pub R_vec: [EcPoint<F, CRTInteger<'v, F>>; N],
}
#[derive(Clone)]
pub struct BulletReduceChip<F: PrimeField, const N: usize> {
pub secq_chip: Secq256k1Chip<F>,
pub pedersen_chip: PedersenCommitChip<F>,
pub window_bits: usize,
}
impl<'v, F: PrimeField, const N: usize> BulletReduceChip<F, N> {
pub fn construct(
secq_chip: Secq256k1Chip<F>,
pedersen_chip: PedersenCommitChip<F>,
window_bits: usize,
) -> Self {
Self {
secq_chip,
pedersen_chip,
window_bits,
}
}
// TODO: in-circuit batch inversion (see the off-circuit sketch after this impl)
fn batch_invert(&self, _ctx: &mut Context<'v, F>, _a: [CRTInteger<'v, F>; N]) {}
pub fn verify(
&self,
ctx: &mut Context<'v, F>,
upsilon: &EcPoint<F, CRTInteger<'v, F>>, // The upsilon calculated in this func should equal this
a_L: &[CRTInteger<'v, F>; N],
a_R: &[CRTInteger<'v, F>; N],
upsilon_L: &[EcPoint<F, CRTInteger<'v, F>>; N],
upsilon_R: &[EcPoint<F, CRTInteger<'v, F>>; N],
G_L: &[Secq256k1; N],
G_R: &[Secq256k1; N],
transcript: &mut Transcript,
) -> (
EcPoint<F, CRTInteger<'v, F>>,
CRTInteger<'v, F>,
EcPoint<F, CRTInteger<'v, F>>,
) {
let limb_bits = self.secq_chip.ecc_chip.field_chip.limb_bits;
// #####
// 1: Compute the verification scalars
// #####
// Compute challenges
let mut challenges = Vec::with_capacity(N);
for (L, R) in upsilon_L.iter().zip(upsilon_R.iter()) {
transcript.append_circuit_point(b"L", L.clone());
transcript.append_circuit_point(b"R", R.clone());
let c_i = transcript.challenge_scalar(b"u");
let c_i = Some(c_i.to_circuit_val()).assign(ctx, &self.secq_chip);
challenges.push(c_i);
}
// 2. Compute the inverses of the challenges
// TODO: actually invert; this clone is only a placeholder. An off-circuit
// sketch of batch inversion is given after this impl.
// Scalar::batch_invert(&mut challenges_inv);
let challenges_inv = challenges.clone();
// 3. Compute the square of the challenges
let mut challenges_sq = vec![];
for c in challenges.clone() {
let c_i_squared = self.secq_chip.fq_chip.mul(ctx, &c, &c);
challenges_sq.push(c_i_squared.clone());
}
let mut challenges_inv_sq = vec![];
for c in challenges_inv.clone() {
let c_i_squared = self.secq_chip.fq_chip.mul(ctx, &c, &c);
challenges_inv_sq.push(c_i_squared.clone());
}
let mut upsilon_hat = self
.secq_chip
.ecc_chip
.assign_constant_point(ctx, Secq256k1::identity().to_affine());
for i in 0..N {
let p_i_l = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&upsilon_L[i],
&challenges_sq[i].truncation.limbs,
limb_bits,
4,
);
let p_i_r = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&upsilon_R[i],
&challenges_inv_sq[i].truncation.limbs,
limb_bits,
4,
);
let p_i = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &p_i_l, &p_i_r, true);
upsilon_hat = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &p_i, &upsilon_hat, true);
}
let mut a_hat = self.secq_chip.fq_chip.load_constant(ctx, BigUint::zero());
for i in 0..N {
let a_i_l = self.secq_chip.fq_chip.mul(ctx, &a_L[i], &challenges_inv[i]);
let a_i_r = self.secq_chip.fq_chip.mul(ctx, &a_R[i], &challenges[i]);
let a_i_no_carry = self.secq_chip.fq_chip.add_no_carry(ctx, &a_i_l, &a_i_r);
let a_i = self.secq_chip.fq_chip.carry_mod(ctx, &a_i_no_carry);
let a_hat_no_carry = self.secq_chip.fq_chip.add_no_carry(ctx, &a_i, &a_hat);
a_hat = self.secq_chip.fq_chip.carry_mod(ctx, &a_hat_no_carry);
}
let mut g_hat = self
.secq_chip
.ecc_chip
.assign_constant_point(ctx, Secq256k1::identity().to_affine());
for i in 0..N {
let g_i_l = fixed_base::scalar_multiply(
&self.secq_chip.ecc_chip.field_chip,
ctx,
&G_L[i].to_affine(),
&challenges_inv[i].truncation.limbs,
limb_bits,
self.window_bits,
);
let g_i_r = fixed_base::scalar_multiply(
&self.secq_chip.ecc_chip.field_chip,
ctx,
&G_R[i].to_affine(),
&challenges[i].truncation.limbs,
limb_bits,
self.window_bits,
);
let g_i = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &g_i_l, &g_i_r, true);
g_hat = self.secq_chip.ecc_chip.add_unequal(ctx, &g_i, &g_hat, true);
}
(upsilon_hat, a_hat, g_hat)
}
}
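The inversion left open above can be done with a single field inversion via Montgomery's trick. A hedged off-circuit sketch (assumes every input is nonzero; the in-circuit variant would instead witness each inverse `v_inv` and constrain `v * v_inv == 1`):

use ff::{Field, PrimeField};

fn batch_invert<F: PrimeField>(vals: &mut [F]) {
    // prefix[i] = vals[0] * ... * vals[i - 1]
    let mut prefix = Vec::with_capacity(vals.len());
    let mut acc = F::from(1);
    for v in vals.iter() {
        prefix.push(acc);
        acc *= v;
    }
    // one inversion of the product of all elements
    let mut inv = acc.invert().unwrap();
    // walk backwards, peeling one element off the running inverse at a time:
    // at step i, inv = (vals[0..=i] product)^-1, so vals[i]^-1 = inv * prefix[i]
    for (v, p) in vals.iter_mut().zip(prefix).rev() {
        let v_inv = inv * p;
        inv *= *v;
        *v = v_inv;
    }
}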

View File

@@ -0,0 +1,150 @@
use crate::{chips::proof_bullet_reduce::BulletReduceChip, transcript::HopliteTranscript};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::ecc::EcPoint;
use halo2_ecc::{bigint::CRTInteger, ecc::fixed_base};
use hoplite::{circuit_vals::ToCircuitVal, commitments::MultiCommitGens};
use libspartan::transcript::{ProofTranscript, Transcript};
use super::{
proof_bullet_reduce::AssignedBulletReductionProof, secq256k1::Secq256k1Chip, utils::Assign,
};
use secpq_curves::group::Curve;
pub struct AssignedDotProductProofLog<'v, F: PrimeField, const N: usize> {
pub bullet_reduction_proof: AssignedBulletReductionProof<'v, F, N>,
pub delta: EcPoint<F, CRTInteger<'v, F>>,
pub beta: EcPoint<F, CRTInteger<'v, F>>,
pub z1: CRTInteger<'v, F>,
pub z2: CRTInteger<'v, F>,
}
#[derive(Clone)]
pub struct ProofLogOfDotProdChip<F: PrimeField, const N: usize, const N_HALF: usize> {
pub secq_chip: Secq256k1Chip<F>,
pub bullet_reduce_chip: BulletReduceChip<F, N_HALF>,
pub window_bits: usize,
}
impl<'v, F: PrimeField, const N: usize, const N_HALF: usize> ProofLogOfDotProdChip<F, N, N_HALF> {
pub fn construct(
secq_chip: Secq256k1Chip<F>,
bullet_reduce_chip: BulletReduceChip<F, N_HALF>,
window_bits: usize,
) -> Self {
Self {
secq_chip,
bullet_reduce_chip,
window_bits,
}
}
pub fn verify(
&self,
ctx: &mut Context<'v, F>,
a: &[CRTInteger<'v, F>; N],
Cx: &EcPoint<F, CRTInteger<'v, F>>,
Cy: &EcPoint<F, CRTInteger<'v, F>>,
proof: &AssignedDotProductProofLog<'v, F, N_HALF>,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
let limb_bits = self.secq_chip.ecc_chip.field_chip.limb_bits;
transcript.append_protocol_name(b"dot product proof (log)");
transcript.append_circuit_point(b"Cx", Cx.clone());
transcript.append_circuit_point(b"Cy", Cy.clone());
transcript.append_message(b"a", b"begin_append_vector");
for a_i in a {
transcript.append_circuit_fq(b"a", a_i.clone());
}
transcript.append_message(b"a", b"end_append_vector");
// Upsilon
let Gamma = self.secq_chip.ecc_chip.add_unequal(ctx, &Cx, &Cy, true);
let a_L = a[0..N_HALF].try_into().unwrap();
let a_R = a[N_HALF..].try_into().unwrap();
let G_L = &gens_n.G[0..N_HALF].try_into().unwrap();
let G_R = &gens_n.G[N_HALF..].try_into().unwrap();
let bullet_reduction_proof = &proof.bullet_reduction_proof;
let upsilon_L = &bullet_reduction_proof.clone().L_vec.try_into().unwrap();
let upsilon_R = &bullet_reduction_proof.clone().R_vec.try_into().unwrap();
let (Gamma_hat, a_hat, g_hat) = self.bullet_reduce_chip.verify(
ctx, &Gamma, a_L, a_R, upsilon_L, upsilon_R, G_L, G_R, transcript,
);
transcript.append_circuit_point(b"delta", proof.delta.clone());
transcript.append_circuit_point(b"beta", proof.beta.clone());
let c = transcript.challenge_scalar(b"c");
let c = Some(c.to_circuit_val()).assign(ctx, &self.secq_chip);
let Gamma_hat_c = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&Gamma_hat,
&c.truncation.limbs,
limb_bits,
self.window_bits,
);
let Gamma_hat_c_beta =
self.secq_chip
.ecc_chip
.add_unequal(ctx, &Gamma_hat_c, &proof.beta, true);
let lhs_1 = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&Gamma_hat_c_beta,
&a_hat.truncation.limbs,
limb_bits,
self.window_bits,
);
let lhs = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &lhs_1, &proof.delta, true);
let G_a_hat = fixed_base::scalar_multiply(
self.secq_chip.ecc_chip.field_chip(),
ctx,
&gens_1.G[0].to_affine(),
&a_hat.truncation.limbs,
limb_bits,
self.window_bits,
);
let rhs_1 = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &G_a_hat, &g_hat, true);
let rhs_2 = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&rhs_1,
&proof.z1.truncation.limbs,
limb_bits,
self.window_bits,
);
let rhs_3 = fixed_base::scalar_multiply(
self.secq_chip.ecc_chip.field_chip(),
ctx,
&gens_1.h.to_affine(),
&proof.z2.truncation.limbs,
limb_bits,
self.window_bits,
);
let rhs = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &rhs_2, &rhs_3, true);
self.secq_chip.ecc_chip.assert_equal(ctx, &lhs, &rhs);
}
}
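The final `assert_equal` enforces the log-sized dot-product verification equation. With $\hat{\Gamma}$, $\hat{a}$, $\hat{g}$ the outputs of the bullet reduction, $G =$ `gens_1.G[0]` and $h =$ `gens_1.h`, it reads

$$\hat{a}\,\big(c\,\hat{\Gamma} + \beta\big) + \delta \;=\; z_1\,\big(\hat{g} + \hat{a}\,G\big) + z_2\,h.$$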

View File

@@ -0,0 +1,90 @@
use super::utils::Assign;
use crate::{chips::secq256k1::Secq256k1Chip, transcript::HopliteTranscript};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::ecc::EcPoint;
use halo2_ecc::{bigint::CRTInteger, ecc::fixed_base};
use hoplite::{
circuit_vals::{CVEqualityProof, ToCircuitVal},
commitments::MultiCommitGens,
};
use libspartan::transcript::{ProofTranscript, Transcript};
use secpq_curves::group::Curve;
pub struct AssignedProofOfEq<'v, F: PrimeField> {
pub alpha: EcPoint<F, CRTInteger<'v, F>>,
pub z: CRTInteger<'v, F>,
}
impl<'v, F: PrimeField> Assign<'v, F, AssignedProofOfEq<'v, F>> for CVEqualityProof {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedProofOfEq<'v, F> {
let alpha = self.alpha.assign(ctx, secq_chip);
let z = self.z.assign(ctx, secq_chip);
AssignedProofOfEq { alpha, z }
}
}
pub struct ProofOfEqChip<F: PrimeField> {
pub secq_chip: Secq256k1Chip<F>,
pub window_bits: usize,
}
impl<'v, F: PrimeField> ProofOfEqChip<F> {
pub fn construct(secq_chip: Secq256k1Chip<F>, window_bits: usize) -> Self {
Self {
secq_chip,
window_bits,
}
}
pub fn verify(
&self,
ctx: &mut Context<'v, F>,
C1: &EcPoint<F, CRTInteger<'v, F>>,
C2: &EcPoint<F, CRTInteger<'v, F>>,
proof: AssignedProofOfEq<'v, F>,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
let limb_bits = self.secq_chip.ecc_chip.field_chip.limb_bits;
let window_bits = self.window_bits;
transcript.append_protocol_name(b"equality proof");
transcript.append_circuit_point(b"C1", C1.clone());
transcript.append_circuit_point(b"C2", C2.clone());
transcript.append_circuit_point(b"alpha", (&proof.alpha).clone());
let lhs = fixed_base::scalar_multiply(
&self.secq_chip.ecc_chip.field_chip,
ctx,
&gens_n.h.to_affine(),
&proof.z.truncation.limbs,
limb_bits,
window_bits,
);
let c = transcript.challenge_scalar(b"c");
let c = Some(c.to_circuit_val()).assign(ctx, &self.secq_chip);
let C1_minus_C2 = self.secq_chip.ecc_chip.sub_unequal(ctx, &C1, &C2, true);
let C1_minus_C2_c = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&C1_minus_C2,
&c.truncation.limbs,
limb_bits,
window_bits,
);
let rhs = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &C1_minus_C2_c, &proof.alpha, true);
self.secq_chip.ecc_chip.assert_equal(ctx, &lhs, &rhs);
}
}
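Spelled out, with $h =$ `gens_n.h` this is the Chaum-Pedersen-style equality check

$$z\,h \;=\; c\,(C_1 - C_2) + \alpha,$$

which can only be satisfied when $C_1 - C_2$ is a commitment to zero, i.e. when $C_1$ and $C_2$ commit to the same value.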

View File

@@ -0,0 +1,97 @@
use super::utils::Assign;
use crate::{
chips::secq256k1::Secq256k1Chip,
transcript::HopliteTranscript,
{FpChip, FqChip},
};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::bigint::CRTInteger;
use halo2_ecc::ecc::{EcPoint, EccChip};
use halo2_ecc::fields::FieldChip;
use halo2_proofs::circuit::Value;
use hoplite::{
circuit_vals::{CVKnowledgeProof, ToCircuitVal},
commitments::MultiCommitGens,
};
use libspartan::transcript::{ProofTranscript, Transcript};
use super::pedersen_commit::PedersenCommitChip;
pub struct AssignedProofOfOpening<'v, F: PrimeField> {
pub alpha: EcPoint<F, CRTInteger<'v, F>>,
pub z1: CRTInteger<'v, F>,
pub z2: CRTInteger<'v, F>,
}
impl<'v, F: PrimeField> Assign<'v, F, AssignedProofOfOpening<'v, F>> for CVKnowledgeProof {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedProofOfOpening<'v, F> {
let alpha = self.alpha.assign(ctx, secq_chip);
let z1 = self.z1.assign(ctx, secq_chip);
let z2 = self.z2.assign(ctx, secq_chip);
AssignedProofOfOpening { alpha, z1, z2 }
}
}
pub struct ZKKnowledgeProofChip<F: PrimeField> {
pub ecc_chip: EccChip<F, FpChip<F>>,
pub fp_chip: FpChip<F>,
pub fq_chip: FqChip<F>,
pub pedersen_chip: PedersenCommitChip<F>,
pub window_bits: usize,
}
impl<'v, F: PrimeField> ZKKnowledgeProofChip<F> {
pub fn construct(
ecc_chip: EccChip<F, FpChip<F>>,
fp_chip: FpChip<F>,
fq_chip: FqChip<F>,
pedersen_chip: PedersenCommitChip<F>,
window_bits: usize,
) -> Self {
Self {
ecc_chip,
fp_chip,
fq_chip,
pedersen_chip,
window_bits,
}
}
pub fn verify(
&self,
ctx: &mut Context<'v, F>,
C: &EcPoint<F, CRTInteger<'v, F>>,
proof: AssignedProofOfOpening<'v, F>,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
let limb_bits = self.fp_chip.limb_bits;
transcript.append_protocol_name(b"knowledge proof");
let alpha = &proof.alpha;
transcript.append_circuit_point(b"C", C.clone());
transcript.append_circuit_point(b"alpha", alpha.clone());
let c = &transcript.challenge_scalar(b"c");
let c = self.fq_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(c.to_circuit_val())),
);
let lhs = self.pedersen_chip.commit(ctx, &proof.z1, &proof.z2, gens_n);
let C_mult_c =
self.ecc_chip
.scalar_mult(ctx, C, &c.truncation.limbs, limb_bits, self.window_bits);
let rhs = self.ecc_chip.add_unequal(ctx, &C_mult_c, &alpha, true);
self.ecc_chip.assert_equal(ctx, &lhs, &rhs);
}
}
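This is the Schnorr-style proof of knowledge of a commitment opening: with $G =$ `gens_n.G[0]` and $h =$ `gens_n.h`, the chip accepts iff

$$z_1\,G + z_2\,h \;=\; c\,C + \alpha.$$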

View File

@@ -0,0 +1,162 @@
use super::utils::{Assign, AssignArray};
use crate::{chips::secq256k1::Secq256k1Chip, transcript::HopliteTranscript};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::ecc::EcPoint;
use halo2_ecc::{bigint::CRTInteger, ecc::fixed_base};
use hoplite::{
circuit_vals::{CVProductProof, ToCircuitVal},
commitments::MultiCommitGens,
};
use libspartan::transcript::{ProofTranscript, Transcript};
use secpq_curves::group::Curve;
use super::pedersen_commit::PedersenCommitChip;
pub struct AssignedProofOfProd<'v, F: PrimeField> {
pub alpha: EcPoint<F, CRTInteger<'v, F>>,
pub beta: EcPoint<F, CRTInteger<'v, F>>,
pub delta: EcPoint<F, CRTInteger<'v, F>>,
pub z: [CRTInteger<'v, F>; 5],
}
impl<'v, F: PrimeField> Assign<'v, F, AssignedProofOfProd<'v, F>> for CVProductProof {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedProofOfProd<'v, F> {
let alpha = self.alpha.assign(ctx, secq_chip);
let beta = self.beta.assign(ctx, secq_chip);
let delta = self.delta.assign(ctx, secq_chip);
let z = self.z.assign(ctx, secq_chip);
AssignedProofOfProd {
alpha,
beta,
delta,
z,
}
}
}
pub struct ProofOfProdChip<F: PrimeField> {
pub secq_chip: Secq256k1Chip<F>,
pub pedersen_chip: PedersenCommitChip<F>,
pub window_bits: usize,
}
impl<'v, F: PrimeField> ProofOfProdChip<F> {
pub fn construct(
secq_chip: Secq256k1Chip<F>,
pedersen_chip: PedersenCommitChip<F>,
window_bits: usize,
) -> Self {
Self {
secq_chip,
pedersen_chip,
window_bits,
}
}
pub fn verify(
&self,
ctx: &mut Context<'v, F>,
X: &EcPoint<F, CRTInteger<'v, F>>,
Y: &EcPoint<F, CRTInteger<'v, F>>,
Z: &EcPoint<F, CRTInteger<'v, F>>,
proof: AssignedProofOfProd<'v, F>,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
let limb_bits = self.secq_chip.ecc_chip.field_chip.limb_bits;
let window_bits = self.window_bits;
transcript.append_protocol_name(b"product proof");
transcript.append_circuit_point(b"X", X.clone());
transcript.append_circuit_point(b"Y", Y.clone());
transcript.append_circuit_point(b"Z", Z.clone());
transcript.append_circuit_point(b"alpha", (&proof.alpha).clone());
transcript.append_circuit_point(b"beta", (&proof.beta).clone());
transcript.append_circuit_point(b"delta", (&proof.delta).clone());
let c = transcript.challenge_scalar(b"c");
let c = Some(c.to_circuit_val()).assign(ctx, &self.secq_chip);
let z1 = &proof.z[0];
let z2 = &proof.z[1];
let z3 = &proof.z[2];
let z4 = &proof.z[3];
let z5 = &proof.z[4];
// (7)
let X_c = self.secq_chip.ecc_chip.scalar_mult(
ctx,
X,
&c.truncation.limbs,
limb_bits,
window_bits,
);
let lhs_7 = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &X_c, &proof.alpha, true);
let rhs_7 = self.pedersen_chip.commit(ctx, &z1, &z2, gens_n);
self.secq_chip.ecc_chip.assert_equal(ctx, &lhs_7, &rhs_7);
// (8)
let Y_c = self.secq_chip.ecc_chip.scalar_mult(
ctx,
Y,
&c.truncation.limbs,
limb_bits,
window_bits,
);
let lhs_8 = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &Y_c, &proof.beta, true);
let rhs_8 = self.pedersen_chip.commit(ctx, &z3, &z4, gens_n);
self.secq_chip.ecc_chip.assert_equal(ctx, &lhs_8, &rhs_8);
// (9)
let Z_c = self.secq_chip.ecc_chip.scalar_mult(
ctx,
Z,
&c.truncation.limbs,
limb_bits,
window_bits,
);
let lhs_9 = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &Z_c, &proof.delta, true);
let rhs_9_gx = self.secq_chip.ecc_chip.scalar_mult(
ctx,
X,
&z3.truncation.limbs,
limb_bits,
window_bits,
);
let rhs_9_hb = fixed_base::scalar_multiply(
&self.secq_chip.ecc_chip.field_chip,
ctx,
&gens_n.h.to_affine(),
&z5.truncation.limbs,
limb_bits,
window_bits,
);
let rhs_9 = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &rhs_9_gx, &rhs_9_hb, true);
self.secq_chip.ecc_chip.assert_equal(ctx, &lhs_9, &rhs_9);
}
}
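With $G =$ `gens_n.G[0]` and $h =$ `gens_n.h`, the three constraints enforce openings of $X$ and $Y$ and that $Z$ commits to the product of the committed values:

$$c\,X + \alpha = z_1\,G + z_2\,h, \qquad c\,Y + \beta = z_3\,G + z_4\,h, \qquad c\,Z + \delta = z_3\,X + z_5\,h.$$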

View File

@@ -0,0 +1,81 @@
use super::utils::{Assign, AssignArray};
use crate::{FpChip, Fq, FqChip};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::{
bigint::CRTInteger,
ecc::{EcPoint, EccChip},
fields::FieldChip,
};
use halo2_proofs::circuit::Value;
use secpq_curves::Secq256k1;
#[derive(Clone)]
pub struct Secq256k1Chip<F: PrimeField> {
pub ecc_chip: EccChip<F, FpChip<F>>,
pub fq_chip: FqChip<F>,
}
impl<F: PrimeField> Secq256k1Chip<F> {
pub fn construct(ecc_chip: EccChip<F, FpChip<F>>, fq_chip: FqChip<F>) -> Self {
Self { ecc_chip, fq_chip }
}
}
impl<'v, F: PrimeField> Assign<'v, F, EcPoint<F, CRTInteger<'v, F>>> for Option<Secq256k1> {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> EcPoint<F, CRTInteger<'v, F>> {
secq_chip.ecc_chip.load_private(
ctx,
(
self.map_or(Value::unknown(), |p| Value::known(p.x)),
self.map_or(Value::unknown(), |p| Value::known(p.y)),
),
)
}
}
impl<'v, F: PrimeField> Assign<'v, F, CRTInteger<'v, F>> for Option<Fq> {
fn assign(&self, ctx: &mut Context<'v, F>, secq_chip: &Secq256k1Chip<F>) -> CRTInteger<'v, F> {
secq_chip.fq_chip.load_private(
ctx,
self.map_or(Value::unknown(), |z| {
FqChip::<F>::fe_to_witness(&Value::known(z))
}),
)
}
}
impl<'v, F: PrimeField, const N: usize> AssignArray<'v, F, CRTInteger<'v, F>, N>
for [Option<Fq>; N]
{
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> [CRTInteger<'v, F>; N] {
self.iter()
.map(|v| v.assign(ctx, secq_chip))
.collect::<Vec<CRTInteger<'v, F>>>()
.try_into()
.unwrap()
}
}
impl<'v, F: PrimeField, const N: usize> AssignArray<'v, F, EcPoint<F, CRTInteger<'v, F>>, N>
for [Option<Secq256k1>; N]
{
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> [EcPoint<F, CRTInteger<'v, F>>; N] {
self.iter()
.map(|v| v.assign(ctx, secq_chip))
.collect::<Vec<EcPoint<F, CRTInteger<'v, F>>>>()
.try_into()
.unwrap()
}
}
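Wrapping witnesses in `Option` lets the same circuit code serve both key generation (no witness yet, so cells load `Value::unknown()`) and proving. A hedged usage sketch, with `ctx`, `secq_chip`, and a witness point `p: Secq256k1` assumed in scope:

// Keygen / shape-only synthesis: no witness available yet.
let unknown_point = None::<Secq256k1>.assign(ctx, &secq_chip);
// Proving: concrete witnesses are known.
let known_point = Some(p).assign(ctx, &secq_chip);
let known_scalars = [Some(Fq::from(0u64)); 4].assign(ctx, &secq_chip);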

View File

@@ -0,0 +1,270 @@
use crate::{
chips::{
dotprod::{AssignedZKDotProdProof, ZKDotProdChip},
pedersen_commit::PedersenCommitChip,
secq256k1::Secq256k1Chip,
},
transcript::HopliteTranscript,
{FpChip, Fq, FqChip},
};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::bigint::CRTInteger;
use halo2_ecc::ecc::{fixed_base, EcPoint, EccChip};
use halo2_ecc::fields::FieldChip;
use halo2_proofs::circuit::Value;
use hoplite::{
circuit_vals::{CVSumCheckProof, ToCircuitVal},
commitments::MultiCommitGens,
};
use libspartan::transcript::{ProofTranscript, Transcript};
use secpq_curves::group::Group;
use secpq_curves::{group::Curve, Secq256k1};
use super::utils::{Assign, AssignArray};
#[derive(Clone)]
pub struct AssignedZKSumCheck<'v, const N_ROUNDS: usize, const DIMENSION: usize, F: PrimeField> {
pub comm_polys: [EcPoint<F, CRTInteger<'v, F>>; N_ROUNDS],
pub comm_evals: [EcPoint<F, CRTInteger<'v, F>>; N_ROUNDS],
pub proofs: [AssignedZKDotProdProof<'v, DIMENSION, F>; N_ROUNDS],
}
pub trait AssignZKSumCheckProof<'v, const N_ROUNDS: usize, const DIMENSION: usize, F: PrimeField> {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedZKSumCheck<'v, N_ROUNDS, DIMENSION, F>;
}
impl<'v, const N_ROUNDS: usize, const DIMENSION: usize, F: PrimeField>
AssignZKSumCheckProof<'v, N_ROUNDS, DIMENSION, F> for CVSumCheckProof<N_ROUNDS, DIMENSION>
{
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedZKSumCheck<'v, N_ROUNDS, DIMENSION, F> {
let comm_evals = self.comm_evals.assign(ctx, secq_chip);
let comm_polys = self.comm_polys.assign(ctx, secq_chip);
let proofs = self
.proofs
.iter()
.map(|proof| proof.assign(ctx, secq_chip))
.collect::<Vec<AssignedZKDotProdProof<'v, DIMENSION, F>>>()
.try_into()
.unwrap();
AssignedZKSumCheck {
comm_evals,
comm_polys,
proofs,
}
}
}
pub struct ZKSumCheckChip<const N_ROUNDS: usize, const DIMENSION: usize, F: PrimeField> {
pub ecc_chip: EccChip<F, FpChip<F>>,
pub fp_chip: FpChip<F>,
pub fq_chip: FqChip<F>,
pub pedersen_chip: PedersenCommitChip<F>,
pub zkdotprod_chip: ZKDotProdChip<DIMENSION, F>,
pub window_bits: usize,
}
impl<const N_ROUNDS: usize, const DIMENSION: usize, F: PrimeField>
ZKSumCheckChip<N_ROUNDS, DIMENSION, F>
{
pub fn construct(
ecc_chip: EccChip<F, FpChip<F>>,
fp_chip: FpChip<F>,
fq_chip: FqChip<F>,
pedersen_chip: PedersenCommitChip<F>,
zkdotprod_chip: ZKDotProdChip<DIMENSION, F>,
) -> Self {
Self {
ecc_chip,
fp_chip,
fq_chip,
pedersen_chip,
zkdotprod_chip,
window_bits: 4,
}
}
pub fn verify<'v>(
&self,
ctx: &mut Context<'v, F>,
proof: &AssignedZKSumCheck<'v, N_ROUNDS, DIMENSION, F>,
gens_n: &MultiCommitGens,
gens_1: &MultiCommitGens,
target_sum: EcPoint<F, CRTInteger<'v, F>>,
target_sum_identity: bool,
transcript: &mut Transcript,
) -> (EcPoint<F, CRTInteger<'v, F>>, [CRTInteger<'v, F>; N_ROUNDS]) {
let limb_bits = self.fp_chip.limb_bits;
let num_limbs = self.fp_chip.num_limbs;
let mut r = vec![];
for i in 0..N_ROUNDS {
// Load claimed_sum
let com_eval = &proof.comm_evals[i];
let com_poly = &proof.comm_polys[i];
transcript.append_circuit_point(b"comm_poly", com_poly.clone());
let r_i = &transcript.challenge_scalar(b"challenge_nextround");
let r_i = self.fp_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(r_i.to_circuit_val())),
);
r.push(r_i.clone());
let com_round_sum = if i == 0 {
&target_sum
} else {
&proof.comm_evals[i - 1]
};
transcript.append_circuit_point(b"comm_claim_per_round", com_round_sum.clone());
transcript.append_circuit_point(b"comm_eval", com_eval.clone());
// Convert the CRT integer back into native form
// (it might be easier to use CRT integers in the reference implementation as well).
// A bunch of hashes still need to be appended to the transcript,
// and the point should be SEC1-encoded as well.
let w_scalar = transcript.challenge_vector(b"combine_two_claims_to_one", 2);
let w_0: CRTInteger<F> = self.fq_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(w_scalar[0].to_circuit_val())),
);
let w_1: CRTInteger<F> = self.fq_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(w_scalar[1].to_circuit_val())),
);
let tau_0 = if target_sum_identity {
fixed_base::scalar_multiply(
&self.fp_chip,
ctx,
&Secq256k1::identity().to_affine(),
&w_0.truncation.limbs,
limb_bits,
self.window_bits,
)
} else {
self.ecc_chip.scalar_mult(
ctx,
&com_round_sum,
&w_0.truncation.limbs,
limb_bits,
self.window_bits,
)
};
let tau_1 = self.ecc_chip.scalar_mult(
ctx,
&com_eval,
&w_1.truncation.limbs,
limb_bits,
self.window_bits,
);
let tau = if target_sum_identity {
tau_1
} else {
self.ecc_chip.add_unequal(ctx, &tau_0, &tau_1, true)
};
let mut a_sc = vec![];
let mut a_eval_base = vec![]; // All ones
let mut a_eval = vec![];
for i in 0..DIMENSION {
// TODO These should be instance column values?
if i == 0 {
a_sc.push(
self.fq_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(Fq::from(2))),
),
);
} else {
a_sc.push(
self.fq_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(Fq::from(1))),
),
);
}
}
for _ in 0..DIMENSION {
// TODO These should be instance column values?
a_eval_base.push(
self.fq_chip
.load_private(ctx, FqChip::<F>::fe_to_witness(&Value::known(Fq::from(1)))),
);
}
a_eval.push(
self.fq_chip
.load_private(ctx, FqChip::<F>::fe_to_witness(&Value::known(Fq::from(1)))),
);
for i in 1..DIMENSION {
// TODO These should be instance column values?
if i == 1 {
let a_eval_i_no_carry = self.fq_chip.mul_no_carry(ctx, &a_eval_base[i], &r_i);
let a_eval_i = self.fq_chip.carry_mod(ctx, &a_eval_i_no_carry);
a_eval.push(a_eval_i);
} else {
let a_eval_i_no_carry = self.fq_chip.mul_no_carry(ctx, &a_eval[i - 1], &r_i);
let a_eval_i = self.fq_chip.carry_mod(ctx, &a_eval_i_no_carry);
a_eval.push(a_eval_i);
}
}
let mut a = vec![];
for i in 0..DIMENSION {
let a_i_lhs = self.fq_chip.mul_no_carry(ctx, &a_sc[i], &w_0);
let a_i_rhs = self.fq_chip.mul_no_carry(ctx, &a_eval[i], &w_1);
let a_i_no_carry = self.fq_chip.add_no_carry(ctx, &a_i_lhs, &a_i_rhs);
let a_i = self.fq_chip.carry_mod(ctx, &a_i_no_carry);
a.push(a_i);
}
let zk_dot_prod_chip = ZKDotProdChip::construct(
self.ecc_chip.clone(),
self.fq_chip.clone(),
self.pedersen_chip.clone(),
);
let round_proof: &AssignedZKDotProdProof<DIMENSION, F> = &proof.proofs[i];
zk_dot_prod_chip.verify(
ctx,
&tau,
a.try_into().unwrap(),
com_poly,
round_proof,
gens_1,
gens_n,
transcript,
);
}
self.fp_chip.finalize(ctx);
(
proof.comm_evals[proof.comm_evals.len() - 1].clone(),
r.try_into().unwrap(),
)
}
}
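The two vectors built above have a direct reading. For a round polynomial $p(X) = c_0 + c_1 X + \dots + c_d X^d$ committed in coefficient form (so DIMENSION $= d + 1$), `a_sc` $= (2, 1, \dots, 1)$ encodes $p(0) + p(1) = 2c_0 + c_1 + \dots + c_d$ and `a_eval` $= (1, r_i, r_i^2, \dots)$ encodes $p(r_i)$; the dot-product proof therefore checks the random combination $w_0\,(p(0) + p(1)) + w_1\,p(r_i)$ against the combined commitment `tau`. A hedged off-circuit sketch of the combined public vector:

use ff::{Field, PrimeField};

// <round_vector(..), (c_0..c_d)> == w0 * (p(0) + p(1)) + w1 * p(r_i)
fn round_vector<F: PrimeField>(w0: F, w1: F, r_i: F, dimension: usize) -> Vec<F> {
    (0..dimension)
        .map(|j| {
            let a_sc = if j == 0 { F::from(2) } else { F::from(1) };
            let a_eval = r_i.pow_vartime([j as u64]); // r_i^j
            w0 * a_sc + w1 * a_eval
        })
        .collect()
}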

View File

@@ -0,0 +1,10 @@
use super::secq256k1::Secq256k1Chip;
use halo2_base::{utils::PrimeField, Context};
pub trait Assign<'v, F: PrimeField, A> {
fn assign(&self, ctx: &mut Context<'v, F>, secq_chip: &Secq256k1Chip<F>) -> A;
}
pub trait AssignArray<'v, F: PrimeField, A, const N: usize> {
fn assign(&self, ctx: &mut Context<'v, F>, secq_chip: &Secq256k1Chip<F>) -> [A; N];
}

View File

@@ -0,0 +1,765 @@
#![allow(non_snake_case)]
mod chips;
mod transcript;
use chips::{
dotprod::ZKDotProdChip,
eval_poly::EvalMLPolyChip,
pedersen_commit::PedersenCommitChip,
poly_eval_proof::{AssignN, PolyEvalProofChip},
proof_bullet_reduce::BulletReduceChip,
proof_log_of_dotprod::ProofLogOfDotProdChip,
proof_of_eq::ProofOfEqChip,
proof_of_opening::ZKKnowledgeProofChip,
proof_of_prod::ProofOfProdChip,
secq256k1::Secq256k1Chip,
sumcheck::{AssignZKSumCheckProof, ZKSumCheckChip},
utils::{Assign, AssignArray},
};
use halo2_base::utils::{modulus, PrimeField};
use halo2_ecc::fields::FieldChip;
use halo2_ecc::{
ecc::{fixed_base::FixedEcPoint, EccChip},
fields::fp::{FpConfig, FpStrategy},
};
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner},
plonk,
plonk::{Circuit, Column, ConstraintSystem, Instance},
};
use hoplite::circuit_vals::{FromCircuitVal, ToCircuitVal};
use hoplite::{
circuit_vals::{
CVEqualityProof, CVKnowledgeProof, CVPolyCommitment, CVPolyEvalProof, CVProductProof,
CVSumCheckProof,
},
commitments::{Commitments, MultiCommitGens},
};
use libspartan::{
group::CompressedGroup,
transcript::{ProofTranscript, Transcript},
};
use num_bigint::BigUint;
use num_traits::{One, Zero};
use secpq_curves::{group::Curve, Secq256k1};
use transcript::HopliteTranscript;
pub type Fp = secpq_curves::Fq;
pub type Fq = secpq_curves::Fp;
pub type FqChip<F> = FpConfig<F, secpq_curves::Fp>;
pub type FpChip<F> = FpConfig<F, secpq_curves::Fq>;
#[derive(Clone, Debug)]
pub struct HopliteCircuitConfig<F: PrimeField> {
field_config: FpChip<F>,
/// Public inputs
instance: Column<Instance>,
window_bits: usize,
}
// Spartan NIZK verification circuit
pub struct HopliteCircuit<
const NUM_INPUTS: usize,
const NUM_CONSTRAINTS: usize,
const NUM_VARS: usize,
const NUM_VARS_H: usize,
> {
pub inst: Vec<u8>,
pub input: Vec<Fq>,
pub comm_vars: CVPolyCommitment<NUM_VARS>,
pub sc_proof_phase1: CVSumCheckProof<NUM_CONSTRAINTS, 4>,
pub claims_phase2: (
Option<Secq256k1>,
Option<Secq256k1>,
Option<Secq256k1>,
Option<Secq256k1>,
),
pub pok_claims_phase2: (CVKnowledgeProof, CVProductProof),
pub proof_eq_sc_phase1: CVEqualityProof,
pub sc_proof_phase2: CVSumCheckProof<14, 3>,
pub comm_vars_at_ry: Option<Secq256k1>,
pub proof_eval_vars_at_ry: CVPolyEvalProof<NUM_VARS_H>,
pub proof_eq_sc_phase2: CVEqualityProof,
pub gens_sc_1: MultiCommitGens,
pub gens_sc_3: MultiCommitGens,
pub gens_sc_4: MultiCommitGens,
pub gens_pc_1: MultiCommitGens,
pub gens_pc_n: MultiCommitGens,
}
pub struct CircuitParams {
strategy: FpStrategy,
degree: u32,
num_advice: usize,
num_lookup_advice: usize,
num_fixed: usize,
lookup_bits: usize,
limb_bits: usize,
num_limbs: usize,
}
impl<
const NUM_INPUTS: usize,
const NUM_CONSTRAINTS: usize,
const NUM_VARS: usize,
const NUM_VARS_H: usize,
F: PrimeField,
> Circuit<F> for HopliteCircuit<NUM_INPUTS, NUM_CONSTRAINTS, NUM_VARS, NUM_VARS_H>
{
type Config = HopliteCircuitConfig<F>;
type FloorPlanner = SimpleFloorPlanner;
fn configure(meta: &mut ConstraintSystem<F>) -> Self::Config {
let params = CircuitParams {
strategy: FpStrategy::Simple,
degree: 21,
num_advice: 20,
num_lookup_advice: 6,
num_fixed: 1,
lookup_bits: 17,
limb_bits: 88,
num_limbs: 3,
};
let field_config = FpChip::<F>::configure(
meta,
params.strategy,
&[params.num_advice],
&[params.num_lookup_advice],
params.num_fixed,
params.lookup_bits,
params.limb_bits,
params.num_limbs,
modulus::<Fp>(),
0,
params.degree as usize,
);
let instance = meta.instance_column();
meta.enable_equality(instance);
HopliteCircuitConfig {
instance,
field_config,
window_bits: 4,
}
}
fn without_witnesses(&self) -> Self {
HopliteCircuit::<NUM_INPUTS, NUM_CONSTRAINTS, NUM_VARS, NUM_VARS_H> {
comm_vars: CVPolyCommitment::<NUM_VARS>::default(),
inst: vec![],
input: vec![Fq::zero(); NUM_INPUTS],
sc_proof_phase1: CVSumCheckProof::<NUM_CONSTRAINTS, 4>::default(),
claims_phase2: (None, None, None, None),
pok_claims_phase2: (CVKnowledgeProof::default(), CVProductProof::default()),
proof_eq_sc_phase1: CVEqualityProof::default(),
sc_proof_phase2: CVSumCheckProof::<14, 3>::default(),
comm_vars_at_ry: None,
proof_eval_vars_at_ry: CVPolyEvalProof::<NUM_VARS_H>::default(),
proof_eq_sc_phase2: CVEqualityProof::default(),
gens_sc_1: MultiCommitGens::default(),
gens_sc_3: MultiCommitGens::default(),
gens_sc_4: MultiCommitGens::default(),
gens_pc_1: MultiCommitGens::default(),
gens_pc_n: MultiCommitGens::default(),
}
}
fn synthesize(
&self,
config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), plonk::Error> {
// Number of challenge_tau scalars drawn below
let n_rounds = 1;
let fp_chip = config.field_config;
fp_chip.range.load_lookup_table(&mut layouter)?;
// Actually perform the calculation
let limb_bits = fp_chip.limb_bits;
let num_limbs = fp_chip.num_limbs;
let _num_fixed = fp_chip.range.gate.constants.len();
let _lookup_bits = fp_chip.range.lookup_bits;
let _num_advice = fp_chip.range.gate.num_advice;
// We can construct the fq_chip from the config of the fp_chip
// (the fq_chip can use the same columns as the fp_chip)
let fq_chip =
FqChip::<F>::construct(fp_chip.range.clone(), limb_bits, num_limbs, modulus::<Fq>());
let ecc_chip = EccChip::construct(fp_chip.clone());
let secq_chip = Secq256k1Chip::construct(ecc_chip.clone(), fq_chip.clone());
let pedersen_chip = PedersenCommitChip::construct(ecc_chip.clone(), fp_chip.clone());
let phase_1_zkdotprod_chip: ZKDotProdChip<4, F> =
ZKDotProdChip::construct(ecc_chip.clone(), fq_chip.clone(), pedersen_chip.clone());
let phase_1_zksumcheck_chip = ZKSumCheckChip::construct(
ecc_chip.clone(),
fp_chip.clone(),
fq_chip.clone(),
pedersen_chip.clone(),
phase_1_zkdotprod_chip.clone(),
);
let knowledge_proof_chip = ZKKnowledgeProofChip::construct(
ecc_chip.clone(),
fp_chip.clone(),
fq_chip.clone(),
pedersen_chip.clone(),
4,
);
let proof_of_prod_chip =
ProofOfProdChip::construct(secq_chip.clone(), pedersen_chip.clone(), 4);
let proof_of_eq_chip = ProofOfEqChip::construct(secq_chip.clone(), 4);
let eval_poly_chip = EvalMLPolyChip::<F, NUM_INPUTS>::construct(fp_chip.clone());
// let mut results = Vec::new();
layouter.assign_region(
|| "",
|region| {
let mut ctx = fp_chip.new_context(region);
let mut transcript = Transcript::new(b"test_verify");
transcript.append_protocol_name(b"Spartan NIZK proof");
transcript.append_message(b"R1CSInstanceDigest", &self.inst);
transcript.append_protocol_name(b"R1CS proof");
// Append input to the transcript
transcript.append_message(b"input", b"begin_append_vector");
for item in &self.input {
transcript.append_message(b"input", &item.to_bytes());
}
transcript.append_message(b"input", b"end_append_vector");
// Append poly commitment to the transcript
transcript.append_message(b"poly_commitment", b"poly_commitment_begin");
for comm_var in self.comm_vars.C {
transcript.append_point(
b"poly_commitment_share",
&CompressedGroup::from_circuit_val(&comm_var.unwrap()),
);
}
transcript.append_message(b"poly_commitment", b"poly_commitment_end");
let phase1_expected_sum = Fq::zero().commit(&Fq::zero(), &self.gens_sc_1);
let phase1_expected_sum =
FixedEcPoint::from_curve(phase1_expected_sum.to_affine(), num_limbs, limb_bits);
let phase1_expected_sum = FixedEcPoint::assign(
phase1_expected_sum,
&fp_chip,
&mut ctx,
fp_chip.native_modulus(),
);
let _tau: Vec<Fq> = transcript
.challenge_vector(b"challenge_tau", n_rounds)
.iter()
.map(|tau_i| tau_i.to_circuit_val())
.collect();
let phase1_sc_proof = self.sc_proof_phase1.assign(&mut ctx, &secq_chip);
let (comm_claim_post_phase1, _rx) = phase_1_zksumcheck_chip.verify(
&mut ctx,
&phase1_sc_proof,
&self.gens_sc_4,
&self.gens_sc_1,
phase1_expected_sum,
true,
&mut transcript,
);
// Verify Az * Bz = Cz
let (comm_Az_claim, comm_Bz_claim, comm_Cz_claim, comm_prod_Az_Bz_claims) =
&self.claims_phase2;
let (pok_Cz_claim, proof_prod) = &self.pok_claims_phase2;
let pok_Cz_claim = pok_Cz_claim.assign(&mut ctx, &secq_chip);
let proof_prod = proof_prod.assign(&mut ctx, &secq_chip);
let comm_Cz_claim = comm_Cz_claim.assign(&mut ctx, &secq_chip);
// Assign points
let comm_Az_claim = comm_Az_claim.assign(&mut ctx, &secq_chip);
let comm_Bz_claim = comm_Bz_claim.assign(&mut ctx, &secq_chip);
let comm_prod_Az_Bz_claims = comm_prod_Az_Bz_claims.assign(&mut ctx, &secq_chip);
knowledge_proof_chip.verify(
&mut ctx,
&comm_Cz_claim,
pok_Cz_claim,
&self.gens_sc_1,
&mut transcript,
);
proof_of_prod_chip.verify(
&mut ctx,
&comm_Az_claim,
&comm_Bz_claim,
&comm_Cz_claim,
proof_prod,
&self.gens_sc_1,
&mut transcript,
);
transcript.append_circuit_point(b"comm_Az_claim", comm_Az_claim.clone());
transcript.append_circuit_point(b"comm_Bz_claim", comm_Bz_claim.clone());
transcript.append_circuit_point(b"comm_Cz_claim", comm_Cz_claim.clone());
transcript.append_circuit_point(
b"comm_prod_Az_Bz_claims",
comm_prod_Az_Bz_claims.clone(),
);
// eq_eval
let expected_claim_post_phase1 =
ecc_chip.sub_unequal(&mut ctx, &comm_prod_Az_Bz_claims, &comm_Cz_claim, true);
// eq_tau_rx;
let proof_eq_sc_phase1 = self.proof_eq_sc_phase1.assign(&mut ctx, &secq_chip);
proof_of_eq_chip.verify(
&mut ctx,
&expected_claim_post_phase1,
&comm_claim_post_phase1,
proof_eq_sc_phase1,
&self.gens_sc_1,
&mut transcript,
);
// (the "challenege" spelling matches libspartan's transcript labels)
let r_A = transcript.challenge_scalar(b"challenege_Az");
let r_B = transcript.challenge_scalar(b"challenege_Bz");
let r_C = transcript.challenge_scalar(b"challenege_Cz");
let r_A = Some(r_A.to_circuit_val()).assign(&mut ctx, &secq_chip);
let r_B = Some(r_B.to_circuit_val()).assign(&mut ctx, &secq_chip);
let r_C = Some(r_C.to_circuit_val()).assign(&mut ctx, &secq_chip);
// M(r_y) = r_A * comm_Az_claim + r_B * comm_Bz_claim + r_C * comm_Cz_claim;
let r_A_comm_Az = ecc_chip.scalar_mult(
&mut ctx,
&comm_Az_claim,
&r_A.truncation.limbs,
limb_bits,
4,
);
let r_B_comm_Bz = ecc_chip.scalar_mult(
&mut ctx,
&comm_Bz_claim,
&r_B.truncation.limbs,
limb_bits,
4,
);
let r_C_comm_Cz = ecc_chip.scalar_mult(
&mut ctx,
&comm_Cz_claim,
&r_C.truncation.limbs,
limb_bits,
4,
);
let r_AB_comm_ABz =
ecc_chip.add_unequal(&mut ctx, &r_A_comm_Az, &r_B_comm_Bz, true);
let comm_claim_phase2 =
ecc_chip.add_unequal(&mut ctx, &r_AB_comm_ABz, &r_C_comm_Cz, true);
let phase_2_zkdotprod_chip: ZKDotProdChip<3, F> = ZKDotProdChip::construct(
ecc_chip.clone(),
fq_chip.clone(),
pedersen_chip.clone(),
);
let phase_2_zksumcheck_chip = ZKSumCheckChip::construct(
ecc_chip.clone(),
fp_chip.clone(),
fq_chip.clone(),
pedersen_chip.clone(),
phase_2_zkdotprod_chip.clone(),
);
let sc_proof_phase2 = self.sc_proof_phase2.assign(&mut ctx, &secq_chip);
let (comm_claim_post_phase2, ry) = phase_2_zksumcheck_chip.verify(
&mut ctx,
&sc_proof_phase2,
&self.gens_sc_3,
&self.gens_sc_1,
comm_claim_phase2,
false,
&mut transcript,
);
let comm_vars = self.comm_vars.C.assign(&mut ctx, &secq_chip);
let bullet_reduce_chip =
BulletReduceChip::construct(secq_chip.clone(), pedersen_chip.clone(), 4);
let proof_of_log_dotprod_chip = ProofLogOfDotProdChip::construct(
secq_chip.clone(),
bullet_reduce_chip.clone(),
4,
);
let poly_eval_proof_chip = PolyEvalProofChip::construct(
secq_chip.clone(),
proof_of_log_dotprod_chip.clone(),
4,
);
let poly_eval_proof = self.proof_eval_vars_at_ry.assign(&mut ctx, &secq_chip);
let comm_vars_at_ry = self.comm_vars_at_ry.assign(&mut ctx, &secq_chip);
poly_eval_proof_chip.verify(
&mut ctx,
(&ry[1..]).try_into().unwrap(),
&comm_vars_at_ry,
&comm_vars.try_into().unwrap(),
poly_eval_proof,
&self.gens_pc_1,
&self.gens_pc_n,
&mut transcript,
);
// Interpolate the input as a multilinear polynomial and evaluate at ry[1..]
let mut input_with_one = vec![fp_chip.load_constant(&mut ctx, BigUint::one())];
// TODO: this starts at index 1 and so skips self.input[0];
// double-check against the reference verifier's input handling.
for i in 1..self.input.len() {
input_with_one.push(fp_chip.load_constant(
&mut ctx,
BigUint::from_bytes_le(&self.input[i].to_bytes()),
));
}
let poly_input_eval = eval_poly_chip.eval(
&mut ctx,
input_with_one.as_slice().try_into().unwrap(),
ry[1..].try_into().unwrap(),
);
let blinder = fp_chip.load_constant(&mut ctx, BigUint::zero());
pedersen_chip.commit(&mut ctx, &poly_input_eval, &blinder, &self.gens_pc_1);
// TODO: TBD
Ok(())
},
)?;
Ok(())
}
}
#[cfg(test)]
#[allow(non_camel_case_types)]
mod tests {
use super::*;
use ark_std::{end_timer, start_timer};
use bincode;
use circuit_reader::load_as_spartan_inst;
use halo2_base::utils::{decompose_biguint, fs::gen_srs};
use halo2_proofs::{dev::MockProver, halo2curves::bn256::Fr};
use halo2_proofs::{
halo2curves::bn256::{Bn256, G1Affine},
plonk::{create_proof, keygen_pk, keygen_vk, verify_proof},
poly::{
commitment::ParamsProver,
kzg::{
commitment::{KZGCommitmentScheme, ParamsVerifierKZG},
multiopen::{ProverSHPLONK, VerifierSHPLONK},
strategy::SingleStrategy,
},
},
transcript::{
Blake2bRead, Blake2bWrite, Challenge255, TranscriptReadBuffer, TranscriptWriterBuffer,
},
};
use hoplite::{circuit_vals::ToCircuitVal, verify_nizk};
use libspartan::{
transcript::Transcript, InputsAssignment, Instance, NIZKGens, VarsAssignment, NIZK,
};
use rand_core::OsRng;
use secpq_curves::group::cofactor::CofactorCurveAffine;
use secpq_curves::Secq256k1Affine;
use std::fs::File;
use std::io::Read;
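// Dimensions of the pubkey_membership R1CS instance whose proof is loaded below.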
const NUM_INPUTS: usize = 5;
const NUM_CONSTRAINTS: usize = 8076;
const NUM_VARS: usize = 8097;
type SPARTAN_ECDSA_CIRCUIT = HopliteCircuit<5, 13, 64, 7>;
fn spartan_ecdsa_circuit() -> SPARTAN_ECDSA_CIRCUIT {
let mut proof_file = File::open("./prover/proof.bin").expect("Proof file not found");
let mut input_file = File::open("./prover/input.bin").expect("Input file not found");
let mut proof = vec![];
let mut input = vec![];
proof_file.read_to_end(&mut proof).unwrap();
input_file.read_to_end(&mut input).unwrap();
let proof: NIZK = bincode::deserialize(&proof).unwrap();
let inst = load_as_spartan_inst(
"../circuits/build/pubkey_membership/pubkey_membership.r1cs".into(),
5,
);
let sc_proof_phase1: CVSumCheckProof<13, 4> =
proof.r1cs_sat_proof.sc_proof_phase1.to_circuit_val();
let r1cs_sat_proof = &proof.r1cs_sat_proof;
let claims_phase2 = &r1cs_sat_proof.claims_phase2;
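// input.bin packs the public inputs as consecutive 32-byte scalars (little-endian, matching Fq::to_bytes).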
let mut inputs = Vec::new();
for i in 0..NUM_INPUTS {
inputs.push(input[(i * 32)..((i + 1) * 32)].try_into().unwrap());
}
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
let input = assignment_inputs
.assignment
.iter()
.map(|x| x.to_circuit_val())
.collect();
let gens = NIZKGens::new(NUM_CONSTRAINTS, NUM_VARS, NUM_INPUTS);
/*
// Optional native sanity check (mirrors the call in tiny_circuit):
verify_nizk::<NUM_CONSTRAINTS, NUM_INPUTS>(
&inst,
&assignment_inputs.assignment,
&proof,
&gens,
);
*/
let circuit = SPARTAN_ECDSA_CIRCUIT {
inst: inst.digest,
input,
comm_vars: r1cs_sat_proof.comm_vars.to_circuit_val(),
sc_proof_phase1,
sc_proof_phase2: r1cs_sat_proof.sc_proof_phase2.to_circuit_val(),
claims_phase2: (
Some(claims_phase2.0.to_circuit_val()),
Some(claims_phase2.1.to_circuit_val()),
Some(claims_phase2.2.to_circuit_val()),
Some(claims_phase2.3.to_circuit_val()),
),
pok_claims_phase2: (
r1cs_sat_proof.pok_claims_phase2.0.to_circuit_val(),
r1cs_sat_proof.pok_claims_phase2.1.to_circuit_val(),
),
proof_eq_sc_phase1: r1cs_sat_proof.proof_eq_sc_phase1.to_circuit_val(),
proof_eq_sc_phase2: r1cs_sat_proof.proof_eq_sc_phase2.to_circuit_val(),
comm_vars_at_ry: Some(r1cs_sat_proof.comm_vars_at_ry.to_circuit_val()),
proof_eval_vars_at_ry: r1cs_sat_proof.proof_eval_vars_at_ry.to_circuit_val(),
gens_pc_1: gens.gens_r1cs_sat.gens_pc.gens.gens_1.into(),
gens_pc_n: gens.gens_r1cs_sat.gens_pc.gens.gens_n.into(),
gens_sc_1: gens.gens_r1cs_sat.gens_sc.gens_1.into(),
gens_sc_3: gens.gens_r1cs_sat.gens_sc.gens_3.into(),
gens_sc_4: gens.gens_r1cs_sat.gens_sc.gens_4.into(),
};
circuit
}
fn tiny_circuit() -> HopliteCircuit<4, 1, 2, 1> {
// parameters of the R1CS instance
let num_cons = 1;
let num_vars = 0;
let num_inputs = 3;
// We encode the constraint below into three matrices, where the
// coefficients in each matrix are in little-endian byte order
let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new(); // <row, column, value>
let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
// Encode z = a^2 + b + 13 as the single constraint a * a = z - 13 - b
A.push((0, num_vars + 2, Fq::one().to_bytes())); // 1*a
B.push((0, num_vars + 2, Fq::one().to_bytes())); // 1*a
C.push((0, num_vars + 1, Fq::one().to_bytes())); // 1*z
C.push((0, num_vars, (-Fq::from(13u64)).to_bytes())); // -13*1
C.push((0, num_vars + 3, (-Fq::one()).to_bytes())); // -1*b
// Variable assignment (num_vars = 0, so there are no witness variables)
let vars = vec![Fq::zero().to_bytes(); num_vars];
// Create an InputsAssignment (z = 16, a = 1, b = 2)
let mut inputs = vec![Fq::zero().to_bytes(); num_inputs];
inputs[0] = Fq::from(16u64).to_bytes();
inputs[1] = Fq::from(1u64).to_bytes();
inputs[2] = Fq::from(2u64).to_bytes();
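// Sanity: z = a^2 + b + 13 = 1 + 2 + 13 = 16, matching inputs[0].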
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
let assignment_vars = VarsAssignment::new(&vars).unwrap();
// Check if instance is satisfiable
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert!(res.unwrap(), "should be satisfied");
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
let mut prover_transcript = Transcript::new(b"test_verify");
let proof = NIZK::prove(
&inst,
assignment_vars,
&assignment_inputs,
&gens,
&mut prover_transcript,
);
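// Sanity check: verify the proof natively before wiring it into the circuit.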
verify_nizk::<1, 3>(&inst, &assignment_inputs.assignment, &proof, &gens);
// Convert the phase 1 zk-sumcheck proof into circuit values
let sc_proof_phase1: CVSumCheckProof<1, 4> =
proof.r1cs_sat_proof.sc_proof_phase1.to_circuit_val();
let r1cs_sat_proof = &proof.r1cs_sat_proof;
let claims_phase2 = &r1cs_sat_proof.claims_phase2;
let input = assignment_inputs
.assignment
.iter()
.map(|x| x.to_circuit_val())
.collect();
let circuit = HopliteCircuit::<4, 1, 2, 1> {
inst: inst.digest,
input,
comm_vars: r1cs_sat_proof.comm_vars.to_circuit_val(),
sc_proof_phase1,
sc_proof_phase2: r1cs_sat_proof.sc_proof_phase2.to_circuit_val(),
claims_phase2: (
Some(claims_phase2.0.to_circuit_val()),
Some(claims_phase2.1.to_circuit_val()),
Some(claims_phase2.2.to_circuit_val()),
Some(claims_phase2.3.to_circuit_val()),
),
pok_claims_phase2: (
r1cs_sat_proof.pok_claims_phase2.0.to_circuit_val(),
r1cs_sat_proof.pok_claims_phase2.1.to_circuit_val(),
),
proof_eq_sc_phase1: r1cs_sat_proof.proof_eq_sc_phase1.to_circuit_val(),
proof_eq_sc_phase2: r1cs_sat_proof.proof_eq_sc_phase2.to_circuit_val(),
comm_vars_at_ry: Some(r1cs_sat_proof.comm_vars_at_ry.to_circuit_val()),
proof_eval_vars_at_ry: r1cs_sat_proof.proof_eval_vars_at_ry.to_circuit_val(),
gens_pc_1: gens.gens_r1cs_sat.gens_pc.gens.gens_1.into(),
gens_pc_n: gens.gens_r1cs_sat.gens_pc.gens.gens_n.into(),
gens_sc_1: gens.gens_r1cs_sat.gens_sc.gens_1.into(),
gens_sc_3: gens.gens_r1cs_sat.gens_sc.gens_3.into(),
gens_sc_4: gens.gens_r1cs_sat.gens_sc.gens_4.into(),
};
circuit
}
#[test]
fn test_tiny_prove() {
// Build a HopliteCircuit from a freshly generated proof of the tiny R1CS instance
let circuit = tiny_circuit();
let k = 12;
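// k is the log2 of the number of rows in the halo2 circuit.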
let prover = MockProver::<Fr>::run(k, &circuit, vec![vec![]]).unwrap();
assert_eq!(prover.verify(), Ok(()));
}
#[test]
fn test_spartan_ecdsa_mock_prove() {
let circuit = spartan_ecdsa_circuit();
let k = 21;
let prover = MockProver::<Fr>::run(k, &circuit, vec![vec![]]).unwrap();
assert_eq!(prover.verify(), Ok(()));
}
#[test]
fn test_spartan_ecdsa_prove() -> Result<(), Box<dyn std::error::Error>> {
let circuit = spartan_ecdsa_circuit();
let params_gen_timer = start_timer!(|| "Parameters generation");
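// gen_srs reads cached KZG parameters for 2^21 rows from disk, generating and persisting them on first run.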
let params = gen_srs(21);
end_timer!(params_gen_timer);
let vkey_gen_timer = start_timer!(|| "Verification key generation");
let vk = keygen_vk(&params, &circuit)?;
end_timer!(vkey_gen_timer);
let pkey_gen_timer = start_timer!(|| "Proving key generation");
let pk = keygen_pk(&params, vk, &circuit)?;
end_timer!(pkey_gen_timer);
let mut rng = OsRng;
let target = Secq256k1Affine::generator() * secpq_curves::Fp::one();
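// The public instance is the target point, with each coordinate decomposed into three 88-bit limbs.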
let x_limbs: Vec<Fr> =
decompose_biguint(&BigUint::from_bytes_le(&target.x.to_bytes()), 3, 88);
let y_limbs: Vec<Fr> =
decompose_biguint(&BigUint::from_bytes_le(&target.y.to_bytes()), 3, 88);
let instances = vec![x_limbs, y_limbs].concat();
let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]);
let proving_timer = start_timer!(|| "Proving");
create_proof::<
KZGCommitmentScheme<Bn256>,
ProverSHPLONK<'_, Bn256>,
Challenge255<G1Affine>,
_,
Blake2bWrite<Vec<u8>, G1Affine, Challenge255<_>>,
_,
>(
&params,
&pk,
&[circuit],
&[&[instances.as_slice()]],
&mut rng,
&mut transcript,
)
.expect("prover should not fail");
let proof = transcript.finalize();
end_timer!(proving_timer);
println!("proof size: {}", proof.len());
let mut verifier_transcript = Blake2bRead::<_, G1Affine, Challenge255<_>>::init(&proof[..]);
let strategy = SingleStrategy::new(&params);
let verifier_params: ParamsVerifierKZG<Bn256> = params.verifier_params().clone();
verify_proof::<
KZGCommitmentScheme<Bn256>,
VerifierSHPLONK<'_, Bn256>,
Challenge255<G1Affine>,
Blake2bRead<&[u8], G1Affine, Challenge255<G1Affine>>,
SingleStrategy<'_, Bn256>,
>(
&verifier_params,
pk.get_vk(),
strategy,
&[&[instances.as_slice()]],
&mut verifier_transcript,
)
.expect("failed to verify bench circuit");
Ok(())
}
}

View File

@@ -0,0 +1,59 @@
use halo2_base::utils::PrimeField;
use halo2_ecc::bigint::CRTInteger;
use halo2_ecc::ecc::EcPoint;
use halo2_proofs::circuit::Value;
use libspartan::{
group::CompressedGroup,
transcript::{ProofTranscript, Transcript},
};
// TODO: Turn this into a transcript chip
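// Appends in-circuit (CRT-encoded) values to the native transcript so the
// Fiat-Shamir challenges stay in sync with the reference verifier.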
pub trait HopliteTranscript<'v, F: PrimeField> {
fn append_circuit_point(&mut self, label: &'static [u8], point: EcPoint<F, CRTInteger<'v, F>>);
fn append_circuit_fq(&mut self, label: &'static [u8], fe: CRTInteger<'v, F>);
}
impl<'v, F: PrimeField> HopliteTranscript<'v, F> for Transcript {
fn append_circuit_point(
&mut self,
label: &'static [u8],
circuit_point: EcPoint<F, CRTInteger<'v, F>>,
) {
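// Note: to_bytes_be() omits leading zeros, so the trailing resize assumes the coordinate occupies the full 32 bytes.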
let mut x = [0u8; 32];
let _x = circuit_point.x.value.and_then(|val| {
let mut x_bytes = val.to_bytes_be().1;
x_bytes.resize(32, 0);
x = x_bytes.try_into().unwrap();
Value::known(val)
});
let mut y = [0u8; 32];
let _y = circuit_point.y.value.and_then(|val| {
let mut y_bytes = val.to_bytes_be().1;
y_bytes.resize(32, 0);
y = y_bytes.try_into().unwrap();
Value::known(val)
});
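// All-zero coordinates denote the identity point, which has no affine representation.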
let point = if (x == [0u8; 32]) && (y == [0u8; 32]) {
CompressedGroup::identity()
} else {
CompressedGroup::from_affine_coordinates(&x.into(), &y.into(), true)
};
self.append_point(label, &point);
}
fn append_circuit_fq(&mut self, label: &'static [u8], fe: CRTInteger<'v, F>) {
// TODO: Not sure if this works!
let mut bytes = [0u8; 32];
let _ = fe.value.and_then(|val| {
let mut bytes_be = val.to_bytes_be().1;
bytes_be.resize(32, 0);
bytes = bytes_be.try_into().unwrap();
Value::known(val)
});
self.append_message(label, &bytes);
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "@personaelabs/spartan-ecdsa",
"version": "1.0.2",
"version": "2.0.0",
"main": "./build/lib.js",
"types": "./build/lib.d.ts",
"license": "MIT",

View File

@@ -109,7 +109,7 @@ export class PublicInput {
/**
* Compute the group elements T and U for efficient ecdsa
* http://localhost:1313/posts/efficient-ecdsa-1/
* https://personaelabs.org/posts/efficient-ecdsa-1/
*/
export const computeEffEcdsaPubInput = (
r: bigint,

View File

@@ -20,6 +20,14 @@ export class Tree {
this.treeInner.insert(leaf);
}
delete(index: number) {
this.treeInner.delete(index);
}
leaves(): bigint[] {
return this.treeInner.leaves;
}
root(): bigint {
return this.treeInner.root;
}

View File

@@ -1,7 +1,6 @@
let wasm;
const heap = new Array(32).fill(undefined);
const heap = new Array(128).fill(undefined);
heap.push(undefined, null, true, false);
@@ -10,7 +9,7 @@ function getObject(idx) { return heap[idx]; }
let heap_next = heap.length;
function dropObject(idx) {
if (idx < 36) return;
if (idx < 132) return;
heap[idx] = heap_next;
heap_next = idx;
}
@@ -25,10 +24,10 @@ const cachedTextDecoder = new TextDecoder('utf-8', { ignoreBOM: true, fatal: tru
cachedTextDecoder.decode();
let cachedUint8Memory0 = new Uint8Array();
let cachedUint8Memory0 = null;
function getUint8Memory0() {
if (cachedUint8Memory0.buffer !== wasm.memory.buffer) {
if (cachedUint8Memory0 === null || cachedUint8Memory0.buffer !== wasm.memory.buffer) {
cachedUint8Memory0 = new Uint8Array(wasm.memory.buffer);
}
return cachedUint8Memory0;
@@ -61,10 +60,10 @@ function passArray8ToWasm0(arg, malloc) {
return ptr;
}
let cachedInt32Memory0 = new Int32Array();
let cachedInt32Memory0 = null;
function getInt32Memory0() {
if (cachedInt32Memory0.buffer !== wasm.memory.buffer) {
if (cachedInt32Memory0 === null || cachedInt32Memory0.buffer !== wasm.memory.buffer) {
cachedInt32Memory0 = new Int32Array(wasm.memory.buffer);
}
return cachedInt32Memory0;
@@ -248,6 +247,15 @@ async function load(module, imports) {
function getImports() {
const imports = {};
imports.wbg = {};
imports.wbg.__wbg_randomFillSync_6894564c2c334c42 = function() { return handleError(function (arg0, arg1, arg2) {
getObject(arg0).randomFillSync(getArrayU8FromWasm0(arg1, arg2));
}, arguments) };
imports.wbg.__wbindgen_object_drop_ref = function(arg0) {
takeObject(arg0);
};
imports.wbg.__wbg_getRandomValues_805f1c3d65988a5a = function() { return handleError(function (arg0, arg1) {
getObject(arg0).getRandomValues(getObject(arg1));
}, arguments) };
imports.wbg.__wbg_crypto_e1d53a1d73fb10b8 = function(arg0) {
const ret = getObject(arg0).crypto;
return addHeapObject(ret);
@@ -273,9 +281,6 @@ function getImports() {
const ret = typeof(getObject(arg0)) === 'string';
return ret;
};
imports.wbg.__wbindgen_object_drop_ref = function(arg0) {
takeObject(arg0);
};
imports.wbg.__wbg_msCrypto_6e7d3e1f92610cbb = function(arg0) {
const ret = getObject(arg0).msCrypto;
return addHeapObject(ret);
@@ -292,17 +297,11 @@ function getImports() {
const ret = getStringFromWasm0(arg0, arg1);
return addHeapObject(ret);
};
imports.wbg.__wbg_getRandomValues_805f1c3d65988a5a = function() { return handleError(function (arg0, arg1) {
getObject(arg0).getRandomValues(getObject(arg1));
}, arguments) };
imports.wbg.__wbg_randomFillSync_6894564c2c334c42 = function() { return handleError(function (arg0, arg1, arg2) {
getObject(arg0).randomFillSync(getArrayU8FromWasm0(arg1, arg2));
}, arguments) };
imports.wbg.__wbg_newnoargs_b5b063fc6c2f0376 = function(arg0, arg1) {
imports.wbg.__wbg_newnoargs_2b8b6bd7753c76ba = function(arg0, arg1) {
const ret = new Function(getStringFromWasm0(arg0, arg1));
return addHeapObject(ret);
};
imports.wbg.__wbg_call_97ae9d8645dc388b = function() { return handleError(function (arg0, arg1) {
imports.wbg.__wbg_call_95d1ea488d03e4e8 = function() { return handleError(function (arg0, arg1) {
const ret = getObject(arg0).call(getObject(arg1));
return addHeapObject(ret);
}, arguments) };
@@ -310,19 +309,19 @@ function getImports() {
const ret = getObject(arg0);
return addHeapObject(ret);
};
imports.wbg.__wbg_self_6d479506f72c6a71 = function() { return handleError(function () {
imports.wbg.__wbg_self_e7c1f827057f6584 = function() { return handleError(function () {
const ret = self.self;
return addHeapObject(ret);
}, arguments) };
imports.wbg.__wbg_window_f2557cc78490aceb = function() { return handleError(function () {
imports.wbg.__wbg_window_a09ec664e14b1b81 = function() { return handleError(function () {
const ret = window.window;
return addHeapObject(ret);
}, arguments) };
imports.wbg.__wbg_globalThis_7f206bda628d5286 = function() { return handleError(function () {
imports.wbg.__wbg_globalThis_87cbb8506fecf3a9 = function() { return handleError(function () {
const ret = globalThis.globalThis;
return addHeapObject(ret);
}, arguments) };
imports.wbg.__wbg_global_ba75c50d1cf384f4 = function() { return handleError(function () {
imports.wbg.__wbg_global_c85a9259e621f3db = function() { return handleError(function () {
const ret = global.global;
return addHeapObject(ret);
}, arguments) };
@@ -330,30 +329,30 @@ function getImports() {
const ret = getObject(arg0) === undefined;
return ret;
};
imports.wbg.__wbg_call_168da88779e35f61 = function() { return handleError(function (arg0, arg1, arg2) {
imports.wbg.__wbg_call_9495de66fdbe016b = function() { return handleError(function (arg0, arg1, arg2) {
const ret = getObject(arg0).call(getObject(arg1), getObject(arg2));
return addHeapObject(ret);
}, arguments) };
imports.wbg.__wbg_buffer_3f3d764d4747d564 = function(arg0) {
imports.wbg.__wbg_buffer_cf65c07de34b9a08 = function(arg0) {
const ret = getObject(arg0).buffer;
return addHeapObject(ret);
};
imports.wbg.__wbg_new_8c3f0052272a457a = function(arg0) {
imports.wbg.__wbg_new_537b7341ce90bb31 = function(arg0) {
const ret = new Uint8Array(getObject(arg0));
return addHeapObject(ret);
};
imports.wbg.__wbg_set_83db9690f9353e79 = function(arg0, arg1, arg2) {
imports.wbg.__wbg_set_17499e8aa4003ebd = function(arg0, arg1, arg2) {
getObject(arg0).set(getObject(arg1), arg2 >>> 0);
};
imports.wbg.__wbg_length_9e1ae1900cb0fbd5 = function(arg0) {
imports.wbg.__wbg_length_27a2afe8ab42b09f = function(arg0) {
const ret = getObject(arg0).length;
return ret;
};
imports.wbg.__wbg_newwithlength_f5933855e4f48a19 = function(arg0) {
imports.wbg.__wbg_newwithlength_b56c882b57805732 = function(arg0) {
const ret = new Uint8Array(arg0 >>> 0);
return addHeapObject(ret);
};
imports.wbg.__wbg_subarray_58ad4efbb5bcb886 = function(arg0, arg1, arg2) {
imports.wbg.__wbg_subarray_7526649b91a252a6 = function(arg0, arg1, arg2) {
const ret = getObject(arg0).subarray(arg1 >>> 0, arg2 >>> 0);
return addHeapObject(ret);
};
@@ -393,8 +392,8 @@ function initMemory(imports, maybe_memory) {
function finalizeInit(instance, module) {
wasm = instance.exports;
init.__wbindgen_wasm_module = module;
cachedInt32Memory0 = new Int32Array();
cachedUint8Memory0 = new Uint8Array();
cachedInt32Memory0 = null;
cachedUint8Memory0 = null;
wasm.__wbindgen_start();
return wasm;

View File

@@ -11,7 +11,7 @@ crate-type = ["cdylib", "rlib"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
spartan = { git = "https://github.com/DanTehrani/Spartan-secq.git", branch="master" }
spartan = { git = "https://github.com/DanTehrani/Spartan-secq.git", branch="hoplite" }
wasm-bindgen = { version = "0.2.81", features = ["serde-serialize"]}
console_error_panic_hook = "0.1.7"
merlin = "3.0.0"
@@ -32,14 +32,3 @@ poseidon = { path = "../poseidon" }
itertools = "0.9.0"
group = "0.12.0"
# Do not compile these dependencies when targeting wasm
#[target.'cfg(not(target_family = "wasm"))'.dependencies]
#nova-scotia = { git = "https://github.com/DanTehrani/Nova-Scotia.git" }
#nova-snark = "0.9.0"
#ff = "0.12.1"
#ark-std = { version = "0.3.0", features = ["print-trace"] }
[[bin]]
name = "gen_spartan_inst"
path = "src/bin/gen_spartan_inst.rs"

View File

@@ -1,4 +1 @@
pub mod wasm;
#[cfg(not(target_family = "wasm"))]
pub mod circom_reader;

2
scripts/test.sh Normal file
View File

@@ -0,0 +1,2 @@
cargo test --release &&
yarn lerna run test