Update ark-circom for arkworks 0.4.0 (#43)

Deepak Maram, 2023-03-16 17:42:33 -04:00 (committed by GitHub)
parent 35ce5a909e, commit b892c62597
15 changed files with 718 additions and 732 deletions

Cargo.lock (generated, 1133 lines changed): diff suppressed because it is too large.

@@ -12,14 +12,15 @@ num-traits = { version = "0.2.0", default-features = false }
num-bigint = { version = "0.4", default-features = false, features = ["rand"] }
# ZKP Generation
ark-ec = { version = "0.3.0", default-features = false, features = ["parallel"] }
ark-ff = { version = "0.3.0", default-features = false, features = ["parallel", "asm"] }
ark-std = { version = "0.3.0", default-features = false, features = ["parallel"] }
ark-bn254 = { version = "0.3.0" }
ark-groth16 = { git = "https://github.com/arkworks-rs/groth16", rev = "765817f", features = ["parallel"] }
ark-poly = { version = "^0.3.0", default-features = false, features = ["parallel"] }
ark-relations = { version = "0.3.0", default-features = false }
ark-serialize = { version = "0.3.0", default-features = false }
ark-crypto-primitives = { version = "0.4.0" }
ark-ec = { version = "0.4.1", default-features = false, features = ["parallel"] }
ark-ff = { version = "0.4.1", default-features = false, features = ["parallel", "asm"] }
ark-std = { version = "0.4.0", default-features = false, features = ["parallel"] }
ark-bn254 = { version = "0.4.0" }
ark-groth16 = { version = "0.4.0", features = ["parallel"] }
ark-poly = { version = "0.4.1", default-features = false, features = ["parallel"] }
ark-relations = { version = "0.4.0", default-features = false }
ark-serialize = { version = "0.4.1", default-features = false }
# decoding of data
hex = "0.4.3"
@@ -49,4 +50,4 @@ harness = false
bench-complex-all = []
circom-2 = []
ethereum = ["ethers-core"]
default = ["ethereum"]
default = ["circom-2", "ethereum"]

@@ -35,7 +35,7 @@ let circom = builder.setup();
// Run a trusted setup
let mut rng = thread_rng();
let params = generate_random_parameters::<Bn254, _, _>(circom, &mut rng)?;
let params = generate_random_parameters_with_reduction(circom, &mut rng)?;
// Get the populated instance of the circuit with the witness
let circom = builder.build()?;
@@ -43,11 +43,11 @@ let circom = builder.build()?;
let inputs = circom.get_public_inputs().unwrap();
// Generate the proof
let proof = prove(circom, &params, &mut rng)?;
let proof = prove(&params, circom, &mut rng)?;
// Check that the proof is valid
let pvk = prepare_verifying_key(&params.vk);
let verified = verify_proof(&pvk, &proof, &inputs)?;
let pvk = process_vk(&params.vk)?;
let verified = verify_with_processed_vk(&pvk, &inputs, &proof)?;
assert!(verified);
```
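
Assembled, the migrated README flow reads as below. This is a sketch: the file paths are placeholders, and the calls are qualified through the `GrothBn` alias used by the test suite, since arkworks 0.4 removed the free `generate_random_parameters`/`create_random_proof`/`verify_proof` helpers.

```rust
use ark_bn254::Bn254;
use ark_circom::{CircomBuilder, CircomConfig};
use ark_crypto_primitives::snark::SNARK;
use ark_groth16::Groth16;
use ark_std::rand::thread_rng;

type GrothBn = Groth16<Bn254>;

fn main() -> color_eyre::Result<()> {
    // Load the WASM witness generator and the R1CS (placeholder paths).
    let cfg = CircomConfig::<Bn254>::new("circuit.wasm", "circuit.r1cs")?;
    let mut builder = CircomBuilder::new(cfg);
    builder.push_input("a", 3);

    // Empty instance for the trusted setup.
    let circom = builder.setup();
    let mut rng = thread_rng();
    let params = GrothBn::generate_random_parameters_with_reduction(circom, &mut rng)?;

    // Populated instance carrying the witness.
    let circom = builder.build()?;
    let inputs = circom.get_public_inputs().unwrap();

    let proof = GrothBn::prove(&params, circom, &mut rng)?;
    let pvk = GrothBn::process_vk(&params.vk)?;
    let verified = GrothBn::verify_with_processed_vk(&pvk, &inputs, &proof)?;
    assert!(verified);
    Ok(())
}
```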

@@ -1,10 +1,11 @@
use ark_crypto_primitives::snark::SNARK;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use ark_circom::{read_zkey, CircomReduction, WitnessCalculator};
use ark_std::rand::thread_rng;
use ark_bn254::Bn254;
use ark_groth16::{create_proof_with_reduction_and_matrices, prepare_verifying_key, verify_proof};
use ark_groth16::Groth16;
use std::{collections::HashMap, fs::File};
@@ -15,7 +16,7 @@ fn bench_groth(c: &mut Criterion, num_validators: u32, num_constraints: u32) {
"./test-vectors/complex-circuit/complex-circuit-{}-{}.zkey",
i, j
);
let mut file = File::open(&path).unwrap();
let mut file = File::open(path).unwrap();
let (params, matrices) = read_zkey(&mut file).unwrap();
let num_inputs = matrices.num_instance_variables;
let num_constraints = matrices.num_constraints;
@@ -28,7 +29,7 @@ fn bench_groth(c: &mut Criterion, num_validators: u32, num_constraints: u32) {
inputs
};
let mut wtns = WitnessCalculator::new(&format!(
let mut wtns = WitnessCalculator::new(format!(
"./test-vectors/complex-circuit/complex-circuit-{}-{}.wasm",
i, j
))
@@ -44,7 +45,7 @@ fn bench_groth(c: &mut Criterion, num_validators: u32, num_constraints: u32) {
let r = ark_bn254::Fr::rand(rng);
let s = ark_bn254::Fr::rand(rng);
let proof = create_proof_with_reduction_and_matrices::<_, CircomReduction>(
let proof = Groth16::<Bn254, CircomReduction>::create_proof_with_reduction_and_matrices(
&params,
r,
s,
@@ -55,16 +56,16 @@ fn bench_groth(c: &mut Criterion, num_validators: u32, num_constraints: u32) {
)
.unwrap();
let pvk = prepare_verifying_key(&params.vk);
let pvk = Groth16::<Bn254>::process_vk(&params.vk).unwrap();
let inputs = &full_assignment[1..num_inputs];
let verified = verify_proof(&pvk, &proof, inputs).unwrap();
let verified = Groth16::<Bn254>::verify_with_processed_vk(&pvk, inputs, &proof).unwrap();
assert!(verified);
c.bench_function(&format!("groth proof {} {}", i, j), |b| {
b.iter(|| {
black_box(
create_proof_with_reduction_and_matrices::<_, CircomReduction>(
Groth16::<Bn254, CircomReduction>::create_proof_with_reduction_and_matrices(
&params,
r,
s,
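
The bench migration shows the other half of the 0.4 API: the QAP reduction is now the second type parameter of `Groth16`, and the matrices-based prover is an associated function rather than a free one. A self-contained sketch of the call (a hypothetical wrapper whose names mirror the bench):

```rust
use ark_bn254::{Bn254, Fr};
use ark_circom::CircomReduction;
use ark_groth16::{Groth16, Proof, ProvingKey};
use ark_relations::r1cs::{ConstraintMatrices, SynthesisError};

// The reduction (here circom's) is the second type parameter of Groth16;
// the matrices-based prover is an associated function in 0.4.
fn prove_with_matrices(
    params: &ProvingKey<Bn254>,
    matrices: &ConstraintMatrices<Fr>,
    r: Fr,
    s: Fr,
    num_inputs: usize,
    num_constraints: usize,
    full_assignment: &[Fr],
) -> Result<Proof<Bn254>, SynthesisError> {
    Groth16::<Bn254, CircomReduction>::create_proof_with_reduction_and_matrices(
        params,
        r,
        s,
        matrices,
        num_inputs,
        num_constraints,
        full_assignment,
    )
}
```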

@@ -1,4 +1,4 @@
use ark_ec::PairingEngine;
use ark_ec::pairing::Pairing;
use std::{fs::File, path::Path};
use super::{CircomCircuit, R1CS};
@@ -10,20 +10,20 @@ use crate::{circom::R1CSFile, witness::WitnessCalculator};
use color_eyre::Result;
#[derive(Clone, Debug)]
pub struct CircomBuilder<E: PairingEngine> {
pub struct CircomBuilder<E: Pairing> {
pub cfg: CircomConfig<E>,
pub inputs: HashMap<String, Vec<BigInt>>,
}
// Add utils for creating this from files / directly from bytes
#[derive(Clone, Debug)]
pub struct CircomConfig<E: PairingEngine> {
pub struct CircomConfig<E: Pairing> {
pub r1cs: R1CS<E>,
pub wtns: WitnessCalculator,
pub sanity_check: bool,
}
impl<E: PairingEngine> CircomConfig<E> {
impl<E: Pairing> CircomConfig<E> {
pub fn new(wtns: impl AsRef<Path>, r1cs: impl AsRef<Path>) -> Result<Self> {
let wtns = WitnessCalculator::new(wtns).unwrap();
let reader = File::open(r1cs)?;
@@ -36,7 +36,7 @@ impl<E: PairingEngine> CircomConfig<E> {
}
}
impl<E: PairingEngine> CircomBuilder<E> {
impl<E: Pairing> CircomBuilder<E> {
/// Instantiates a new builder using the provided WitnessGenerator and R1CS files
/// for your circuit
pub fn new(cfg: CircomConfig<E>) -> Self {
@@ -81,7 +81,7 @@ impl<E: PairingEngine> CircomBuilder<E> {
// sanity check
debug_assert!({
use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystem};
let cs = ConstraintSystem::<E::Fr>::new_ref();
let cs = ConstraintSystem::<E::ScalarField>::new_ref();
circom.clone().generate_constraints(cs.clone()).unwrap();
let is_satisfied = cs.is_satisfied().unwrap();
if !is_satisfied {

@@ -1,4 +1,4 @@
use ark_ec::PairingEngine;
use ark_ec::pairing::Pairing;
use ark_relations::r1cs::{
ConstraintSynthesizer, ConstraintSystemRef, LinearCombination, SynthesisError, Variable,
};
@@ -8,13 +8,13 @@ use super::R1CS;
use color_eyre::Result;
#[derive(Clone, Debug)]
pub struct CircomCircuit<E: PairingEngine> {
pub struct CircomCircuit<E: Pairing> {
pub r1cs: R1CS<E>,
pub witness: Option<Vec<E::Fr>>,
pub witness: Option<Vec<E::ScalarField>>,
}
impl<E: PairingEngine> CircomCircuit<E> {
pub fn get_public_inputs(&self) -> Option<Vec<E::Fr>> {
impl<E: Pairing> CircomCircuit<E> {
pub fn get_public_inputs(&self) -> Option<Vec<E::ScalarField>> {
match &self.witness {
None => None,
Some(w) => match &self.r1cs.wire_mapping {
@@ -25,8 +25,11 @@ impl<E: PairingEngine> CircomCircuit<E> {
}
}
impl<E: PairingEngine> ConstraintSynthesizer<E::Fr> for CircomCircuit<E> {
fn generate_constraints(self, cs: ConstraintSystemRef<E::Fr>) -> Result<(), SynthesisError> {
impl<E: Pairing> ConstraintSynthesizer<E::ScalarField> for CircomCircuit<E> {
fn generate_constraints(
self,
cs: ConstraintSystemRef<E::ScalarField>,
) -> Result<(), SynthesisError> {
let witness = &self.witness;
let wire_mapping = &self.r1cs.wire_mapping;
@@ -34,7 +37,7 @@ impl<E: PairingEngine> ConstraintSynthesizer<E::Fr> for CircomCircuit<E> {
for i in 1..self.r1cs.num_inputs {
cs.new_input_variable(|| {
Ok(match witness {
None => E::Fr::from(1u32),
None => E::ScalarField::from(1u32),
Some(w) => match wire_mapping {
Some(m) => w[m[i]],
None => w[i],
@@ -46,7 +49,7 @@ impl<E: PairingEngine> ConstraintSynthesizer<E::Fr> for CircomCircuit<E> {
for i in 0..self.r1cs.num_aux {
cs.new_witness_variable(|| {
Ok(match witness {
None => E::Fr::from(1u32),
None => E::ScalarField::from(1u32),
Some(w) => match wire_mapping {
Some(m) => w[m[i + self.r1cs.num_inputs]],
None => w[i + self.r1cs.num_inputs],
@@ -62,10 +65,12 @@ impl<E: PairingEngine> ConstraintSynthesizer<E::Fr> for CircomCircuit<E> {
Variable::Witness(index - self.r1cs.num_inputs)
}
};
let make_lc = |lc_data: &[(usize, E::Fr)]| {
let make_lc = |lc_data: &[(usize, E::ScalarField)]| {
lc_data.iter().fold(
LinearCombination::<E::Fr>::zero(),
|lc: LinearCombination<E::Fr>, (index, coeff)| lc + (*coeff, make_index(*index)),
LinearCombination::<E::ScalarField>::zero(),
|lc: LinearCombination<E::ScalarField>, (index, coeff)| {
lc + (*coeff, make_index(*index))
},
)
};
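
The closure above folds circom's sparse `(wire index, coefficient)` pairs into an arkworks `LinearCombination`, now over `E::ScalarField`. A standalone sketch specialized to Bn254, mapping every index to an instance variable for brevity:

```rust
use ark_bn254::Fr;
use ark_relations::r1cs::{LinearCombination, Variable};
use ark_std::Zero;

// Fold sparse (wire index, coefficient) pairs into a linear combination.
fn make_lc(lc_data: &[(usize, Fr)]) -> LinearCombination<Fr> {
    lc_data.iter().fold(
        LinearCombination::<Fr>::zero(),
        |lc, (index, coeff)| lc + (*coeff, Variable::Instance(*index)),
    )
}

fn main() {
    let lc = make_lc(&[(0, Fr::from(2u64)), (1, Fr::from(3u64))]);
    assert_eq!(lc.0.len(), 2);
}
```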

@@ -1,4 +1,4 @@
use ark_ec::PairingEngine;
use ark_ec::pairing::Pairing;
pub mod r1cs_reader;
pub use r1cs_reader::{R1CSFile, R1CS};
@@ -13,4 +13,4 @@ mod qap;
pub use qap::CircomReduction;
pub type Constraints<E> = (ConstraintVec<E>, ConstraintVec<E>, ConstraintVec<E>);
pub type ConstraintVec<E> = Vec<(usize, <E as PairingEngine>::Fr)>;
pub type ConstraintVec<E> = Vec<(usize, <E as Pairing>::ScalarField)>;
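
This is the rename driving most of the diff: `ark_ec::PairingEngine` became `ark_ec::pairing::Pairing`, and the scalar-field associated type `E::Fr` became `E::ScalarField`. A hypothetical generic helper showing the before/after bounds:

```rust
use ark_ec::pairing::Pairing;
use ark_ff::PrimeField;

// 0.3.x equivalent: fn scalar_bytes<E: PairingEngine>() -> usize, using E::Fr.
fn scalar_bytes<E: Pairing>() -> usize {
    ((E::ScalarField::MODULUS_BIT_SIZE as usize) + 7) / 8
}

fn main() {
    assert_eq!(scalar_bytes::<ark_bn254::Bn254>(), 32);
}
```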

@@ -1,5 +1,5 @@
use ark_ff::PrimeField;
use ark_groth16::r1cs_to_qap::{evaluate_constraint, LibsnarkReduction, R1CStoQAP};
use ark_groth16::r1cs_to_qap::{evaluate_constraint, LibsnarkReduction, R1CSToQAP};
use ark_poly::EvaluationDomain;
use ark_relations::r1cs::{ConstraintMatrices, ConstraintSystemRef, SynthesisError};
use ark_std::{cfg_into_iter, cfg_iter, cfg_iter_mut, vec};
@@ -11,7 +11,7 @@ use ark_std::{cfg_into_iter, cfg_iter, cfg_iter_mut, vec};
/// in that domain. This serves as HZ when computing the C proof element.
pub struct CircomReduction;
impl R1CStoQAP for CircomReduction {
impl R1CSToQAP for CircomReduction {
#[allow(clippy::type_complexity)]
fn instance_map_with_evaluation<F: PrimeField, D: EvaluationDomain<F>>(
cs: ConstraintSystemRef<F>,

@@ -2,18 +2,20 @@
//! Copied from <https://github.com/poma/zkutil>
//! Spec: <https://github.com/iden3/r1csfile/blob/master/doc/r1cs_bin_format.md>
use byteorder::{LittleEndian, ReadBytesExt};
use std::io::{Error, ErrorKind, Result};
use std::io::{Error, ErrorKind};
use ark_ec::PairingEngine;
use ark_ff::FromBytes;
use ark_ec::pairing::Pairing;
use ark_serialize::{CanonicalDeserialize, SerializationError, SerializationError::IoError};
use ark_std::io::{Read, Seek, SeekFrom};
use std::collections::HashMap;
type IoResult<T> = Result<T, SerializationError>;
use super::{ConstraintVec, Constraints};
#[derive(Clone, Debug)]
pub struct R1CS<E: PairingEngine> {
pub struct R1CS<E: Pairing> {
pub num_inputs: usize,
pub num_aux: usize,
pub num_variables: usize,
@@ -21,7 +23,7 @@ pub struct R1CS<E: PairingEngine> {
pub wire_mapping: Option<Vec<usize>>,
}
impl<E: PairingEngine> From<R1CSFile<E>> for R1CS<E> {
impl<E: Pairing> From<R1CSFile<E>> for R1CS<E> {
fn from(file: R1CSFile<E>) -> Self {
let num_inputs = (1 + file.header.n_pub_in + file.header.n_pub_out) as usize;
let num_variables = file.header.n_wires as usize;
@@ -36,30 +38,35 @@ impl<E: PairingEngine> From<R1CSFile<E>> for R1CS<E> {
}
}
pub struct R1CSFile<E: PairingEngine> {
pub struct R1CSFile<E: Pairing> {
pub version: u32,
pub header: Header,
pub constraints: Vec<Constraints<E>>,
pub wire_mapping: Vec<u64>,
}
impl<E: PairingEngine> R1CSFile<E> {
impl<E: Pairing> R1CSFile<E> {
/// reader must implement the Seek trait, for example with a Cursor
///
/// ```rust,ignore
/// let reader = BufReader::new(Cursor::new(&data[..]));
/// ```
pub fn new<R: Read + Seek>(mut reader: R) -> Result<R1CSFile<E>> {
pub fn new<R: Read + Seek>(mut reader: R) -> IoResult<R1CSFile<E>> {
let mut magic = [0u8; 4];
reader.read_exact(&mut magic)?;
if magic != [0x72, 0x31, 0x63, 0x73] {
// magic = "r1cs"
return Err(Error::new(ErrorKind::InvalidData, "Invalid magic number"));
return Err(IoError(Error::new(
ErrorKind::InvalidData,
"Invalid magic number",
)));
}
let version = reader.read_u32::<LittleEndian>()?;
if version != 1 {
return Err(Error::new(ErrorKind::InvalidData, "Unsupported version"));
return Err(IoError(Error::new(
ErrorKind::InvalidData,
"Unsupported version",
)));
}
let num_sections = reader.read_u32::<LittleEndian>()?;
@@ -151,20 +158,20 @@ pub struct Header {
}
impl Header {
fn new<R: Read>(mut reader: R, size: u64) -> Result<Header> {
fn new<R: Read>(mut reader: R, size: u64) -> IoResult<Header> {
let field_size = reader.read_u32::<LittleEndian>()?;
if field_size != 32 {
return Err(Error::new(
return Err(IoError(Error::new(
ErrorKind::InvalidData,
"This parser only supports 32-byte fields",
));
)));
}
if size != 32 + field_size as u64 {
return Err(Error::new(
return Err(IoError(Error::new(
ErrorKind::InvalidData,
"Invalid header section size",
));
)));
}
let mut prime_size = vec![0u8; field_size as usize];
@@ -174,10 +181,10 @@ impl Header {
!= hex::decode("010000f093f5e1439170b97948e833285d588181b64550b829a031e1724e6430")
.unwrap()
{
return Err(Error::new(
return Err(IoError(Error::new(
ErrorKind::InvalidData,
"This parser only supports bn256",
));
)));
}
Ok(Header {
@@ -193,22 +200,22 @@ impl Header {
}
}
fn read_constraint_vec<R: Read, E: PairingEngine>(mut reader: R) -> Result<ConstraintVec<E>> {
fn read_constraint_vec<R: Read, E: Pairing>(mut reader: R) -> IoResult<ConstraintVec<E>> {
let n_vec = reader.read_u32::<LittleEndian>()? as usize;
let mut vec = Vec::with_capacity(n_vec);
for _ in 0..n_vec {
vec.push((
reader.read_u32::<LittleEndian>()? as usize,
E::Fr::read(&mut reader)?,
E::ScalarField::deserialize_uncompressed(&mut reader)?,
));
}
Ok(vec)
}
fn read_constraints<R: Read, E: PairingEngine>(
fn read_constraints<R: Read, E: Pairing>(
mut reader: R,
header: &Header,
) -> Result<Vec<Constraints<E>>> {
) -> IoResult<Vec<Constraints<E>>> {
// todo check section size
let mut vec = Vec::with_capacity(header.n_constraints as usize);
for _ in 0..header.n_constraints {
@@ -221,22 +228,22 @@ fn read_constraints<R: Read, E: PairingEngine>(
Ok(vec)
}
fn read_map<R: Read>(mut reader: R, size: u64, header: &Header) -> Result<Vec<u64>> {
fn read_map<R: Read>(mut reader: R, size: u64, header: &Header) -> IoResult<Vec<u64>> {
if size != header.n_wires as u64 * 8 {
return Err(Error::new(
return Err(IoError(Error::new(
ErrorKind::InvalidData,
"Invalid map section size",
));
)));
}
let mut vec = Vec::with_capacity(header.n_wires as usize);
for _ in 0..header.n_wires {
vec.push(reader.read_u64::<LittleEndian>()?);
}
if vec[0] != 0 {
return Err(Error::new(
return Err(IoError(Error::new(
ErrorKind::InvalidData,
"Wire 0 should always be mapped to 0",
));
)));
}
Ok(vec)
}
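
Beyond the trait rename, the reader swaps `std::io::Result` for a local `IoResult<T> = Result<T, SerializationError>`: field elements are now read with `CanonicalDeserialize::deserialize_uncompressed`, which already reports `SerializationError`, so plain I/O failures get wrapped in its `IoError` variant. A condensed sketch of the pattern:

```rust
use ark_serialize::{SerializationError, SerializationError::IoError};
use std::io::{Error, ErrorKind};

type IoResult<T> = Result<T, SerializationError>;

// std::io errors get wrapped into ark-serialize's error type so one alias
// covers both I/O and deserialization failures.
fn check_magic(magic: [u8; 4]) -> IoResult<()> {
    if magic != *b"r1cs" {
        return Err(IoError(Error::new(
            ErrorKind::InvalidData,
            "Invalid magic number",
        )));
    }
    Ok(())
}

fn main() {
    assert!(check_magic(*b"r1cs").is_ok());
    assert!(check_magic(*b"nope").is_err());
}
```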

@@ -1,10 +1,11 @@
//! Helpers for converting Arkworks types to U256-tuples as expected by the
//! Solidity Groth16 Verifier smart contracts
use ark_ff::{BigInteger, FromBytes, PrimeField};
use ark_ff::{BigInteger, PrimeField};
use ethers_core::types::U256;
use num_traits::Zero;
use ark_bn254::{Bn254, Fq, Fq2, Fr, G1Affine, G2Affine};
use ark_serialize::CanonicalDeserialize;
pub struct Inputs(pub Vec<U256>);
@@ -26,8 +27,11 @@ impl From<G1> for G1Affine {
fn from(src: G1) -> G1Affine {
let x: Fq = u256_to_point(src.x);
let y: Fq = u256_to_point(src.y);
let inf = x.is_zero() && y.is_zero();
G1Affine::new(x, y, inf)
if x.is_zero() && y.is_zero() {
G1Affine::identity()
} else {
G1Affine::new(x, y)
}
}
}
@@ -64,8 +68,11 @@ impl From<G2> for G2Affine {
let c1 = u256_to_point(src.y[1]);
let y = Fq2::new(c0, c1);
let inf = x.is_zero() && y.is_zero();
G2Affine::new(x, y, inf)
if x.is_zero() && y.is_zero() {
G2Affine::identity()
} else {
G2Affine::new(x, y)
}
}
}
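
arkworks 0.4 dropped the explicit infinity flag from the affine constructors, and `G1Affine::new`/`G2Affine::new` now assert that the point lies on the curve, so the `(0, 0)` encoding of the identity must be special-cased by hand, as above. The pattern, isolated:

```rust
use ark_bn254::{Fq, G1Affine};
use num_traits::Zero;

// (0, 0) is the conventional encoding of the point at infinity;
// G1Affine::new would panic on it because it is not on the curve.
fn g1_from_coords(x: Fq, y: Fq) -> G1Affine {
    if x.is_zero() && y.is_zero() {
        G1Affine::identity()
    } else {
        G1Affine::new(x, y)
    }
}

fn main() {
    assert_eq!(g1_from_coords(Fq::zero(), Fq::zero()), G1Affine::identity());
}
```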
@@ -169,14 +176,14 @@ impl From<VerifyingKey> for ark_groth16::VerifyingKey<Bn254> {
fn u256_to_point<F: PrimeField>(point: U256) -> F {
let mut buf = [0; 32];
point.to_little_endian(&mut buf);
let bigint = F::BigInt::read(&buf[..]).expect("always works");
F::from_repr(bigint).expect("alwasy works")
let bigint = F::BigInt::deserialize_uncompressed(&buf[..]).expect("always works");
F::from_bigint(bigint).expect("always works")
}
// Helper for converting a PrimeField to its U256 representation for Ethereum compatibility
// (U256 reads data as big endian)
fn point_to_u256<F: PrimeField>(point: F) -> U256 {
let point = point.into_repr();
let point = point.into_bigint();
let point_bytes = point.to_bytes_be();
U256::from(&point_bytes[..])
}
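
`into_repr`/`from_repr` were renamed to `into_bigint`/`from_bigint`; the semantics (leave/enter Montgomery form, with `from_bigint` returning `None` for values at or above the modulus) are unchanged. A round-trip sketch:

```rust
use ark_bn254::Fr;
use ark_ff::PrimeField;

fn roundtrip(x: Fr) -> Fr {
    let repr = x.into_bigint(); // Montgomery form -> canonical integer
    Fr::from_bigint(repr).expect("repr is below the modulus")
}

fn main() {
    let x = Fr::from(42u64);
    assert_eq!(roundtrip(x), x);
}
```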
@@ -185,25 +192,24 @@ fn point_to_u256<F: PrimeField>(point: F) -> U256 {
mod tests {
use super::*;
use ark_bn254::Fq;
use ark_std::UniformRand;
fn fq() -> Fq {
Fq::from(2)
}
fn fq2() -> Fq2 {
Fq2::from(2)
}
fn fr() -> Fr {
Fr::from(2)
}
fn g1() -> G1Affine {
G1Affine::new(fq(), fq(), false)
let rng = &mut ark_std::test_rng();
G1Affine::rand(rng)
}
fn g2() -> G2Affine {
G2Affine::new(fq2(), fq2(), false)
let rng = &mut ark_std::test_rng();
G2Affine::rand(rng)
}
#[test]

@@ -1,10 +1,12 @@
//! Safe-ish interface for reading and writing specific types to the WASM runtime's memory
use ark_serialize::CanonicalDeserialize;
use num_traits::ToPrimitive;
use wasmer::{Memory, MemoryView};
// TODO: Decide whether we want Ark here or if it should use a generic BigInt package
use ark_bn254::FrParameters;
use ark_ff::{BigInteger, BigInteger256, FpParameters, FromBytes, Zero};
use ark_bn254::FrConfig;
use ark_ff::MontConfig;
use ark_ff::{BigInteger, BigInteger256, Zero};
use num_bigint::{BigInt, BigUint};
@@ -38,7 +40,7 @@ impl SafeMemory {
let short_max = BigInt::from(0x8000_0000u64);
let short_min = BigInt::from_biguint(
num_bigint::Sign::NoSign,
BigUint::try_from(FrParameters::MODULUS).unwrap(),
BigUint::try_from(FrConfig::MODULUS).unwrap(),
) - &short_max;
let r_inv = BigInt::from_str(
"9915499612839321149637521777990102151350674507940716049588462388200839649614",
@@ -188,7 +190,7 @@ impl SafeMemory {
let buf = &buf[ptr..ptr + num_bytes * 32];
// TODO: Is there a better way to read big integers?
let big = BigInteger256::read(buf).unwrap();
let big = BigInteger256::deserialize_uncompressed(buf).unwrap();
let big = BigUint::try_from(big).unwrap();
Ok(big.into())
}
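
The field-parameter traits were reorganized as well: 0.3's `FrParameters: FpParameters` is now `FrConfig: MontConfig`, and the same modulus is also reachable as `Fr::MODULUS` through `PrimeField`. A one-line check (hypothetical, showing the equivalence of the two paths):

```rust
use ark_bn254::{Fr, FrConfig};
use ark_ff::{MontConfig, PrimeField};

fn main() {
    // Same constant through both paths: the MontConfig associated const
    // (used by SafeMemory above) and the PrimeField one.
    assert_eq!(FrConfig::MODULUS, Fr::MODULUS);
}
```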

@@ -255,16 +255,16 @@ impl WitnessCalculator {
}
pub fn calculate_witness_element<
E: ark_ec::PairingEngine,
E: ark_ec::pairing::Pairing,
I: IntoIterator<Item = (String, Vec<BigInt>)>,
>(
&mut self,
inputs: I,
sanity_check: bool,
) -> Result<Vec<E::Fr>> {
use ark_ff::{FpParameters, PrimeField};
) -> Result<Vec<E::ScalarField>> {
use ark_ff::PrimeField;
let witness = self.calculate_witness(inputs, sanity_check)?;
let modulus = <<E::Fr as PrimeField>::Params as FpParameters>::MODULUS;
let modulus = <E::ScalarField as PrimeField>::MODULUS;
// convert it to field elements
use num_traits::Signed;
@@ -277,7 +277,7 @@ impl WitnessCalculator {
} else {
w.to_biguint().unwrap()
};
E::Fr::from(w)
E::ScalarField::from(w)
})
.collect::<Vec<_>>();
@@ -421,7 +421,7 @@ mod tests {
#[test]
fn safe_multipler() {
let witness =
std::fs::read_to_string(&root_path("test-vectors/safe-circuit-witness.json")).unwrap();
std::fs::read_to_string(root_path("test-vectors/safe-circuit-witness.json")).unwrap();
let witness: Vec<String> = serde_json::from_str(&witness).unwrap();
let witness = &witness.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
run_test(TestCase {
@@ -436,7 +436,7 @@ mod tests {
#[test]
fn smt_verifier() {
let witness =
std::fs::read_to_string(&root_path("test-vectors/smtverifier10-witness.json")).unwrap();
std::fs::read_to_string(root_path("test-vectors/smtverifier10-witness.json")).unwrap();
let witness: Vec<String> = serde_json::from_str(&witness).unwrap();
let witness = &witness.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
@@ -466,8 +466,8 @@ mod tests {
wtns.memory.prime.to_str_radix(16),
"30644E72E131A029B85045B68181585D2833E84879B9709143E1F593F0000001".to_lowercase()
);
assert_eq!(wtns.instance.get_n_vars().unwrap() as u32, case.n_vars);
assert_eq!(wtns.n64 as u32, case.n64);
assert_eq!({ wtns.instance.get_n_vars().unwrap() }, case.n_vars);
assert_eq!({ wtns.n64 }, case.n64);
let inputs_str = std::fs::read_to_string(case.inputs_path).unwrap();
let inputs: std::collections::HashMap<String, serde_json::Value> =

@@ -25,7 +25,7 @@
//! PointsC(8)
//! PointsH(9)
//! Contributions(10)
use ark_ff::{BigInteger256, FromBytes, PrimeField};
use ark_ff::{BigInteger256, PrimeField};
use ark_relations::r1cs::ConstraintMatrices;
use ark_serialize::{CanonicalDeserialize, SerializationError};
use ark_std::log2;
@@ -33,13 +33,15 @@ use byteorder::{LittleEndian, ReadBytesExt};
use std::{
collections::HashMap,
io::{Read, Result as IoResult, Seek, SeekFrom},
io::{Read, Seek, SeekFrom},
};
use ark_bn254::{Bn254, Fq, Fq2, Fr, G1Affine, G2Affine};
use ark_groth16::{ProvingKey, VerifyingKey};
use num_traits::Zero;
type IoResult<T> = Result<T, SerializationError>;
#[derive(Clone, Debug)]
struct Section {
position: u64,
@@ -285,18 +287,18 @@ impl HeaderGroth {
fn read<R: Read>(mut reader: &mut R) -> IoResult<Self> {
// TODO: Impl From<u32> in Arkworks
let n8q: u32 = FromBytes::read(&mut reader)?;
let n8q: u32 = u32::deserialize_uncompressed(&mut reader)?;
// group order r of Bn254
let q = BigInteger256::read(&mut reader)?;
let q = BigInteger256::deserialize_uncompressed(&mut reader)?;
let n8r: u32 = FromBytes::read(&mut reader)?;
let n8r: u32 = u32::deserialize_uncompressed(&mut reader)?;
// Prime field modulus
let r = BigInteger256::read(&mut reader)?;
let r = BigInteger256::deserialize_uncompressed(&mut reader)?;
let n_vars = u32::read(&mut reader)? as usize;
let n_public = u32::read(&mut reader)? as usize;
let n_vars = u32::deserialize_uncompressed(&mut reader)? as usize;
let n_public = u32::deserialize_uncompressed(&mut reader)? as usize;
let domain_size: u32 = FromBytes::read(&mut reader)?;
let domain_size: u32 = u32::deserialize_uncompressed(&mut reader)?;
let power = log2(domain_size as usize);
let verifying_key = ZVerifyingKey::new(&mut reader)?;
@@ -318,15 +320,15 @@ impl HeaderGroth {
// need to divide by R, since snarkjs outputs the zkey with coefficients
// multiplied by R^2
fn deserialize_field_fr<R: Read>(reader: &mut R) -> IoResult<Fr> {
let bigint = BigInteger256::read(reader)?;
Ok(Fr::new(Fr::new(bigint).into_repr()))
let bigint = BigInteger256::deserialize_uncompressed(reader)?;
Ok(Fr::new_unchecked(Fr::new_unchecked(bigint).into_bigint()))
}
// skips the multiplication by R because Circom points are already in Montgomery form
fn deserialize_field<R: Read>(reader: &mut R) -> IoResult<Fq> {
let bigint = BigInteger256::read(reader)?;
// if you use ark_ff::PrimeField::from_repr it multiplies by R
Ok(Fq::new(bigint))
let bigint = BigInteger256::deserialize_uncompressed(reader)?;
// if you use Fq::new it multiplies by R
Ok(Fq::new_unchecked(bigint))
}
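
These two helpers encode the Montgomery-form bookkeeping of the snarkjs format. A field element built with `new_unchecked(limbs)` has value `limbs * R^-1 (mod p)`: Fq coordinates are stored on disk as `x*R`, so `new_unchecked` yields `x` directly, while Fr coefficients are stored as `x*R^2`, so the code round-trips through `into_bigint` to strip the extra factor of R. A sketch with a round-trip check (it relies on `Fp`'s public limb field and on `new` multiplying by R, per the comment above):

```rust
use ark_bn254::Fr;
use ark_ff::{BigInteger256, PrimeField};

// value(new_unchecked(L)) = L * R^-1, so feeding limbs L = x*R^2 through
// two new_unchecked calls (with an into_bigint in between) recovers x.
fn fr_from_snarkjs_limbs(limbs: BigInteger256) -> Fr {
    Fr::new_unchecked(Fr::new_unchecked(limbs).into_bigint())
}

fn main() {
    let x = Fr::from(7u64);
    // x.0 is the Montgomery representation x*R; Fr::new multiplies by R
    // once more, so its limbs are x*R^2, the snarkjs on-disk form.
    let snarkjs_limbs = Fr::new(x.0).0;
    assert_eq!(fr_from_snarkjs_limbs(snarkjs_limbs), x);
}
```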
pub fn deserialize_field2<R: Read>(reader: &mut R) -> IoResult<Fq2> {
@@ -339,14 +341,22 @@ fn deserialize_g1<R: Read>(reader: &mut R) -> IoResult<G1Affine> {
let x = deserialize_field(reader)?;
let y = deserialize_field(reader)?;
let infinity = x.is_zero() && y.is_zero();
Ok(G1Affine::new(x, y, infinity))
if infinity {
Ok(G1Affine::identity())
} else {
Ok(G1Affine::new(x, y))
}
}
fn deserialize_g2<R: Read>(reader: &mut R) -> IoResult<G2Affine> {
let f1 = deserialize_field2(reader)?;
let f2 = deserialize_field2(reader)?;
let infinity = f1.is_zero() && f2.is_zero();
Ok(G2Affine::new(f1, f2, infinity))
if infinity {
Ok(G2Affine::identity())
} else {
Ok(G2Affine::new(f1, f2))
}
}
fn deserialize_g1_vec<R: Read>(reader: &mut R, n_vars: u32) -> IoResult<Vec<G1Affine>> {
@@ -361,16 +371,15 @@ fn deserialize_g2_vec<R: Read>(reader: &mut R, n_vars: u32) -> IoResult<Vec<G2Af
mod tests {
use super::*;
use ark_bn254::{G1Projective, G2Projective};
use ark_crypto_primitives::snark::SNARK;
use num_bigint::BigUint;
use serde_json::Value;
use std::fs::File;
use crate::circom::CircomReduction;
use crate::witness::WitnessCalculator;
use crate::{circom::CircomReduction, CircomBuilder, CircomConfig};
use ark_groth16::{
create_proof_with_reduction_and_matrices, create_random_proof_with_reduction as prove,
prepare_verifying_key, verify_proof,
};
use crate::{CircomBuilder, CircomConfig};
use ark_groth16::Groth16;
use ark_std::rand::thread_rng;
use num_traits::{One, Zero};
use std::str::FromStr;
@@ -473,8 +482,7 @@ mod tests {
let n_vars = 10;
let buf = vec![g1_buf(); n_vars]
.iter()
.cloned()
.flatten()
.flatten().cloned()
.collect::<Vec<_>>();
let expected = vec![g1_one(); n_vars];
@@ -497,8 +505,7 @@ mod tests {
let n_vars = 10;
let buf = vec![g2_buf(); n_vars]
.iter()
.cloned()
.flatten()
.flatten().cloned()
.collect::<Vec<_>>();
let expected = vec![g2_one(); n_vars];
@@ -853,11 +860,11 @@ mod tests {
let inputs = circom.get_public_inputs().unwrap();
let mut rng = thread_rng();
let proof = prove::<_, _, _, CircomReduction>(circom, &params, &mut rng).unwrap();
let proof = Groth16::<Bn254, CircomReduction>::prove(&params, circom, &mut rng).unwrap();
let pvk = prepare_verifying_key(&params.vk);
let pvk = Groth16::<Bn254>::process_vk(&params.vk).unwrap();
let verified = verify_proof(&pvk, &proof, &inputs).unwrap();
let verified = Groth16::<Bn254>::verify_with_processed_vk(&pvk, &inputs, &proof).unwrap();
assert!(verified);
}
@@ -888,7 +895,7 @@ mod tests {
let full_assignment = wtns
.calculate_witness_element::<Bn254, _>(inputs, false)
.unwrap();
let proof = create_proof_with_reduction_and_matrices::<_, CircomReduction>(
let proof = Groth16::<Bn254, CircomReduction>::create_proof_with_reduction_and_matrices(
&params,
r,
s,
@@ -899,9 +906,9 @@ mod tests {
)
.unwrap();
let pvk = prepare_verifying_key(&params.vk);
let pvk = Groth16::<Bn254>::process_vk(&params.vk).unwrap();
let inputs = &full_assignment[1..num_inputs];
let verified = verify_proof(&pvk, &proof, inputs).unwrap();
let verified = Groth16::<Bn254>::verify_with_processed_vk(&pvk, inputs, &proof).unwrap();
assert!(verified);
}

@@ -3,9 +3,10 @@ use ark_std::rand::thread_rng;
use color_eyre::Result;
use ark_bn254::Bn254;
use ark_groth16::{
create_random_proof as prove, generate_random_parameters, prepare_verifying_key, verify_proof,
};
use ark_crypto_primitives::snark::SNARK;
use ark_groth16::Groth16;
type GrothBn = Groth16<Bn254>;
#[test]
fn groth16_proof() -> Result<()> {
@@ -21,17 +22,17 @@ fn groth16_proof() -> Result<()> {
let circom = builder.setup();
let mut rng = thread_rng();
let params = generate_random_parameters::<Bn254, _, _>(circom, &mut rng)?;
let params = GrothBn::generate_random_parameters_with_reduction(circom, &mut rng)?;
let circom = builder.build()?;
let inputs = circom.get_public_inputs().unwrap();
let proof = prove(circom, &params, &mut rng)?;
let proof = GrothBn::prove(&params, circom, &mut rng)?;
let pvk = prepare_verifying_key(&params.vk);
let pvk = GrothBn::process_vk(&params.vk).unwrap();
let verified = verify_proof(&pvk, &proof, &inputs)?;
let verified = GrothBn::verify_with_processed_vk(&pvk, &inputs, &proof)?;
assert!(verified);
@@ -47,14 +48,14 @@ fn groth16_proof_wrong_input() {
.unwrap();
let mut builder = CircomBuilder::new(cfg);
builder.push_input("a", 3);
// This isn't a public input to the circuit, should faild
// This isn't a public input to the circuit, should fail
builder.push_input("foo", 11);
// create an empty instance for setting it up
let circom = builder.setup();
let mut rng = thread_rng();
let _params = generate_random_parameters::<Bn254, _, _>(circom, &mut rng).unwrap();
let _params = GrothBn::generate_random_parameters_with_reduction(circom, &mut rng).unwrap();
let _ = builder.build().unwrap_err();
}
@@ -74,17 +75,17 @@ fn groth16_proof_circom2() -> Result<()> {
let circom = builder.setup();
let mut rng = thread_rng();
let params = generate_random_parameters::<Bn254, _, _>(circom, &mut rng)?;
let params = GrothBn::generate_random_parameters_with_reduction(circom, &mut rng)?;
let circom = builder.build()?;
let inputs = circom.get_public_inputs().unwrap();
let proof = prove(circom, &params, &mut rng)?;
let proof = GrothBn::prove(&params, circom, &mut rng)?;
let pvk = prepare_verifying_key(&params.vk);
let pvk = GrothBn::process_vk(&params.vk).unwrap();
let verified = verify_proof(&pvk, &proof, &inputs)?;
let verified = GrothBn::verify_with_processed_vk(&pvk, &inputs, &proof)?;
assert!(verified);

@@ -3,7 +3,8 @@ use ark_std::rand::thread_rng;
use color_eyre::Result;
use ark_bn254::Bn254;
use ark_groth16::{create_random_proof as prove, generate_random_parameters};
use ark_crypto_primitives::snark::SNARK;
use ark_groth16::Groth16;
use ethers::{
contract::ContractError,
@@ -27,12 +28,12 @@ async fn solidity_verifier() -> Result<()> {
let circom = builder.setup();
let mut rng = thread_rng();
let params = generate_random_parameters::<Bn254, _, _>(circom, &mut rng)?;
let params = Groth16::<Bn254>::generate_random_parameters_with_reduction(circom, &mut rng)?;
let circom = builder.build()?;
let inputs = circom.get_public_inputs().unwrap();
let proof = prove(circom, &params, &mut rng)?;
let proof = Groth16::<Bn254>::prove(&params, circom, &mut rng)?;
// launch the network & compile the verifier
let anvil = Anvil::new().spawn();