Merge pull request #2 from zhenfeizhang/polish-single-opening

code refactoring
This commit is contained in:
Mary Maller
2022-09-07 15:33:45 +01:00
committed by GitHub
31 changed files with 4307 additions and 4620 deletions

.gitignore vendored

@@ -9,3 +9,7 @@ Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
srs/
polys/
tmp/


@@ -1,5 +1,5 @@
[package]
name = "caulk_single_opening"
name = "caulk"
version = "0.1.0"
authors = ["mmaller <mary.maller@ethereum.org>"]
edition = "2018"
@@ -12,20 +12,30 @@ ark-ec = { version = "^0.3.0", default-features = false }
ark-serialize = { version = "^0.3.0", default-features = false, features = [ "derive" ] }
ark-poly = { version = "^0.3.0", default-features = false }
ark-std = { version = "^0.3.0", default-features = false }
ark-relations = { version = "^0.3.0", default-features = false }
ark-crypto-primitives = { version = "^0.3.0", default-features = false }
ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true }
ark-bls12-381 = { version = "^0.3.0", features = [ "std" ] }
ark-bls12-377 = { version = "^0.3.0", features = [ "std" ] }
ark-poly-commit = { version = "^0.3.0", default-features = false }
ark-marlin = { version = "^0.3.0", default-features = false }
tracing = { version = "0.1", default-features = false, features = [ "attributes" ], optional = true }
derivative = { version = "2.0", features = ["use_core"], optional = true}
rand = "0.7.3"
rand_chacha = { version = "0.2.1" }
rand = "0.8.5"
rand_chacha = { version = "0.3.1" }
thiserror = "1.0.19"
blake2s_simd = "0.5.10"
blake2s_simd = "1.0.0"
rayon = { version = "1.5.2", default-features = false, optional = true }
merlin = { version = "3.0.0" }
[features]
asm = [ "ark-ff/asm" ]
parallel = [
"rayon",
"ark-std/parallel",
"ark-ff/parallel",
"ark-poly/parallel"
]
print-trace = [
"ark-std/print-trace"
]


@@ -1,31 +0,0 @@
[package]
name = "caulk_multi_lookup"
authors = ["mmaller <mary.maller@ethereum.org>", "khovratovich <khovratovich@gmail.com>"]
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
ark-ff = { version = "^0.3.0", default-features = false }
ark-ec = { version = "^0.3.0", default-features = false }
ark-serialize = { version = "^0.3.0", default-features = false, features = [ "derive" ] }
ark-poly = { version = "^0.3.0", default-features = false }
ark-std = { version = "^0.3.0", default-features = false }
ark-relations = { version = "^0.3.0", default-features = false }
ark-crypto-primitives = { version = "^0.3.0", default-features = false }
ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true }
ark-bls12-381 = { version = "^0.3.0", features = [ "std" ] }
ark-poly-commit = { version = "^0.3.0", default-features = false }
ark-marlin = { version = "^0.3.0", default-features = false }
tracing = { version = "0.1", default-features = false, features = [ "attributes" ], optional = true }
derivative = { version = "2.0", features = ["use_core"], optional = true}
rand = "0.7.3"
rand_chacha = { version = "0.2.1" }
thiserror = "1.0.19"
blake2s_simd = "0.5.10"
[features]
asm = [ "ark-ff/asm" ]


@@ -1,838 +0,0 @@
/*
This file includes the Caulk prover and verifier for multi openings.
The protocol is described in Figure 3.
*/
use ark_bls12_381::{Bls12_381,Fr,FrParameters,G1Affine, G2Affine};
use ark_poly::{univariate::DensePolynomial, Evaluations as EvaluationsOnDomain};
use ark_ff::{Fp256, Field};
use ark_poly::{EvaluationDomain, Evaluations, GeneralEvaluationDomain, UVPolynomial, Polynomial};
use ark_ec::{AffineCurve,ProjectiveCurve,PairingEngine};
use ark_serialize::CanonicalSerialize;
use ark_std::{cfg_into_iter, One, Zero};
use std::time::{Instant};
use std::vec::Vec;
use crate::caulk_multi_setup::{setup_multi_lookup, PublicParameters};
use crate::caulk_multi_unity::{prove_multiunity,verify_multiunity,ProofMultiUnity};
use crate::tools::{KzgBls12_381, UniPoly381,
kzg_commit_g2,random_field,
generate_lagrange_polynomials_subset,aggregate_kzg_proofs_g2, hash_caulk_multi,
kzg_open_g1_native, kzg_verify_g1_native};
use crate::multiopen::{multiple_open_g2};
pub struct LookupInstance{
pub c_com: G1Affine, //polynomial C(X) that represents a table
pub phi_com: G1Affine, //polynomial phi(X) that represents the values to look up
}
pub struct LookupProverInput{
pub c_poly: DensePolynomial<Fp256<FrParameters>>, //polynomial C(X) that represents a table
pub phi_poly: DensePolynomial<Fp256<FrParameters>>, //polynomial phi(X) that represents the values to look up
pub positions: Vec<usize>, //positions I = {i_j} at which the table is opened
pub openings: Vec<G2Affine>
}
#[derive(Debug)]
#[derive(PartialEq)]
//Data structure to be stored in a file: polynomial, its commitment, and its openings (for a given SRS)
pub struct TableInput{
pub c_poly: DensePolynomial<Fp256<FrParameters>>,
pub c_com: G1Affine,
pub openings: Vec<G2Affine>
}
//Lookup proof data structure
#[allow(non_snake_case)]
pub struct LookupProof{
pub C_I_com: G1Affine, //Commitment to C_I(X)
pub H1_com: G2Affine, //Commitment to H_1(X)
pub H2_com: G1Affine, //Commitment to H_2(X)
pub u_com: G1Affine, //Commitment to u(X)
pub z_I_com: G1Affine, //Commitment to z_I(X)
pub v1: Fr,
pub v2: Fr,
pub pi1:G1Affine,
pub pi2:G1Affine,
pub pi3:G1Affine
}
impl TableInput{
fn store(&self, path: &str)
{
use std::io::Write;
use std::fs::File;
//1. Polynomial
let mut o_bytes = vec![];
let mut f = File::create(path).expect("Unable to create file");
let len: u32 = self.c_poly.len().try_into().unwrap();
let len_bytes = len.to_be_bytes();
f.write_all(&len_bytes).expect("Unable to write data");
let len32: usize = len.try_into().unwrap();
for i in 0..len32
{
self.c_poly.coeffs[i].serialize_uncompressed(&mut o_bytes).ok();
}
f.write_all(&o_bytes).expect("Unable to write data");
//2. Commitment
o_bytes.clear();
self.c_com.serialize_uncompressed(&mut o_bytes).ok();
f.write_all(&o_bytes).expect("Unable to write data");
//3. Openings
o_bytes.clear();
let len: u32 = self.openings.len().try_into().unwrap();
let len_bytes = len.to_be_bytes();
f.write_all(&len_bytes).expect("Unable to write data");
let len32: usize = len.try_into().unwrap();
for i in 0..len32
{
self.openings[i].serialize_uncompressed(&mut o_bytes).ok();
}
f.write_all(&o_bytes).expect("Unable to write data");
}
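// Serialized layout produced by `store` and consumed by `load` (integers big-endian, points uncompressed):
//   [u32: number of coefficients][32 bytes per uncompressed Fr coefficient of c_poly]
//   [96 bytes: uncompressed G1 commitment c_com]
//   [u32: number of openings][192 bytes per uncompressed G2 opening]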
fn load(path: &str) ->TableInput
{
use std::fs::File;
use std::io::Read;
use ark_serialize::CanonicalDeserialize;
const FR_UNCOMPR_SIZE: usize=32;
const G1_UNCOMPR_SIZE: usize =96;
const G2_UNCOMPR_SIZE: usize =192;
let mut data = Vec::new();
let mut f = File::open(path).expect("Unable to open file");
f.read_to_end(&mut data).expect("Unable to read data");
//1. reading c_poly
let mut cur_counter:usize = 0;
let len_bytes: [u8; 4] = (&data[0..4]).try_into().unwrap();
let len: u32 = u32::from_be_bytes(len_bytes);
let mut coeffs = vec![];
let len32: usize = len.try_into().unwrap();
cur_counter += 4;
for i in 0..len32
{
let buf_bytes = &data[cur_counter+i*FR_UNCOMPR_SIZE..cur_counter+(i+1)*FR_UNCOMPR_SIZE];
let tmp = Fr::deserialize_unchecked(buf_bytes).unwrap();
coeffs.push(tmp);
}
cur_counter+=len32*FR_UNCOMPR_SIZE;
//2. c_com
let buf_bytes = &data[cur_counter..cur_counter+G1_UNCOMPR_SIZE];
let c_com = G1Affine::deserialize_unchecked(buf_bytes).unwrap();
cur_counter += G1_UNCOMPR_SIZE;
//3 openings
let len_bytes: [u8; 4] = (&data[cur_counter..cur_counter+4]).try_into().unwrap();
let len: u32 = u32::from_be_bytes(len_bytes);
let mut openings = vec![];
let len32: usize = len.try_into().unwrap();
cur_counter += 4;
for _ in 0..len32
{
let buf_bytes = &data[cur_counter..cur_counter+G2_UNCOMPR_SIZE];
let tmp = G2Affine::deserialize_unchecked(buf_bytes).unwrap();
openings.push(tmp);
cur_counter+=G2_UNCOMPR_SIZE;
}
return TableInput{
c_poly: DensePolynomial { coeffs },
c_com: c_com,
openings: openings
}
}
}
#[allow(non_snake_case)]
pub fn compute_lookup_proof(
instance: &LookupInstance,
input: &LookupProverInput,
srs: &PublicParameters
)->(LookupProof, ProofMultiUnity)
{
let m = input.positions.len();
///////////////////////////////////////////////////////////////////
//1. Blinders
///////////////////////////////////////////////////////////////////
// prover's blinders for zero-knowledge
let r1: Fp256<FrParameters> = random_field::<Fr>();
let r2: Fp256<FrParameters> = random_field::<Fr>();
let r3: Fp256<FrParameters> = random_field::<Fr>();
let r4: Fp256<FrParameters> = random_field::<Fr>();
let r5: Fp256<FrParameters> = random_field::<Fr>();
let r6: Fp256<FrParameters> = random_field::<Fr>();
let r7: Fp256<FrParameters> = random_field::<Fr>();
///////////////////////////////////////////////////////////////////
//2. Compute z_I(X) = r1 prod_{i in I} (X - w^i)
///////////////////////////////////////////////////////////////////
// z_I includes each position only once.
let mut positions_no_repeats = Vec::new();
for &pos in input.positions.iter() {
if !positions_no_repeats.contains(&pos) {
positions_no_repeats.push(pos);
}
}
// insert 0 into z_I so that we can pad when m is not a power of 2.
if !positions_no_repeats.contains(&0usize) {
positions_no_repeats.push(0usize);
}
// z_I(X)
let mut z_I = DensePolynomial::from_coefficients_slice(
&[
r1
]);
for j in 0..positions_no_repeats.len() {
z_I = &z_I * &DensePolynomial::from_coefficients_slice(
&[
-srs.domain_N.element(positions_no_repeats[j]) ,
Fr::one()]);
}
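// Example: if I = {1, 5} (after adding the padding index 0), then z_I(X) = r1 (X - w^0)(X - w^1)(X - w^5);
// its roots are exactly the opened positions and the factor r1 blinds the commitment [z_I]_1.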
///////////////////////////////////////////////////////////////////
//3. Compute C_I(X) = (r2 + r3 X + r4 X^2)*z_I(X) + sum_j c_j*tau_j(X)
///////////////////////////////////////////////////////////////////
let mut c_I_poly = DensePolynomial::from_coefficients_slice(&[Fr::zero()]);
// tau_polys[i] = 1 at positions_no_repeats[i] and 0 at positions_no_repeats[j]
// Takes m^2 time, or 36ms when m = 32. Can be done in m log^2(m) time if this ever becomes a bottleneck.
// See https://people.csail.mit.edu/devadas/pubs/scalable_thresh.pdf
let tau_polys = generate_lagrange_polynomials_subset(&positions_no_repeats, srs);
// C_I(X) = sum_j c_j*tau_j(X)
// Takes m^2 time, or 38ms when m = 32. Can be done faster if we store c_poly evaluations.
for j in 0..positions_no_repeats.len(){
c_I_poly = &c_I_poly + &(&tau_polys[j]*input.c_poly.evaluate(&srs.domain_N.element(positions_no_repeats[j]))); //sum_j c_j*tau_j
}
// extra_blinder = r2 + r3 X + r4 X^2
let extra_blinder=DensePolynomial::from_coefficients_slice(
&[ r2,
r3, r4]);
// C_I(X) = C_I(X) + z_I(X) * (r2 + r3 X + r4 X^2)
c_I_poly = &c_I_poly + &(&z_I*&extra_blinder);
///////////////////////////////////////////////////////////////////
//4. Compute H1
///////////////////////////////////////////////////////////////////
// Compute [Q(x)]_2 by aggregating kzg proofs such that
// Q(X) = ( C(X) - sum_{i in I} c_{i+1} tau_i(X) ) / ( prod_{i in I} (X - w^i) )
let g2_Q=aggregate_kzg_proofs_g2(&input.openings, &positions_no_repeats, &srs.domain_N);
// blind_com = [ r2 + r3 x + r4 x^2 ]_2
let blind_com = kzg_commit_g2(&extra_blinder, srs);
// H1_com = [ r1^{-1} Q(x) ]_2 - blind_com
let H1_com = (g2_Q.mul(r1.inverse().unwrap())
-blind_com.into_projective())
.into_affine();
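// Why this works: the aggregated KZG proof gives Q(X) = ( C(X) - sum_{i in I} c_i tau_i(X) ) / prod_{i in I}(X - w^i) in G2.
// Since z_I(X) = r1 * prod_{i in I}(X - w^i) and C_I(X) adds z_I(X)*(r2 + r3 X + r4 X^2), we get
// ( C(X) - C_I(X) ) / z_I(X) = r1^{-1} Q(X) - (r2 + r3 X + r4 X^2) = H_1(X),
// which is exactly what the verifier's final pairing check e([C - C_I]_1, [1]_2) = e([z_I]_1, [H_1]_2) tests.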
///////////////////////////////////////////////////////////////////
//5. Compute u(X) = sum_j w^{i_j} mu_j(X) + (r5 + r6 X + r7 X^2) z_{Vm}(X)
///////////////////////////////////////////////////////////////////
// u(X) = sum_j w^{i_j} mu_j(X)
let mut u_vals= vec![];
for j in 0..m {
u_vals.push(srs.domain_N.element(input.positions[j]));
}
// u(X) = u(X) + (r5 + r6 X + r7 X^2) z_{Vm}(X)
let extra_blinder2=DensePolynomial::from_coefficients_slice(
&[
r5,
r6,
r7
]);
let u_poly = &EvaluationsOnDomain::from_vec_and_domain(u_vals.clone(), srs.domain_m).interpolate()
+ &(extra_blinder2.mul_by_vanishing_poly(srs.domain_m));
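// u(X) now interpolates the opened roots of unity w^{i_j} over V_m and is blinded by a degree-2
// multiple of z_{Vm}(X), so the commitment [u]_1 hides which positions were opened.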
///////////////////////////////////////////////////////////////////
//6. Commitments
///////////////////////////////////////////////////////////////////
let (z_I_com, _) = KzgBls12_381::commit(&srs.poly_ck, &z_I, None, None).unwrap();
let (C_I_com, _) = KzgBls12_381::commit(&srs.poly_ck, &c_I_poly, None, None).unwrap();
let (u_com, _) = KzgBls12_381::commit(&srs.poly_ck, &u_poly, None, None).unwrap();
///////////////////////////////////////////////////////////////////
//7 Prepare unity proof
///////////////////////////////////////////////////////////////////
// hash_input initialised to zero
let mut hash_input = Fr::zero();
//let now = Instant::now();
let unity_proof = prove_multiunity( &srs, &mut hash_input, &u_com.0, u_vals.clone(), extra_blinder2 );
//println!("Time to prove unity {:?}", now.elapsed());
// quick test can be uncommented to check if unity proof verifies
// let unity_check = verify_multiunity( &srs, &mut Fr::zero(), u_com.0.clone(), &unity_proof );
// println!("unity_check = {}", unity_check);
///////////////////////////////////////////////////////////////////
//8. Hash outputs to get chi
///////////////////////////////////////////////////////////////////
let chi = hash_caulk_multi::<Fr>(
hash_input,
Some(& [ &instance.c_com, &instance.phi_com,
// hash last round of unity proof for good practice
&unity_proof.g1_u_bar_alpha, &unity_proof.g1_h_2_alpha,
&unity_proof.pi_1, &unity_proof.pi_2, &unity_proof.pi_3, &unity_proof.pi_4, &unity_proof.pi_5,
// lookup inputs
&C_I_com.0, &z_I_com.0, &u_com.0 ].to_vec() ),
Some(& [ &H1_com.clone() ].to_vec() ),
Some(& [ &unity_proof.v1, &unity_proof.v2, &unity_proof.v3 ].to_vec() ));
hash_input = chi.clone();
///////////////////////////////////////////////////////////////////
//9. Compute z_I( u(X) )
///////////////////////////////////////////////////////////////////
// Need a bigger domain to compute z_I(u(X)) over.
// Has size O(m^2)
let domain_m_sq: GeneralEvaluationDomain<Fr> = GeneralEvaluationDomain::new( z_I.len() * u_poly.len() + 2 ).unwrap();
// id_poly(X) = 0 at the first m elements of the domain and 1 at the remaining (padding) elements;
// used for when m is not a power of 2
let mut id_poly = DensePolynomial::from_coefficients_slice( & [Fr::one()]);
id_poly = &id_poly - &srs.id_poly;
// Compute z_I( u(X) + w^0 id(X) )
// Computing z_I(u(X)) is done naively and could be faster. Currently this is not a bottleneck
let evals: Vec<Fp256<FrParameters>> = cfg_into_iter!(0..domain_m_sq.size())
.map(|k| {
z_I.evaluate( &(
u_poly.evaluate(&domain_m_sq.element(k))
+ id_poly.evaluate(&domain_m_sq.element(k))
) )
}).collect();
let z_I_u_poly = Evaluations::from_vec_and_domain(evals, domain_m_sq).interpolate();
///////////////////////////////////////////////////////////////////
//10. Compute C_I(u(X))-phi(X)
///////////////////////////////////////////////////////////////////
// Compute C_I( u(X) )
// Computing C_I(u(X)) is done naively and could be faster. Currently this is not a bottleneck
//Actually compute c_I( u(X) + id(X) ) in case m is not a power of 2
let evals: Vec<Fp256<FrParameters>> = cfg_into_iter!(0..domain_m_sq.size())
.map(|k| {
c_I_poly.evaluate( &(
u_poly.evaluate(&domain_m_sq.element(k))
+ id_poly.evaluate(&domain_m_sq.element(k))
) )
}).collect();
// c_I_u_poly = C_I( u(X) ) - phi(X)
let c_I_u_poly = &Evaluations::from_vec_and_domain(evals, domain_m_sq)
.interpolate()
- &input.phi_poly;
///////////////////////////////////////////////////////////////////
//11. Compute H2
///////////////////////////////////////////////////////////////////
// temp_poly(X) = z_I(u(X)) + chi [ C_I(u(X)) - phi(X) ]
let temp_poly = &z_I_u_poly + &(&c_I_u_poly*chi);
//H2(X) = temp_poly / z_Vm(X)
let (H2_poly, rem) = temp_poly.divide_by_vanishing_poly( srs.domain_m ).unwrap();
assert!(rem== DensePolynomial::from_coefficients_slice(&[Fr::zero()]), "H_2(X) doesn't divide");
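// The division succeeding enforces the core lookup identity on V_m:
//   z_I(u(X)) + chi * ( C_I(u(X)) - phi(X) ) = z_{Vm}(X) * H_2(X).
// Because chi is a random challenge, with overwhelming probability both terms vanish separately at each
// active nu_j: z_I(u(nu_j)) = 0 (so u(nu_j) is one of the opened w^i) and C_I(u(nu_j)) = phi(nu_j).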
///////////////////////////////////////////////////////////////////
//12. Compute commitments to H2
///////////////////////////////////////////////////////////////////
//let now = Instant::now();
let (H2_com, _) = KzgBls12_381::commit(&srs.poly_ck, &H2_poly, None, None).unwrap();
//println!("Time to commit to H2 {:?}", now.elapsed());
///////////////////////////////////////////////////////////////////
//13. Hash outputs to get alpha
///////////////////////////////////////////////////////////////////
let alpha = hash_caulk_multi::<Fr>(
hash_input,
Some(& [ &H2_com.0.clone() ].to_vec() ),
None, None );
// last hash so don't need to update hash_input
// hash_input = alpha.clone();
///////////////////////////////////////////////////////////////////
//14. Open u at alpha, get v1
///////////////////////////////////////////////////////////////////
let (evals1, pi1) = kzg_open_g1_native(&srs.poly_ck, &u_poly, None, [&alpha].to_vec() );
let v1 = evals1[0];
///////////////////////////////////////////////////////////////////
//15. Compute p1(X) and open at v1
///////////////////////////////////////////////////////////////////
// v1_id = u(alpha) + id(alpha) for when m is not a power of 2
let v1_id = v1 + id_poly.evaluate(&alpha);
// p1(X) = z_I(X) + chi * c_I(X)
let p1_poly = &z_I + &(&c_I_poly * chi);
let (evals2, pi2) = kzg_open_g1_native(&srs.poly_ck, &p1_poly, None, [&v1_id].to_vec() );
///////////////////////////////////////////////////////////////////
//16. Compute p2(X) and open p2 at alpha
///////////////////////////////////////////////////////////////////
// p2(X) = zI(u(alpha)) + chi C_I( u(alpha) )
let mut p2_poly = DensePolynomial::from_coefficients_slice(
&[ z_I.evaluate(&v1_id) + chi * c_I_poly.evaluate(&v1_id) ] );
// p2(X) = p2(X) - chi phi(X)
p2_poly = &p2_poly - &(&input.phi_poly * chi);
// p2(X) = p2(X) - zVm(alpha) H2(X)
let zVm: UniPoly381 = srs.domain_m.vanishing_polynomial().into();
p2_poly = &p2_poly - &( &H2_poly * zVm.evaluate(&alpha) );
// Open p2(X) at alpha
let (evals3, pi3) = kzg_open_g1_native(&srs.poly_ck, &p2_poly, None, [&alpha].to_vec() );
// check that p2_poly(alpha) = 0
assert!(evals3[0]==Fr::zero(), "p2(alpha) does not equal 0");
///////////////////////////////////////////////////////////////////
//17. Compose proof
///////////////////////////////////////////////////////////////////
let proof = LookupProof{
C_I_com: C_I_com.0,
H1_com: H1_com,
H2_com: H2_com.0,
z_I_com: z_I_com.0,
u_com: u_com.0,
v1: v1,
v2: evals2[0],
pi1: pi1,
pi2: pi2,
pi3: pi3
};
return (proof, unity_proof);
}
#[allow(non_snake_case)]
pub fn verify_lookup_proof(
c_com: G1Affine,
phi_com: G1Affine,
proof: &LookupProof,
unity_proof: &ProofMultiUnity,
srs: &PublicParameters
)->bool
{
///////////////////////////////////////////////////////////////////
//1. check unity
///////////////////////////////////////////////////////////////////
// hash_input initialised to zero
let mut hash_input = Fr::zero();
let unity_check = verify_multiunity(srs, &mut hash_input, proof.u_com, unity_proof );
assert!(unity_check, "failure on unity");
///////////////////////////////////////////////////////////////////
//2. Hash outputs to get chi
///////////////////////////////////////////////////////////////////
let chi = hash_caulk_multi::<Fr>(
hash_input.clone(),
Some(& [ &c_com, &phi_com,
// include last round of unity proof outputs for good practice
&unity_proof.g1_u_bar_alpha, &unity_proof.g1_h_2_alpha,
&unity_proof.pi_1, &unity_proof.pi_2, &unity_proof.pi_3,
&unity_proof.pi_4, &unity_proof.pi_5,
// outputs from multi-lookup
&proof.C_I_com, &proof.z_I_com, &proof.u_com ].to_vec() ),
Some(& [ &proof.H1_com ].to_vec() ),
Some(& [ &unity_proof.v1, &unity_proof.v2, &unity_proof.v3 ].to_vec() ));
hash_input = chi.clone();
///////////////////////////////////////////////////////////////////
//3. Hash outputs to get alpha
///////////////////////////////////////////////////////////////////
let alpha = hash_caulk_multi::<Fr>(
hash_input,
Some(& [ &proof.H2_com ].to_vec() ),
None, None );
// last hash so don't need to update hash_input
// hash_input = alpha.clone();
///////////////////////////////////////////////////////////////////
//4. Check pi_1
///////////////////////////////////////////////////////////////////
// KZG.Verify(srs_KZG, [u]_1, deg = bot, alpha, v1, pi1)
let check1 = kzg_verify_g1_native(
&srs,
proof.u_com.clone(),
None,
[alpha].to_vec(),
[proof.v1].to_vec(),
proof.pi1
);
assert!(check1,"failure on pi_1 check");
///////////////////////////////////////////////////////////////////
//5. Check pi_2
///////////////////////////////////////////////////////////////////
// v1_id = u(alpha)+ id(alpha) for when m is not a power of 2
let v1_id = proof.v1 + (Fr::one() - srs.id_poly.evaluate(&alpha));
// [P1]_1 = [z_I]_1 + chi [c_I]_1
let p1_com =(proof.z_I_com.into_projective()
+ proof.C_I_com.mul(chi)).into_affine();
// KZG.Verify(srs_KZG, [P1]_1, deg = bot, v1_id, v2, pi2)
let check2 = kzg_verify_g1_native(
&srs,
p1_com,
None,
[v1_id].to_vec(),
[proof.v2].to_vec(),
proof.pi2
);
assert!(check2, "failure on pi_2 check");
///////////////////////////////////////////////////////////////////
//6. Check pi_3
///////////////////////////////////////////////////////////////////
// z_Vm(X)
let zVm: UniPoly381 = srs.domain_m.vanishing_polynomial().into(); // z_Vm(alpha)
// [P2]_1 = [v2]_1 - chi [phi]_1 - zVm(alpha) [H_2]_1
let p2_com = (
srs.poly_ck.powers_of_g[0].mul(proof.v2 ) // [v2]_1
- phi_com.mul( chi ) //[phi]_1
- proof.H2_com.mul(zVm.evaluate(&alpha)) // [H2]_1 * zVm(alpha)
).into_affine();
// KZG.Verify(srs_KZG, [P2]_1, deg = bot, alpha, 0, pi3)
let check3 = kzg_verify_g1_native(
&srs,
p2_com,
None,
[alpha].to_vec(),
[Fr::zero()].to_vec(),
proof.pi3
);
assert!(check3, "failure on check 3");
///////////////////////////////////////////////////////////////////
//7. Check final pairing
///////////////////////////////////////////////////////////////////
// pairing1 = e([C]_1 - [C_I]_1, [1]_2)
let pairing1=Bls12_381::pairing((c_com.into_projective()-proof.C_I_com.into_projective()).into_affine(), srs.g2_powers[0]);
// pairing2 = e([z_I]_1, [H_1]_2)
let pairing2 = Bls12_381::pairing(proof.z_I_com,proof.H1_com);
assert!(pairing1 == pairing2, "failure on pairing check");
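// A successful pairing check shows that z_I(X) divides C(X) - C_I(X) with quotient H_1(X),
// i.e. the committed C_I agrees with the committed table C on every opened position w^i, i in I.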
return true;
}
#[allow(non_snake_case)]
#[allow(dead_code)]
pub fn generate_lookup_input()
->(
LookupProverInput,
PublicParameters //SRS
)
{
let n: usize =8;//bitlength of poly degree
let m: usize = 4;
//let m: usize = (1<<(n/2-1)); //should be power of 2
let N: usize = 1<<n;
let max_degree: usize = if N>2*m*m {N-1} else {2*m*m};
let actual_degree = N-1;
let now = Instant::now();
let pp =setup_multi_lookup(&max_degree,&N,&m,&n);
println!("Time to setup {:?}", now.elapsed());
let rng = &mut ark_std::test_rng();
let c_poly = UniPoly381::rand(actual_degree, rng);
let mut positions: Vec<usize> = vec![];
for j in 0..m { //generate positions evenly distributed in the set
let i_j: usize = j*(N/m);
positions.push(i_j);
};
//generating phi
let blinder: Fp256<FrParameters> = random_field::<Fr>();
let a_m = DensePolynomial::from_coefficients_slice(&[blinder]);
let mut phi_poly = a_m.mul_by_vanishing_poly(pp.domain_m);
for j in 0..m
{
phi_poly = &phi_poly +
&(&pp.lagrange_polynomials_m[j]
* c_poly.evaluate(&pp.domain_N.element(positions[j]))); //adding c(w^{i_j})*mu_j(X)
}
for j in m..pp.domain_m.size() {
phi_poly = &phi_poly +
&(&pp.lagrange_polynomials_m[j]
* c_poly.evaluate(&pp.domain_N.element(0)));
}
let now = Instant::now();
let openings = multiple_open_g2(&pp.g2_powers, &c_poly, n);
println!("Time to generate openings {:?}", now.elapsed());
return (LookupProverInput{
c_poly: c_poly,
phi_poly:phi_poly,
positions: positions,
openings: openings},
pp);
}
#[allow(non_snake_case)]
#[test]
pub fn test_lookup()
{
_do_lookup();
}
#[allow(non_snake_case)]
#[test]
pub fn test_store()
{
//1. Setup
let n: usize = 6;
let N: usize = 1<<n;
let powers_size: usize = N+2; //SRS SIZE
let temp_m = n; //dummy
let pp =setup_multi_lookup(&powers_size,&N,&temp_m,&n);
let actual_degree = N-1;
let path=format!("tmp/poly_openings.log");
//2. Store
let rng = &mut ark_std::test_rng();
let c_poly = UniPoly381::rand(actual_degree, rng);
let (c_comx, _) = KzgBls12_381::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
let openings = multiple_open_g2(&pp.g2_powers, &c_poly, pp.n);
let table = TableInput{
c_poly: c_poly,
c_com: c_comx.0,
openings: openings
};
table.store(&path);
//3. Load
let table_loaded = TableInput::load(&path);
//4. Test
assert_eq!(table,table_loaded);
std::fs::remove_file(&path).expect("File can not be deleted");
}
#[allow(non_snake_case)]
#[test]
pub fn test_multiple_lookups()
{
do_multiple_lookups()
}
pub fn get_poly_and_g2_openings(
pp: &PublicParameters,
actual_degree: usize,
)->TableInput
{
use std::fs::File;
//try opening the file. If it exists load the setup from there, otherwise generate
let path=format!("polys/poly_{}_openings_{}.setup",actual_degree,pp.N);
let res = File::open(path.clone());
match res{
Ok(_)=>{
let now = Instant::now();
let table = TableInput::load(&path);
println!("Time to load openings = {:?}", now.elapsed());
return table;
}
Err(_)=>{
let rng = &mut ark_std::test_rng();
let c_poly = UniPoly381::rand(actual_degree, rng);
let (c_comx, _) = KzgBls12_381::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
let now = Instant::now();
let openings = multiple_open_g2(&pp.g2_powers, &c_poly, pp.n);
println!("Time to generate openings = {:?}", now.elapsed());
let table = TableInput{
c_poly: c_poly,
c_com: c_comx.0,
openings: openings
};
table.store(&path);
return table;
}
}
}
#[cfg(test)]
pub mod tests {
#[allow(non_snake_case)]
pub fn do_multiple_lookups()
{
const MIN_LOG_N: usize = 7;
const MAX_LOG_N: usize = 15;
const EPS: usize=1;
const MIN_LOG_M: usize=2;
const MAX_LOG_M: usize=5;
for n in MIN_LOG_N..=MAX_LOG_N
{
//1. Setup
let N: usize = 1<<n;
let powers_size: usize = N+2; //SRS SIZE
println!("N={}",N);
let temp_m = n; //dummy
let mut pp =setup_multi_lookup(&powers_size,&N,&temp_m,&n);
let actual_degree = N-EPS;
//println!("time for powers of tau {:?} for N={:?}", now.elapsed(),N);
//2. Poly and openings
let table=get_poly_and_g2_openings(&pp,actual_degree);
for logm in MIN_LOG_M..=std::cmp::min(MAX_LOG_M,n/2-1)
{
//3. Setup
let now = Instant::now();
let mut m = 1<<logm;
m = m + 1;
println!("m={}",m);
pp.regenerate_lookup_params(m);
println!("Time to generate aux domain {:?}", now.elapsed());
//4. Positions
let mut positions: Vec<usize> = vec![];
for j in 0..m { //generate positions evenly distributed in the set
let i_j: usize = j*(actual_degree/m);
positions.push(i_j);
};
//5. generating phi
let blinder: Fp256<FrParameters> = random_field::<Fr>();
let a_m = DensePolynomial::from_coefficients_slice(&[blinder]);
let mut phi_poly = a_m.mul_by_vanishing_poly(pp.domain_m);
let c_poly_local = table.c_poly.clone();
for j in 0..m
{
phi_poly = &phi_poly +
&(&pp.lagrange_polynomials_m[j]
* c_poly_local.evaluate(&pp.domain_N.element(positions[j]))); //adding c(w^{i_j})*mu_j(X)
}
for j in m..pp.domain_m.size()
{
phi_poly = &phi_poly +
&(&pp.lagrange_polynomials_m[j]
* c_poly_local.evaluate( &pp.domain_N.element(0) ) ); //adding c(w^{i_j})*mu_j(X)
}
//6. Running proofs
let now = Instant::now();
let (c_com, _) = KzgBls12_381::commit(&pp.poly_ck, &table.c_poly, None, None).unwrap();
let (phi_com, _) = KzgBls12_381::commit(&pp.poly_ck, &phi_poly, None, None).unwrap();
println!("Time to generate inputs = {:?}", now.elapsed());
let lookup_instance = LookupInstance{
c_com: c_com.0.clone(),
phi_com: phi_com.0.clone(),
};
let prover_input = LookupProverInput{
c_poly: table.c_poly.clone(),
phi_poly:phi_poly,
positions: positions,
openings: table.openings.clone()};
let now = Instant::now();
let (proof, unity_proof) = compute_lookup_proof(&lookup_instance, &prover_input,&pp);
println!("Time to generate proof for = {:?}", now.elapsed());
let now = Instant::now();
let res=verify_lookup_proof(table.c_com, phi_com.0, &proof, &unity_proof, &pp);
println!("Time to verify proof for = {:?}", now.elapsed());
assert!(res);
println!("Lookup test succeeded");
}
}
}
pub fn _do_lookup()
{
let now = Instant::now();
let (prover_input,srs)=generate_lookup_input();
println!("Time to generate parameters for n={:?} = {:?}", srs.n, now.elapsed());
//kzg_test(&srs);
let (c_com, _) = KzgBls12_381::commit(&srs.poly_ck, &prover_input.c_poly, None, None).unwrap();
let (phi_com, _) = KzgBls12_381::commit(&srs.poly_ck, &prover_input.phi_poly, None, None).unwrap();
let lookup_instance = LookupInstance{
c_com: c_com.0.clone(),
phi_com: phi_com.0.clone(),
};
let now = Instant::now();
let (proof, unity_proof) = compute_lookup_proof(&lookup_instance, &prover_input,&srs);
println!("Time to generate proof for m={:?} = {:?}", srs.m, now.elapsed());
let now = Instant::now();
let res=verify_lookup_proof(c_com.0, phi_com.0, &proof, &unity_proof, &srs);
println!("Time to verify proof for n={:?} = {:?}", srs.n, now.elapsed());
assert!(res);
println!("Lookup test succeeded");
}
}


@@ -1,399 +0,0 @@
/*
This file includes the setup algorithm for Caulk with multi openings.
The setup is not formally described in the paper.
*/
use ark_poly_commit::kzg10::*;
use ark_ec::{bls12::Bls12, PairingEngine,AffineCurve,ProjectiveCurve};
use ark_poly::{ UVPolynomial, GeneralEvaluationDomain, Evaluations as EvaluationsOnDomain,
EvaluationDomain};
use ark_bls12_381::{Bls12_381, Fr, FrParameters,G1Affine, G2Affine};
use ark_ff::{Fp256, UniformRand};
use ark_serialize::{CanonicalSerialize, CanonicalDeserialize};
use crate::tools::{UniPoly381, KzgBls12_381};
use std::{time::{Instant}, fs::File, io::Read};
use ark_std::{One, Zero,cfg_into_iter};
// structure of public parameters
#[allow(non_snake_case)]
pub struct PublicParameters {
pub poly_ck: Powers<'static, Bls12<ark_bls12_381::Parameters> >,
pub poly_vk: VerifierKey<Bls12<ark_bls12_381::Parameters>>,
pub domain_m: GeneralEvaluationDomain<Fr>,
pub domain_n: GeneralEvaluationDomain<Fr>,
pub domain_N: GeneralEvaluationDomain<Fr>,
pub verifier_pp: VerifierPublicParameters,
pub lagrange_polynomials_n: Vec< UniPoly381>,
pub lagrange_polynomials_m: Vec< UniPoly381>,
pub id_poly: UniPoly381,
pub N: usize,
pub m: usize,
pub n: usize,
pub g2_powers: Vec<G2Affine>,
}
pub struct LookupParameters{
m: usize,
lagrange_polynomials_m: Vec< UniPoly381>,
domain_m: GeneralEvaluationDomain<Fr>,
id_poly: UniPoly381,
}
impl PublicParameters{
pub fn regenerate_lookup_params(&mut self, m: usize){
let lp = generate_lookup_params(m);
self.m = lp.m;
self.lagrange_polynomials_m = lp.lagrange_polynomials_m;
self.domain_m = lp.domain_m;
self.id_poly = lp.id_poly;
}
//store the public parameters (powers of g, powers of gamma_g, verifier key, and g2 powers) in a file
pub fn store(&self, path: &str) {
use std::io::Write;
//1. Powers of g
let mut g_bytes = vec![];
let mut f = File::create(path).expect("Unable to create file");
let deg: u32 = self.poly_ck.powers_of_g.len().try_into().unwrap();
let deg_bytes = deg.to_be_bytes();
f.write_all(&deg_bytes).expect("Unable to write data");
let deg32: usize = deg.try_into().unwrap();
for i in 0..deg32
{
self.poly_ck.powers_of_g[i].into_projective().into_affine().serialize_uncompressed(&mut g_bytes).ok();
}
f.write_all(&g_bytes).expect("Unable to write data");
//2. Powers of gamma_g
let deg_gamma: u32 = self.poly_ck.powers_of_gamma_g.len().try_into().unwrap();
let mut gg_bytes = vec![];
let deg_bytes = deg_gamma.to_be_bytes();
f.write_all(&deg_bytes).expect("Unable to write data");
let deg32: usize = deg_gamma.try_into().unwrap(); // iterate over the gamma_g powers, matching the length prefix just written
for i in 0..deg32
{
self.poly_ck.powers_of_gamma_g[i].into_projective().into_affine().serialize_uncompressed(&mut gg_bytes).ok();
}
f.write_all(&gg_bytes).expect("Unable to write data");
//3. Verifier key
let mut h_bytes = vec![];
self.poly_vk.h.serialize_uncompressed(&mut h_bytes).ok();
self.poly_vk.beta_h.serialize_uncompressed(&mut h_bytes).ok();
f.write_all(&h_bytes).expect("Unable to write data");
//4. g2 powers
let mut g2_bytes = vec![];
let deg2: u32 = self.g2_powers.len().try_into().unwrap();
let deg2_bytes = deg2.to_be_bytes();
f.write_all(&deg2_bytes).expect("Unable to write data");
let deg2_32: usize = deg2.try_into().unwrap();
for i in 0..deg2_32
{
self.g2_powers[i].into_projective().into_affine().serialize_uncompressed(&mut g2_bytes).ok();
}
f.write_all(&g2_bytes).expect("Unable to write data");
}
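// On-disk layout written by `store` and read back by `load` (integers big-endian, points uncompressed):
//   [u32 count][count * 96-byte G1 powers_of_g]
//   [u32 count][count * 96-byte G1 powers_of_gamma_g]
//   [192-byte G2 h][192-byte G2 beta_h]
//   [u32 count][count * 192-byte G2 powers]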
//load the committer key, verifier key, and g2 powers from a file
pub fn load(path: &str)
->(
Powers<'static, Bls12<ark_bls12_381::Parameters> >,
VerifierKey<Bls12<ark_bls12_381::Parameters>>,
Vec<G2Affine>
)
{
const G1_UNCOMPR_SIZE: usize =96;
const G2_UNCOMPR_SIZE: usize =192;
let mut data = Vec::new();
let mut f = File::open(path).expect("Unable to open file");
f.read_to_end(&mut data).expect("Unable to read data");
//1. reading g powers
let mut cur_counter:usize = 0;
let deg_bytes: [u8; 4] = (&data[0..4]).try_into().unwrap();
let deg: u32 = u32::from_be_bytes(deg_bytes);
let mut powers_of_g = vec![];
let deg32: usize = deg.try_into().unwrap();
cur_counter += 4;
for i in 0..deg32
{
let buf_bytes = &data[cur_counter+i*G1_UNCOMPR_SIZE..cur_counter+(i+1)*G1_UNCOMPR_SIZE];
let tmp = G1Affine::deserialize_unchecked(buf_bytes).unwrap();
powers_of_g.push(tmp);
}
cur_counter+=deg32*G1_UNCOMPR_SIZE;
//2. reading gamma g powers
let deg_bytes: [u8; 4] = (&data[cur_counter..cur_counter+4]).try_into().unwrap();
let deg: u32 = u32::from_be_bytes(deg_bytes);
let mut powers_of_gamma_g = vec![];
let deg32: usize = deg.try_into().unwrap();
cur_counter += 4;
for i in 0..deg32
{
let buf_bytes = &data[cur_counter+i*G1_UNCOMPR_SIZE..cur_counter+(i+1)*G1_UNCOMPR_SIZE];
let tmp = G1Affine::deserialize_unchecked(buf_bytes).unwrap();
powers_of_gamma_g.push(tmp);
}
cur_counter+=deg32*G1_UNCOMPR_SIZE;
//3. reading verifier key
let buf_bytes = &data[cur_counter..cur_counter+G2_UNCOMPR_SIZE];
let h = G2Affine::deserialize_unchecked(buf_bytes).unwrap();
cur_counter+= G2_UNCOMPR_SIZE;
let buf_bytes = &data[cur_counter..cur_counter+G2_UNCOMPR_SIZE];
let beta_h = G2Affine::deserialize_unchecked(buf_bytes).unwrap();
cur_counter+= G2_UNCOMPR_SIZE;
//4. reading G2 powers
let deg2_bytes: [u8; 4] = (&data[cur_counter..cur_counter+4]).try_into().unwrap();
let deg2: u32 = u32::from_be_bytes(deg2_bytes);
let mut g2_powers = vec![];
let deg2_32: usize = deg2.try_into().unwrap();
cur_counter += 4;
for _ in 0..deg2_32
{
let buf_bytes = &data[cur_counter ..cur_counter+G2_UNCOMPR_SIZE];
let tmp = G2Affine::deserialize_unchecked(buf_bytes).unwrap();
g2_powers.push(tmp);
cur_counter+=G2_UNCOMPR_SIZE;
}
let vk = VerifierKey {
g: powers_of_g[0].clone(),
gamma_g: powers_of_gamma_g[0].clone(),
h: h,
beta_h: beta_h,
prepared_h: h.into(),
prepared_beta_h: beta_h.into(),
};
let powers = Powers {
powers_of_g: ark_std::borrow::Cow::Owned(powers_of_g),
powers_of_gamma_g: ark_std::borrow::Cow::Owned(powers_of_gamma_g),
};
(powers, vk, g2_powers)
}
}
// smaller set of public parameters used by verifier
pub struct VerifierPublicParameters {
pub poly_vk: VerifierKey<Bls12<ark_bls12_381::Parameters>>,
pub domain_m_size: usize,
}
fn generate_lookup_params(m: usize)
->LookupParameters
{
let domain_m: GeneralEvaluationDomain<Fr> = GeneralEvaluationDomain::new( m.clone() ).unwrap();
// id_poly(X) = 1 for omega_m in range and 0 for omega_m not in range.
let mut id_vec = Vec::new();
for _ in 0..m.clone() {
id_vec.push( Fr::one() );
}
for _ in m.clone() .. domain_m.size() {
id_vec.push( Fr::zero() );
}
let id_poly = EvaluationsOnDomain::from_vec_and_domain(id_vec, domain_m).interpolate();
let mut lagrange_polynomials_m: Vec< UniPoly381 > = Vec::new();
for i in 0..domain_m.size() {
let evals: Vec<Fp256<FrParameters>> = cfg_into_iter!(0..domain_m.size())
.map(|k| {
if k == i { Fr::one() }
else { Fr::zero() }
}).collect();
lagrange_polynomials_m.push(EvaluationsOnDomain::from_vec_and_domain(evals, domain_m).interpolate());
}
return LookupParameters {
m: m,
lagrange_polynomials_m: lagrange_polynomials_m,
domain_m: domain_m,
id_poly: id_poly };
}
// Reduces full srs down to smaller srs for smaller polynomials
// Copied from arkworks library (where same function is private)
fn trim<E: PairingEngine, P: UVPolynomial<E::Fr>>(
srs: UniversalParams<E>,
mut supported_degree: usize,
) -> (Powers<'static, E>, VerifierKey<E>) {
if supported_degree == 1 {
supported_degree += 1;
}
let pp = srs.clone();
let powers_of_g = pp.powers_of_g[..=supported_degree].to_vec();
let powers_of_gamma_g = (0..=supported_degree)
.map(|i| pp.powers_of_gamma_g[&i])
.collect();
let powers = Powers {
powers_of_g: ark_std::borrow::Cow::Owned(powers_of_g),
powers_of_gamma_g: ark_std::borrow::Cow::Owned(powers_of_gamma_g),
};
let vk = VerifierKey {
g: pp.powers_of_g[0],
gamma_g: pp.powers_of_gamma_g[&0],
h: pp.h,
beta_h: pp.beta_h,
prepared_h: pp.prepared_h.clone(),
prepared_beta_h: pp.prepared_beta_h.clone(),
};
(powers, vk)
}
// setup algorithm for index_hiding_polycommit
// also includes precomputation to speed up the prover.
// @max_degree max degree of table polynomial C(X), also the size of the trusted setup
// @N domain size on which proofs are constructed; max_degree should not be smaller than N
// @m lookup size. Can be changed later
// @n size of the supplementary domain for the unity proofs. Should be at least 6 + log N
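// Example (mirroring the tests below): for a table of size N = 2^n and lookups of size m,
//   let pp = setup_multi_lookup(&(N + 2), &N, &m, &n);
// The lookup-dependent parts can later be refreshed with pp.regenerate_lookup_params(m)
// without regenerating the SRS.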
#[allow(non_snake_case)]
pub fn setup_multi_lookup(max_degree: &usize, N: &usize, m: &usize, n: &usize) -> PublicParameters
{
let rng = &mut ark_std::test_rng();
// Setup algorithm. To be replaced by output of a universal setup before being production ready.
//let mut srs = KzgBls12_381::setup(4, true, rng).unwrap();
let poly_ck: Powers<'static, Bls12<ark_bls12_381::Parameters> >;
let poly_vk: VerifierKey<Bls12<ark_bls12_381::Parameters>>;
let mut g2_powers: Vec<G2Affine>=Vec::new();
//try opening the file. If it exists load the setup from there, otherwise generate
let path=format!("srs/srs_{}.setup",max_degree);
let res = File::open(path.clone());
let store_to_file:bool;
match res{
Ok(_)=>{
let now = Instant::now();
let (_poly_ck, _poly_vk, _g2_powers) = PublicParameters::load(&path);
println!("time to load powers = {:?}", now.elapsed());
store_to_file = false;
g2_powers = _g2_powers;
poly_ck = _poly_ck;
poly_vk = _poly_vk;
}
Err(_)=>{
let now = Instant::now();
let srs = KzgBls12_381::setup(max_degree.clone(), true, rng).unwrap();
println!("time to setup powers = {:?}", now.elapsed());
// trim down to size
let (poly_ck2, poly_vk2) = trim::<Bls12_381, UniPoly381>(srs, max_degree.clone());
poly_ck = Powers {
powers_of_g: ark_std::borrow::Cow::Owned(poly_ck2.powers_of_g.into()),
powers_of_gamma_g: ark_std::borrow::Cow::Owned(poly_ck2.powers_of_gamma_g.into()),
};
poly_vk = poly_vk2;
// need some powers of g2.
// The arkworks setup does not expose these powers, but it derives them from a fixed test randomness,
// so we can regenerate the powers of g2 directly using the same beta.
let rng = &mut ark_std::test_rng();
let beta: Fp256<FrParameters> = Fr::rand(rng);
let mut temp = poly_vk.h.clone();
for _ in 0..poly_ck.powers_of_g.len() {
g2_powers.push( temp.clone() );
temp = temp.mul( beta ).into_affine();
}
store_to_file = true;
}
}
// domain where openings {w_i}_{i in I} are embedded
let domain_n: GeneralEvaluationDomain<Fr> = GeneralEvaluationDomain::new( n.clone() ).unwrap();
let domain_N: GeneralEvaluationDomain<Fr> = GeneralEvaluationDomain::new( N.clone() ).unwrap();
// precomputation to speed up prover
// lagrange_polynomials[i] = polynomial equal to 0 at w^j for j!= i and 1 at w^i
let mut lagrange_polynomials_n: Vec< UniPoly381 > = Vec::new();
for i in 0..domain_n.size() {
let evals: Vec<Fp256<FrParameters>> = cfg_into_iter!(0..domain_n.size())
.map(|k| {
if k == i { Fr::one() }
else { Fr::zero() }
}).collect();
lagrange_polynomials_n.push(EvaluationsOnDomain::from_vec_and_domain(evals, domain_n).interpolate());
}
let lp = generate_lookup_params(m.clone());
let verifier_pp = VerifierPublicParameters {
poly_vk: poly_vk.clone(),
domain_m_size: lp.domain_m.size(),
};
let pp = PublicParameters {
poly_ck: poly_ck,
domain_m: lp.domain_m,
domain_n: domain_n,
lagrange_polynomials_n: lagrange_polynomials_n,
lagrange_polynomials_m: lp.lagrange_polynomials_m,
id_poly: lp.id_poly,
domain_N: domain_N,
poly_vk: poly_vk,
verifier_pp: verifier_pp,
N: N.clone(),
n: n.clone(),
m: lp.m.clone(),
g2_powers: g2_powers.clone()
};
if store_to_file
{
pp.store(&path);
}
return pp
}
#[test]
#[allow(non_snake_case)]
pub fn test_load()
{
let n: usize = 4;
let N: usize = 1<<n;
let powers_size: usize = 4*N; //SRS SIZE
let temp_m = n; //dummy
let pp =setup_multi_lookup(&powers_size,&N,&temp_m,&n);
let path = "powers.log";
pp.store(path);
let loaded = PublicParameters::load(path);
assert_eq!(pp.poly_ck.powers_of_g,loaded.0.powers_of_g);
assert_eq!(pp.poly_ck.powers_of_gamma_g,loaded.0.powers_of_gamma_g);
assert_eq!(pp.poly_vk.h,loaded.1.h);
assert_eq!(pp.poly_vk.beta_h,loaded.1.beta_h);
assert_eq!(pp.g2_powers,loaded.2);
std::fs::remove_file(&path).expect("File can not be deleted");
}


@@ -1,444 +0,0 @@
/*
This file includes Caulk's unity prover and verifier for multi openings.
The protocol is described in Figure 4.
*/
use ark_bls12_381::{G1Affine, FrParameters, Fr};
use ark_ff::{Fp256};
use ark_poly::{EvaluationDomain, UVPolynomial, Evaluations as EvaluationsOnDomain,
univariate::DensePolynomial, Polynomial};
use ark_std::{Zero, One};
use ark_ec::{msm::{VariableBaseMSM}, ProjectiveCurve, AffineCurve};
use crate::caulk_multi_setup::{PublicParameters};
use crate::tools::{UniPoly381, bipoly_commit, hash_caulk_multi, convert_to_bigints,
kzg_open_g1_native, kzg_verify_g1_native, kzg_partial_open_g1_native, kzg_partial_verify_g1_native};
// output structure of prove_unity
pub struct ProofMultiUnity {
pub g1_u_bar: G1Affine,
pub g1_h_1: G1Affine,
pub g1_h_2: G1Affine,
pub g1_u_bar_alpha: G1Affine,
pub g1_h_2_alpha: G1Affine,
pub v1: Fr,
pub v2: Fr,
pub v3: Fr,
pub pi_1: G1Affine,
pub pi_2: G1Affine,
pub pi_3: G1Affine,
pub pi_4: G1Affine,
pub pi_5: G1Affine,
}
// Prove knowledge of vec_u_evals such that g1_u = g1^(sum_j u_j mu_j(x)) and u_j^N = 1
#[allow(non_snake_case)]
pub fn prove_multiunity(
pp: &PublicParameters,
hash_input: &mut Fr,
g1_u: &G1Affine,
mut vec_u_evals: Vec<Fp256<FrParameters>>,
u_poly_quotient: UniPoly381) -> ProofMultiUnity
{
// test_rng is deterministic and should be replaced with a cryptographically secure RNG in production.
let rng_arkworks = &mut ark_std::test_rng();
let n = pp.n;
let deg_blinders = 11 / n ;
let z_Vm: UniPoly381 = pp.domain_m.vanishing_polynomial().into();
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 1. Compute polynomials u_s(X) = vec_u_polys[s] such that u_s( nu_i ) = w_i^{2^s}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
let mut vec_u_polys: Vec<UniPoly381> = Vec::new();
vec_u_polys.push(
EvaluationsOnDomain::from_vec_and_domain(vec_u_evals.clone(), pp.domain_m).interpolate()
+ (&z_Vm * &u_poly_quotient) );
for _ in 1..pp.domain_n.size() {
for i in 0..vec_u_evals.len() {
vec_u_evals[i] = vec_u_evals[i] * vec_u_evals[i].clone();
}
vec_u_polys.push(
EvaluationsOnDomain::from_vec_and_domain(vec_u_evals.clone(), pp.domain_m).interpolate()
+ (&z_Vm * &UniPoly381::rand(deg_blinders, rng_arkworks)) );
}
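// vec_u_polys[s] interpolates the pointwise 2^s-th powers of the original evaluations over V_m,
// each freshly re-blinded by a multiple of z_Vm(X).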
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 2. Compute U_bar(X,Y) = sum_{s= 1}^n u_{s-1} rho_s(Y)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// bivariate polynomials such that bipoly_U_bar[j] = a_j(Y) where U_bar(X,Y) = sum_j X^j a_j(Y)
let mut bipoly_U_bar = Vec::new();
// vec_u_polys[0] has an extended degree because it is blinded so use vec_u_polys[1] for the length
for j in 0..vec_u_polys[1].len() {
/*
Denoting u_{s-1}(X) = sum_j u_{s-1, j} X^j then
temp is a_j(Y) = sum_{s=1}^n u_{s-1, j} * rho_s(Y)
*/
let mut temp = DensePolynomial::from_coefficients_slice(&[Fr::zero()]);
for s in 1..n {
let u_s_j = DensePolynomial::from_coefficients_slice( &[vec_u_polys[s][j]] );
temp = &temp + &(&u_s_j * &pp.lagrange_polynomials_n[s]);
}
// add a_j(X) to U_bar(X,Y)
bipoly_U_bar.push( temp);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 3. Hs(X) = u_{s-1}^2(X) - u_s(X)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// id_poly(X) = 1 for omega_m in range and 0 for omega_m not in range.
let id_poly = pp.id_poly.clone();
// Hs(X) = (u_{s-1}^2(X) - u_s(X)) / zVm(X). Abort if doesn't divide.
let mut vec_H_s_polys: Vec<DensePolynomial<Fr>> = Vec::new();
for s in 1..n {
let (poly_H_s, remainder) = ( &( &vec_u_polys[s-1] * &vec_u_polys[s-1] ) - &vec_u_polys[s] ).divide_by_vanishing_poly(pp.domain_m).unwrap();
assert!(remainder.is_zero());
vec_H_s_polys.push(poly_H_s);
}
// Hn(X) = (u_{n-1}^2(X) - id(X)) / zVm(X). Abort if doesn't divide.
let (poly_H_s, remainder) = ( &( &vec_u_polys[n-1] * &vec_u_polys[n-1] ) - &id_poly ).divide_by_vanishing_poly(pp.domain_m).unwrap();
assert!(remainder.is_zero());
vec_H_s_polys.push(poly_H_s);
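// Soundness sketch: on the active part of V_m each division enforces u_s = u_{s-1}^2, so u_{n-1} = u_0^{2^{n-1}},
// and the last division enforces u_{n-1}^2 = id = 1 there; chaining these, every active evaluation of u_0
// satisfies u^{2^n} = 1, i.e. it is a root of unity as claimed.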
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 4. h_2(X,Y) = sum_{s=1}^n rho_s(Y) H_s(X)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// h_2[j] = a_j(Y) where h_2(X,Y) = sum_j X^j a_j(Y)
let mut bipoly_h_2 = Vec::new();
// first add H_1(X) rho_1(Y)
for j in 0..vec_H_s_polys[0].len() {
let h_0_j = DensePolynomial::from_coefficients_slice( &[vec_H_s_polys[0][j]] );
bipoly_h_2.push( &h_0_j * &pp.lagrange_polynomials_n[0]);
}
// In case the lengths of H_1(X) and H_2(X) differ, pad with zeros.
for _ in vec_H_s_polys[0].len()..vec_H_s_polys[1].len() {
let h_0_j = DensePolynomial::from_coefficients_slice( &[Fr::zero()] );
bipoly_h_2.push( h_0_j );
}
// h_2(X,Y) = sum_j ( sum_s H_{s,j} * rho_s(Y) ) X^j
for j in 0..vec_H_s_polys[1].len() {
// h_2[j] = sum_s H_{s,j} * rho_s(Y)
for s in 1..n {
let h_s_j = DensePolynomial::from_coefficients_slice( &[vec_H_s_polys[s][j]] );
// h_2[j] += H_{s,j} * rho_s(Y)
bipoly_h_2[j] = &bipoly_h_2[j] + &(&h_s_j * &pp.lagrange_polynomials_n[s]);
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 5. Commit to U_bar(X^n, X) and h_2(X^n, X)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
let g1_u_bar = bipoly_commit( pp, &bipoly_U_bar, pp.domain_n.size());
let g1_h_2 = bipoly_commit( pp, &bipoly_h_2, pp.domain_n.size());
////////////////////////////
// 6. alpha = Hash(g1_u, g1_u_bar, g1_h_2)
////////////////////////////
let alpha = hash_caulk_multi::<Fr>(
hash_input.clone(),
Some(& [ &g1_u, &g1_u_bar, &g1_h_2 ].to_vec() ),
None, None );
*hash_input = alpha.clone();
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 7. Compute h_1(Y)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// poly_U_alpha = sum_{s=1}^n u_{s-1}(alpha) rho_s(Y)
let mut poly_U_alpha = DensePolynomial::from_coefficients_slice(&[Fr::zero()]);
// poly_Usq_alpha = sum_{s=1}^n u_{s-1}^2(alpha) rho_s(Y)
let mut poly_Usq_alpha = DensePolynomial::from_coefficients_slice(&[Fr::zero()]);
for s in 0..n {
let u_s_alpha = vec_u_polys[s].evaluate(&alpha);
let mut temp = DensePolynomial::from_coefficients_slice( &[ u_s_alpha ] );
poly_U_alpha = &poly_U_alpha + &(&temp * &pp.lagrange_polynomials_n[s]);
temp = DensePolynomial::from_coefficients_slice( &[ u_s_alpha.clone() * &u_s_alpha ] );
poly_Usq_alpha = &poly_Usq_alpha + &(&temp * &pp.lagrange_polynomials_n[s]);
}
// divide h1(Y) = [ U^2(alpha,Y) - sum_{s=1}^n u_{s-1}^2(alpha) rho_s(Y) ) ] / zVn(Y)
// return an error if division fails
let (poly_h_1, remainder) = ( &(&poly_U_alpha * &poly_U_alpha) - &poly_Usq_alpha).divide_by_vanishing_poly(pp.domain_n).unwrap();
assert!(remainder.is_zero(), "poly_h_1 does not divide");
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 8. Commit to h_1(Y)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
assert!( pp.poly_ck.powers_of_g.len() >= poly_h_1.len() );
let g1_h_1 = VariableBaseMSM::multi_scalar_mul(&pp.poly_ck.powers_of_g, convert_to_bigints(&poly_h_1.coeffs).as_slice()).into_affine();
////////////////////////////
//9. beta = Hash( g1_h_1 )
////////////////////////////
let beta = hash_caulk_multi::<Fr>(
hash_input.clone(),
Some(& [ &g1_h_1 ].to_vec() ),
None, None );
*hash_input = beta.clone();
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 10. Compute p(Y) = U^2(alpha, beta) - ( u_bar(alpha, beta*sigma) + id(alpha) rho_n(beta) ) - h_1(Y) z_Vn(beta) - z_Vm(alpha) h_2(alpha, Y)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// p(Y) = U^2(alpha, beta)
let u_alpha_beta = poly_U_alpha.evaluate( &beta );
let mut poly_p = DensePolynomial::from_coefficients_slice( &[ u_alpha_beta.clone() * &u_alpha_beta ] );
////////////////////////////
// p(Y) = p(Y) - ( u_bar(alpha, beta sigma) + id(alpha) rho_n(beta))
// u_bar_alpha_shiftbeta = u_bar(alpha, beta sigma)
let mut u_bar_alpha_shiftbeta = Fr::zero();
let beta_shift = beta * &pp.domain_n.element(1);
for s in 1..n {
let u_s_alpha = vec_u_polys[s].evaluate(&alpha);
u_bar_alpha_shiftbeta = u_bar_alpha_shiftbeta + &(u_s_alpha * &pp.lagrange_polynomials_n[s].evaluate(&beta_shift));
}
// temp = u_bar(alpha, beta sigma) + id(alpha) rho_n(beta)
let temp = u_bar_alpha_shiftbeta + &(id_poly.evaluate(&alpha) * &pp.lagrange_polynomials_n[n-1].evaluate(&beta));
let temp = DensePolynomial::from_coefficients_slice( &[ temp ] );
poly_p = &poly_p - &temp;
////////////////////////////
// p(Y) = p(Y) - h1(Y) zVn(beta)
let z_Vn: UniPoly381 = pp.domain_n.vanishing_polynomial().into();
let temp = &DensePolynomial::from_coefficients_slice( &[ z_Vn.evaluate(&beta) ] ) * &poly_h_1;
poly_p = &poly_p - &temp;
////////////////////////////
// p(Y) = p(Y) - z_Vm(alpha) h_2(alpha, Y)
// poly_h_2_alpha = h_2(alpha, Y)
let mut poly_h_2_alpha = DensePolynomial::from_coefficients_slice(&[Fr::zero()]);
for s in 0..vec_H_s_polys.len() {
let h_s_j = DensePolynomial::from_coefficients_slice( &[vec_H_s_polys[s].evaluate(&alpha)] );
poly_h_2_alpha = &poly_h_2_alpha + &(&h_s_j * &pp.lagrange_polynomials_n[s]);
}
let temp = &DensePolynomial::from_coefficients_slice( &[ z_Vm.evaluate(&alpha) ] ) * &poly_h_2_alpha;
poly_p = &poly_p - &temp;
// check p(beta) = 0
assert!(poly_p.evaluate(&beta) == Fr::zero());
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 11. Open KZG commitments
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// KZG.Open( srs, u(X), deg = bot, X = alpha )
let (evals_1, pi_1) = kzg_open_g1_native( &pp.poly_ck, &vec_u_polys[0], None, [&alpha].to_vec());
// KZG.Open( srs, U_bar(X,Y), deg = bot, X = alpha )
let (g1_u_bar_alpha, pi_2, poly_u_bar_alpha) = kzg_partial_open_g1_native( &pp, &bipoly_U_bar, pp.domain_n.size(), &alpha);
// KZG.Open( srs, h_2(X,Y), deg = bot, X = alpha )
let (g1_h_2_alpha, pi_3, _) = kzg_partial_open_g1_native( &pp, &bipoly_h_2, pp.domain_n.size(), &alpha);
// KZG.Open( srs, U_bar(alpha,Y), deg = bot, Y = [1, beta, beta * sigma] ) should evaluate to (0, v2, v3)
let (evals_2, pi_4) = kzg_open_g1_native( &pp.poly_ck, &poly_u_bar_alpha, Some(&(pp.domain_n.size()-1)), [ &Fr::one(), &beta, &(beta * &pp.domain_n.element(1))].to_vec() );
assert!( evals_2[0] == Fr::zero() );
// KZG.Open(srs, p(Y), deg = n-1, Y = beta)
let (evals_3, pi_5) = kzg_open_g1_native( &pp.poly_ck, &poly_p, Some(&(pp.domain_n.size()-1)), [&beta].to_vec());
assert!( evals_3[0] == Fr::zero() );
let proof = ProofMultiUnity {
g1_u_bar: g1_u_bar,
g1_h_1: g1_h_1,
g1_h_2: g1_h_2,
g1_u_bar_alpha: g1_u_bar_alpha,
g1_h_2_alpha: g1_h_2_alpha,
v1: evals_1[0],
v2: evals_2[1],
v3: evals_2[2],
pi_1: pi_1,
pi_2: pi_2,
pi_3: pi_3,
pi_4: pi_4,
pi_5: pi_5,
};
proof
}
// Verify that the prover knows vec_u_evals such that g1_u = g1^(sum_j u_j mu_j(x)) and u_j^N = 1
#[allow(non_snake_case)]
pub fn verify_multiunity(pp: &PublicParameters, hash_input: &mut Fr,
g1_u: G1Affine, pi_unity: &ProofMultiUnity
) -> bool {
////////////////////////////
// alpha = Hash(g1_u, g1_u_bar, g1_h_2)
////////////////////////////
let alpha = hash_caulk_multi::<Fr>(
hash_input.clone(),
Some(& [ &g1_u, &pi_unity.g1_u_bar, &pi_unity.g1_h_2 ].to_vec() ),
None, None );
*hash_input = alpha.clone();
////////////////////////////
// beta = Hash( g1_h_1 )
////////////////////////////
let beta = hash_caulk_multi::<Fr>(
hash_input.clone(),
Some(& [ &pi_unity.g1_h_1 ].to_vec() ),
None, None );
*hash_input = beta.clone();
/////////////////////////////
// Compute [P]_1
////////////////////////////
let u_alpha_beta = pi_unity.v1 * &pp.lagrange_polynomials_n[0].evaluate( &beta ) + &pi_unity.v2;
// g1_P = [ U^2 - ( v3 + id(alpha) * rho_n(beta) ) ]_1
let mut g1_P = pp.poly_ck.powers_of_g[0].mul( u_alpha_beta * &u_alpha_beta
- &(pi_unity.v3
+ &(pp.id_poly.evaluate( &alpha ) * &pp.lagrange_polynomials_n[pp.n - 1].evaluate( &beta )
) ) );
// g1_P = g1_P - h1 zVn(beta)
let zVn = pp.domain_n.vanishing_polynomial();
g1_P = g1_P - &(pi_unity.g1_h_1.mul( zVn.evaluate(&beta)) ) ;
// g1_P = g1_P - h2_alpha zVm(alpha)
let zVm = pp.domain_m.vanishing_polynomial();
g1_P = g1_P - &(pi_unity.g1_h_2_alpha.mul( zVm.evaluate(&alpha)) ) ;
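// g1_P is the verifier's reconstruction of [p(beta)]_1 from the claimed evaluations v1, v2, v3;
// check5 below confirms p(beta) = 0, mirroring step 10 of the prover.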
/////////////////////////////
// Check the KZG openings
////////////////////////////
let check1 = kzg_verify_g1_native( &pp, g1_u.clone(), None, [alpha].to_vec(), [pi_unity.v1].to_vec(), pi_unity.pi_1 );
let check2 = kzg_partial_verify_g1_native( &pp, pi_unity.g1_u_bar, pp.domain_n.size(), alpha, pi_unity.g1_u_bar_alpha, pi_unity.pi_2 );
let check3 = kzg_partial_verify_g1_native( &pp, pi_unity.g1_h_2, pp.domain_n.size(), alpha, pi_unity.g1_h_2_alpha, pi_unity.pi_3 );
let check4 = kzg_verify_g1_native( &pp,
pi_unity.g1_u_bar_alpha,
Some( &(pp.domain_n.size() - 1) ),
[Fr::one(), beta, beta * &pp.domain_n.element(1)].to_vec(),
[Fr::zero(),pi_unity.v2, pi_unity.v3].to_vec(),
pi_unity.pi_4 );
let check5 = kzg_verify_g1_native( &pp, g1_P.into_affine(), Some( &(pp.domain_n.size() - 1) ), [beta].to_vec(), [Fr::zero()].to_vec(), pi_unity.pi_5 );
return check1 && check2 && check3 && check4 && check5
}
#[cfg(test)]
pub mod tests {
use std::time::{Instant};
use crate::caulk_multi_setup::{setup_multi_lookup};
use crate::caulk_multi_unity::{prove_multiunity,verify_multiunity};
use crate::tools::{UniPoly381,convert_to_bigints};
use rand::Rng;
use ark_poly::{EvaluationDomain,Evaluations as EvaluationsOnDomain,UVPolynomial};
use ark_ff::Fp256;
use ark_bls12_381::{Fr, FrParameters};
use ark_std::Zero;
use ark_ec::{msm::{VariableBaseMSM}, ProjectiveCurve};
//#[test]
#[allow(non_snake_case)]
#[test]
pub fn test_unity() {
let mut rng = rand::thread_rng();
let n: usize =8;//bitlength of poly degree
let max_degree: usize = (1<<n) +2;
let N: usize = (1<<n)-1;
let m_bitsize: usize = 3;
let m: usize = (1<<m_bitsize)-1;
// run the setup
let now = Instant::now();
let pp = setup_multi_lookup(&max_degree, &N, &m, &n);
println!("time to setup single openings of table size {:?} = {:?}", N + 1, now.elapsed());
////////////////////////////////////////////////////////////////////////////////////
// generating values for testing
////////////////////////////////////////////////////////////////////////////////////
// choose [u1, ..., um] such that uj**N = 1
let mut vec_u_evals: Vec<Fp256<FrParameters>> = Vec::new();
for _ in 0..m {
let j = rng.gen_range(0,pp.domain_N.size());
vec_u_evals.push( pp.domain_N.element(j) );
}
// choose random quotient polynomial of degree 1.
let rng_arkworks = &mut ark_std::test_rng();
let u_poly_quotient = UniPoly381::rand(5, rng_arkworks);
// X^m - 1
let z_Vm: UniPoly381 = pp.domain_m.vanishing_polynomial().into();
//commit to polynomial u(X) = sum_j uj muj(X) + u_quotient(X) z_Vm(X)
let u_poly = &EvaluationsOnDomain::from_vec_and_domain(vec_u_evals.clone(), pp.domain_m)
.interpolate() + &(&u_poly_quotient * &z_Vm);
assert!( pp.poly_ck.powers_of_g.len() >= u_poly.len() );
let g1_u = VariableBaseMSM::multi_scalar_mul(&pp.poly_ck.powers_of_g, convert_to_bigints(&u_poly.coeffs).as_slice()).into_affine();
////////////////////////////////////////////////////////////////////////////////////
// run the prover
////////////////////////////////////////////////////////////////////////////////////
let mut hash_input = Fr::zero();
let pi_unity = prove_multiunity( &pp, &mut hash_input, &g1_u, vec_u_evals.clone(), u_poly_quotient );
////////////////////////////////////////////////////////////////////////////////////
// run the verifier
////////////////////////////////////////////////////////////////////////////////////
println!( "unity proof verifies {:?}", verify_multiunity( &pp, g1_u, pi_unity ) );
}
}


@@ -1,120 +0,0 @@
mod caulk_multi_setup;
mod caulk_multi_unity;
mod tools;
mod caulk_multi_lookup;
mod multiopen;
use crate::tools::{read_line, KzgBls12_381, random_field};
use crate::caulk_multi_setup::setup_multi_lookup;
use crate::caulk_multi_lookup::{LookupProverInput, LookupInstance,
get_poly_and_g2_openings,
compute_lookup_proof, verify_lookup_proof};
use ark_poly_commit::{Polynomial, UVPolynomial};
use ark_bls12_381::{Fr, FrParameters};
use ark_ff::Fp256;
use ark_std::time::Instant;
use ark_poly::{EvaluationDomain, univariate::DensePolynomial};
use std::cmp::max;
use rand::Rng;
#[allow(non_snake_case)]
fn main() {
//1. Setup
// setting public parameters
// the current KZG setup should be replaced with the output of a setup ceremony
println!("What is the bitsize of the degree of the polynomial inside the commitment? ");
let n: usize = read_line();
println!("How many positions m do you want to open the polynomial at? ");
let m: usize = read_line();
let N: usize = 1 << n;
let powers_size: usize = max( N + 2, 1024 ) ;
let actual_degree = N - 1;
let temp_m = n; //dummy
let now = Instant::now();
let mut pp =setup_multi_lookup(&powers_size, &N, &temp_m, &n);
println!("Time to setup multi openings of table size {:?} = {:?}", actual_degree + 1, now.elapsed());
//2. Poly and openings
let now = Instant::now();
let table=get_poly_and_g2_openings(&pp, actual_degree);
println!("Time to generate commitment table = {:?}", now.elapsed());
//3. Setup
pp.regenerate_lookup_params(m);
//4. Positions
let mut rng = rand::thread_rng();
let mut positions: Vec<usize> = vec![];
for _ in 0..m { //generate positions randomly in the set
//let i_j: usize = j*(actual_degree/m);
let i_j: usize = rng.gen_range(0,actual_degree);
positions.push(i_j);
};
println!("positions = {:?}", positions);
//5. generating phi(X) = blinder*z_Vm(X) + sum_j c(w^{i_j})*mu_j(X); unused slots j >= m are padded with c(w^0)
let blinder: Fp256<FrParameters> = random_field::<Fr>();
let a_m = DensePolynomial::from_coefficients_slice(&[blinder]);
let mut phi_poly = a_m.mul_by_vanishing_poly(pp.domain_m);
let c_poly_local = table.c_poly.clone();
for j in 0..m
{
phi_poly = &phi_poly +
&(&pp.lagrange_polynomials_m[j]
* c_poly_local.evaluate(&pp.domain_N.element(positions[j]))); //adding c(w^{i_j})*mu_j(X)
}
for j in m..pp.domain_m.size()
{
phi_poly = &phi_poly +
&(&pp.lagrange_polynomials_m[j]
* c_poly_local.evaluate( &pp.domain_N.element(0) ) ); //adding c(w^{i_j})*mu_j(X)
}
//6. Running proofs
let now = Instant::now();
let (c_com, _) = KzgBls12_381::commit(&pp.poly_ck, &table.c_poly, None, None).unwrap();
let (phi_com, _) = KzgBls12_381::commit(&pp.poly_ck, &phi_poly, None, None).unwrap();
println!("Time to generate inputs = {:?}", now.elapsed());
let lookup_instance = LookupInstance{
c_com: c_com.0.clone(),
phi_com: phi_com.0.clone(),
};
let prover_input = LookupProverInput{
c_poly: table.c_poly.clone(),
phi_poly:phi_poly,
positions: positions,
openings: table.openings.clone()};
println!("We are now ready to run the prover. How many times should we run it?" );
let number_of_openings: usize = read_line();
let now = Instant::now();
let (proof, unity_proof) = compute_lookup_proof(&lookup_instance, &prover_input,&pp);
for _ in 1..number_of_openings {
_ = compute_lookup_proof(&lookup_instance, &prover_input,&pp);
}
println!("Time to evaluate {} times {} multi-openings of table size {:?} = {:?} ", number_of_openings, m, N, now.elapsed());
let now = Instant::now();
for _ in 0..number_of_openings {
verify_lookup_proof(table.c_com, phi_com.0, &proof, &unity_proof, &pp);
}
println!("Time to verify {} times {} multi-openings of table size {:?} = {:?} ", number_of_openings, m, N, now.elapsed());
assert!(verify_lookup_proof(table.c_com, phi_com.0, &proof, &unity_proof, &pp), "Result does not verify");
}

View File

@@ -1,449 +0,0 @@
/*
This file includes an algorithm for calculating n openings of a KZG vector commitment of size n in n log(n) time.
The algorithm is by Feist and Khovratovich.
It is useful for preprocessing.
The full algorithm is described here https://github.com/khovratovich/Kate/blob/master/Kate_amortized.pdf
*/
use std::str::FromStr;
//use std::time::{Instant};
use std::vec::Vec;
use ark_ff::{PrimeField, Fp256, Field};
use ark_poly::{univariate::DensePolynomial,EvaluationDomain, GeneralEvaluationDomain, UVPolynomial};
use ark_ec::{AffineCurve,ProjectiveCurve};
use ark_bls12_381::{Fr,FrParameters, G2Affine,G2Projective};
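// Editor's note (descriptive summary, added for clarity): compute_h_opt_g2 evaluates the
// Feist-Khovratovich "h" vector in G2, roughly h_i = [ sum_{j >= i} c_j x^{j-i} ]_2, via a
// Toeplitz matrix-vector product carried out with two forward DFTs and one inverse DFT of
// size 2*dom_size. A single further DFT of h (see multiple_open_g2 below) then yields the
// KZG opening proofs of c(X) at every root of unity, which is how all openings are obtained
// in n log(n) time.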
pub fn compute_h_opt_g2(
c_poly: &DensePolynomial<Fp256<FrParameters>>, //c(X) degree up to d<2^p , i.e. c_poly has at most d+1 coeffs non-zero
g2powers: &Vec<G2Affine>, //SRS
p: usize
)->Vec<G2Projective>
{
let mut coeffs = c_poly.coeffs().to_vec();
let dom_size = 1<<p;
let fpzero = Fp256::from_str("0").unwrap();
coeffs.resize(dom_size,fpzero);
//let now = Instant::now();
//1. x_ext = [ [x^(d-1)], [x^(d-2)], ..., [x], [1], d+2 [0]'s ]
let mut x_ext = vec![];
for i in 0..=dom_size-2{
x_ext.push( g2powers[dom_size-2-i].into_projective());
}
let g1inf = g2powers[0].mul(fpzero);
x_ext.resize(2*dom_size,g1inf); //filling 2d+2 neutral elements
let y = dft_g2_opt(&x_ext, p+1);
//println!("Step 1 computed in {:?}", now.elapsed());
//2. c_ext = [c_d, d zeroes, c_d,c_{0},c_1,...,c_{d-2},c_{d-1}]
//let now = Instant::now();
let mut c_ext = vec![];
c_ext.push(coeffs[coeffs.len()-1]);
c_ext.resize(dom_size,fpzero);
c_ext.push(coeffs[coeffs.len()-1]);
for i in 0..coeffs.len()-1{
c_ext.push(coeffs[i]);
}
assert_eq!(c_ext.len(),2*dom_size);
let v = dft_opt(&c_ext, p+1);
//println!("Step 2 computed in {:?}", now.elapsed());
//3. u = y o v
//let now = Instant::now();
let u = y.into_iter()
.zip(v.into_iter())
.map(|(a,b)|{a.mul(b.into_repr())})
.collect();
// println!("Step 3 computed in {:?}", now.elapsed());
//4. h_ext = idft_{2d+2}(u)
//let now = Instant::now();
let h_ext = idft_g2_opt(&u, p+1);
//println!("Step 4 computed in {:?}", now.elapsed());
return h_ext[0..dom_size].to_vec();
}
//compute dft of size @dom_size over vector of G2 elements
//q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for 0<= i< dom_size=2^p
pub fn dft_g2_opt(
h: &Vec<G2Projective>,
p: usize
)->Vec<G2Projective>
{
let dom_size = 1<<p;
assert_eq!(h.len(),dom_size); //we do not support inputs of size not power of 2
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
let mut l = dom_size/2;
let mut m: usize=1;
//Stockham FFT
let mut xprev = h.to_vec();
for _ in 1..=p{
let mut xnext= vec![];
xnext.resize(xprev.len(),h[0]);
for j in 0..l{
for k in 0..m{
let c0 = xprev[k+j*m].clone();
let c1 = &xprev[k+j*m+l*m];
xnext[k+2*j*m] = c0+c1;
let wj_2l=input_domain.element((j*dom_size/(2*l))%dom_size);
xnext[k+2*j*m+m]= (c0-c1).mul(wj_2l.into_repr());
}
}
l = l/2;
m = m*2;
xprev = xnext;
}
return xprev;
}
//compute dft of size @dom_size over vector of Fr elements
//q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for 0<= i< dom_size=2^p
pub fn dft_opt(
h: &Vec<Fr>,
p: usize
)->Vec<Fr>
{
let dom_size = 1<<p;
assert_eq!(h.len(),dom_size); //we do not support inputs of size not power of 2
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
let mut l = dom_size/2;
let mut m: usize=1;
//Stockham FFT
let mut xprev = h.to_vec();
for _ in 1..=p{
let mut xnext= vec![];
xnext.resize(xprev.len(),h[0]);
for j in 0..l{
for k in 0..m{
let c0 = xprev[k+j*m].clone();
let c1 = &xprev[k+j*m+l*m];
xnext[k+2*j*m] = c0+c1;
let wj_2l=input_domain.element((j*dom_size/(2*l))%dom_size);
xnext[k+2*j*m+m]= (c0-c1)*(wj_2l);
}
}
l = l/2;
m = m*2;
xprev = xnext;
}
return xprev;
}
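// Editor's note: the two Stockham FFT routines above are easy to mis-index, so here is a naive
// O(n^2) reference DFT over Fr (a hypothetical helper, not part of the original API) that follows
// the formula in the comment directly; it can be used to cross-check dft_opt on small inputs.
#[allow(dead_code)]
pub fn dft_naive(h: &Vec<Fr>, p: usize) -> Vec<Fr> {
    use ark_std::Zero;
    let dom_size = 1 << p;
    assert_eq!(h.len(), dom_size);
    let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
    (0..dom_size)
        .map(|i| {
            let mut acc = Fr::zero();
            for (k, h_k) in h.iter().enumerate() {
                // q_i += h_k * w^{i k}
                acc += *h_k * input_domain.element((i * k) % dom_size);
            }
            acc
        })
        .collect()
}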
//compute idft of size @dom_size over vector of G2 elements
//q_i = (h_0 + h_1w^-i + h_2w^{-2i}+\cdots + h_{dom_size-1}w^{-(dom_size-1)i})/dom_size for 0<= i< dom_size=2^p
pub fn idft_g2_opt(
h: &Vec<G2Projective>,
p: usize
)->Vec<G2Projective>
{
let dom_size = 1<<p;
assert_eq!(h.len(),dom_size); //we do not support inputs of size not power of 2
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
let mut l = dom_size/2;
let mut m: usize=1;
let mut dom_fr = Fr::from_str("1").unwrap();
//Stockham FFT
let mut xprev = h.to_vec();
for _ in 1..=p{
let mut xnext= vec![];
xnext.resize(xprev.len(),h[0]);
for j in 0..l{
for k in 0..m{
let c0 = xprev[k+j*m].clone();
let c1 = &xprev[k+j*m+l*m];
xnext[k+2*j*m] = c0+c1;
let wj_2l=input_domain.element((dom_size-(j*dom_size/(2*l))%dom_size)%dom_size);
xnext[k+2*j*m+m]= (c0-c1).mul(wj_2l.into_repr()); //Difference #1 to forward dft
}
}
l = l/2;
m = m*2;
dom_fr = dom_fr+dom_fr;
xprev=xnext;
}
let res = xprev
.iter()
.map(|x|{x
.mul(dom_fr
.inverse()
.unwrap().into_repr())})
.collect();
return res;
}
//compute all openings of c_poly in G2 using the amortized Feist-Khovratovich formula
pub fn multiple_open_g2(
g2powers: &Vec<G2Affine>, //SRS
c_poly: &DensePolynomial<Fp256<FrParameters>>, //c(X)
p: usize
)->Vec<G2Affine>
{
let degree=c_poly.coeffs.len()-1;
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(degree).unwrap();
//let now = Instant::now();
let h2 = compute_h_opt_g2(c_poly,g2powers,p);
//println!("H2 computed in {:?}", now.elapsed());
//assert_eq!(h,h2);
let dom_size = input_domain.size();
assert_eq!(1<<p,dom_size);
assert_eq!(degree+1,dom_size);
/*let now = Instant::now();
let q = dftG1(&h,p);
println!("Q computed in {:?}", now.elapsed());*/
//let now = Instant::now();
let q2 = dft_g2_opt(&h2,p);
//println!("Q2 computed in {:?}", now.elapsed());
//assert_eq!(q,q2);
let mut res: Vec<G2Affine> = vec![];
for i in 0..dom_size{
res.push(q2[i].into_affine());
}
return res;
}
#[cfg(test)]
pub mod tests {
use std::{str::FromStr, time::Instant};
use ark_poly_commit::kzg10::*;
use ark_bls12_381::{Bls12_381, Fr, FrParameters, G1Affine, G1Projective, G2Affine};
use ark_ff::{Fp256, PrimeField};
use ark_ec::{AffineCurve, ProjectiveCurve};
use ark_poly_commit::UVPolynomial;
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain};
use ark_poly::univariate::DensePolynomial;
use ark_std::{One};
use crate::tools::{KzgBls12_381, UniPoly381, kzg_open_g1,kzg_commit_g2};
use crate::caulk_multi_setup::{setup_multi_lookup, PublicParameters};
use crate::multiopen::*;
pub fn commit_direct(
c_poly: &DensePolynomial<Fp256<FrParameters>>, //c(X)
poly_ck: &Powers<Bls12_381>, //SRS
)-> G1Affine
{
assert!(c_poly.coeffs.len()<=poly_ck.powers_of_g.len());
let mut com = poly_ck.powers_of_g[0].mul(c_poly.coeffs[0]);
for i in 1..c_poly.coeffs.len()
{
com = com + poly_ck.powers_of_g[i].mul(c_poly.coeffs[i]);
}
return com.into_affine();
}
//compute dft of size @dom_size over vector of G1 elements
//q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for 0<= i< dom_size=2^p
#[allow(dead_code)]
pub fn dft_g1_opt(
h: &Vec<G1Projective>,
p: usize
)->Vec<G1Projective>
{
let dom_size = 1<<p;
assert_eq!(h.len(),dom_size); //we do not support inputs of size not power of 2
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
let mut l = dom_size/2;
let mut m: usize=1;
//Stockham FFT
let mut xprev = h.to_vec();
for _ in 1..=p{
let mut xnext= vec![];
xnext.resize(xprev.len(),h[0]);
for j in 0..l{
for k in 0..m{
let c0 = xprev[k+j*m].clone();
let c1 = &xprev[k+j*m+l*m];
xnext[k+2*j*m] = c0+c1;
let wj_2l=input_domain.element((j*dom_size/(2*l))%dom_size);
xnext[k+2*j*m+m]= (c0-c1).mul(wj_2l.into_repr());
}
}
l = l/2;
m = m*2;
xprev = xnext;
}
return xprev;
}
//compute idft of size @dom_size over vector of G1 elements
//q_i = (h_0 + h_1w^-i + h_2w^{-2i}+\cdots + h_{dom_size-1}w^{-(dom_size-1)i})/dom_size for 0<= i< dom_size=2^p
#[allow(dead_code)]
pub fn idft_g1_opt(
h: &Vec<G1Projective>,
p: usize
)->Vec<G1Projective>
{
let dom_size = 1<<p;
assert_eq!(h.len(),dom_size); //we do not support inputs of size not power of 2
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
let mut l = dom_size/2;
let mut m: usize=1;
let mut dom_fr = Fr::from_str("1").unwrap();
//Stockham FFT
let mut xprev = h.to_vec();
for _ in 1..=p{
let mut xnext= vec![];
xnext.resize(xprev.len(),h[0]);
for j in 0..l{
for k in 0..m{
let c0 = xprev[k+j*m].clone();
let c1 = &xprev[k+j*m+l*m];
xnext[k+2*j*m] = c0+c1;
let wj_2l=input_domain.element((dom_size-(j*dom_size/(2*l))%dom_size)%dom_size);
xnext[k+2*j*m+m]= (c0-c1).mul(wj_2l.into_repr()); //Difference #1 to forward dft
}
}
l = l/2;
m = m*2;
dom_fr = dom_fr+dom_fr;
xprev = xnext;
}
let res = xprev
.iter()
.map(|x|{x
.mul(dom_fr
.inverse()
.unwrap().into_repr())})
.collect();
return res;
}
//compute all openings to c_poly by merely calling `open` N times
#[allow(dead_code)]
pub fn multiple_open_naive(
c_poly: &DensePolynomial<Fp256<FrParameters>>,
c_com_open: &Randomness< Fp256<FrParameters>, DensePolynomial<Fp256<FrParameters>> >,
poly_ck: &Powers<Bls12_381>,
degree: usize
)
->Vec<G1Affine>
{
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(degree).unwrap();
let mut res: Vec<G1Affine> = vec![];
for i in 0..input_domain.size(){
let omega_i = input_domain.element(i);
res.push( kzg_open_g1(&c_poly, &omega_i, &c_com_open, &poly_ck).w);
}
return res;
}
//compute all openings to c_poly by merely calling `open` N times
pub fn multiple_open_naive_g2(
c_poly: &DensePolynomial<Fp256<FrParameters>>,
srs: &PublicParameters,
degree: usize
)
->Vec<G2Affine>
{
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(degree).unwrap();
let mut res: Vec<G2Affine> = vec![];
for i in 0..input_domain.size(){
let omega_i = input_domain.element(i);
res.push( kzg_open_g2(&c_poly, &omega_i,srs));
}
return res;
}
pub fn kzg_open_g2(
p: &DensePolynomial<Fp256<FrParameters>>,
x: &Fp256<FrParameters>, //point
srs: &PublicParameters
) -> G2Affine {
let tmp = DensePolynomial::from_coefficients_slice(&[Fr::one()]);
let (_tmp_com, tmp_com_open) = KzgBls12_381::commit(&srs.poly_ck, &tmp, None, None).unwrap();
let (witness_polynomial, _random_witness_polynomial) =
KzgBls12_381::compute_witness_polynomial(p, x.clone(), &tmp_com_open).unwrap();
return kzg_commit_g2(&witness_polynomial, srs);
}
#[allow(non_snake_case)]
#[test]
pub fn test_commit()
{
// the current KZG setup should be replaced with the output of a setup ceremony
let p: usize =8;//bitlength of poly degree
let max_degree: usize = (1<<p) +2;
let actual_degree: usize = (1<<p)-1;
let m: usize = 1<<(p/2);
let N: usize = 1<<p;
let pp =setup_multi_lookup(&max_degree,&N,&m,&p);
// Setting up test instance to run evaluate on.
// test randomness for c_poly is the same every time.
// g_c = g^(c(x))
let rng = &mut ark_std::test_rng();
let c_poly = UniPoly381::rand(actual_degree, rng);
let (c_com, _) = KzgBls12_381::commit( &pp.poly_ck, &c_poly, None, None).unwrap();
let g_c1 = c_com.0;
let g_c2 = commit_direct(&c_poly, &pp.poly_ck);
assert_eq!(g_c1,g_c2);
println!("commit test passed")
}
#[allow(non_snake_case)]
#[test]
pub fn test_multi_g2()
{
// the current KZG setup should be replaced with the output of a setup ceremony
let p: usize =6;//bitlength of poly degree
let max_degree: usize = (1<<p) +2;
let actual_degree: usize = (1<<p)-1;
let m: usize = 1<<(p/2);
let N: usize = 1<<p;
let pp =setup_multi_lookup(&max_degree,&N,&m,&p);
// Setting up test instance to run evaluate on.
// test randomness for c_poly is the same every time.
// test index equals 5 every time
// g_c = g^(c(x))
let rng = &mut ark_std::test_rng();
let c_poly = UniPoly381::rand(actual_degree, rng);
let now = Instant::now();
let q = multiple_open_naive_g2(&c_poly,&pp,actual_degree);
println!("Multi naive computed in {:?}", now.elapsed());
let now = Instant::now();
let q2 = multiple_open_g2(&pp.g2_powers,&c_poly,p);
println!("Multi advanced computed in {:?}", now.elapsed());
assert_eq!(q,q2);
}
}

View File

@@ -1,640 +0,0 @@
/*
This file includes backend tools:
(1) read_line() is for taking inputs from the user
(2) bipoly_commit commits to bivariate polynomials
(3) hash_caulk_single is for hashing group and field elements into a field element
(4) random_field is for generating random field elements
(5) convert_to_bigints is for formatting inputs into multiscalar operations
(6) kzg_open_g1_native is for opening KZG commitments
(7) kzg_partial_open_g1_native is for partially opening bivariate commitments to univariate commitments
(8) kzg_verify_g1_native is for verifying KZG commitments
(9) kzg_partial_verify_g1_native is for partially verifying bivariate commitments to univariate commitments
(10) generate_lagrange_polynomials_subset is for generating Lagrange polynomials over a subset that is not the roots of unity.
(11) aggregate_kzg_proofs_g2 is for aggregating KZG proofs
*/
use ark_bls12_381::{Bls12_381, FrParameters, G1Affine, G2Affine, G1Projective, G2Projective, Fr};
use ark_ff::{PrimeField, Fp256, Field};
use ark_std::{One};
use ark_poly_commit::kzg10::*;
use ark_poly::{univariate::DensePolynomial as DensePoly, UVPolynomial, Polynomial,
GeneralEvaluationDomain, EvaluationDomain};
use ark_ec::{PairingEngine, msm::VariableBaseMSM, ProjectiveCurve, AffineCurve};
use ark_std::Zero;
use ark_serialize::CanonicalSerialize;
use blake2s_simd::Params;
use rand::{Rng, SeedableRng, thread_rng};
use rand_chacha::ChaChaRng;
use std::{io, str::FromStr, error::Error};
use crate::caulk_multi_setup::{PublicParameters};
pub type UniPoly381 = DensePoly<<Bls12_381 as PairingEngine>::Fr>;
pub type KzgBls12_381 = KZG10<Bls12_381, UniPoly381>;
// Function for reading inputs from the command line.
pub fn read_line<T: FromStr>() -> T
where <T as FromStr>::Err: Error + 'static
{
let mut input = String::new();
io::stdin().read_line(&mut input).expect("Failed to get console input.");
let output: T = input.trim().parse().expect("Console input is invalid.");
output
}
/*
Function to commit to f(X,Y)
here f = [ [a0, a1, a2], [b0, b1, b2] ] represents (a0 + a1 Y + a2 Y^2 ) + X (b0 + b1 Y + b2 Y^2)
First we flatten to get a vector of the form [a0, a1, a2, b0, b1, b2], padding each inner vector with zeros up to length deg_x
Then we commit to f as a commitment to f'(X) = a0 + a1 X + a2 X^2 + b0 X^3 + b1 X^4 + b2 X^5
We also need to know the maximum degree of (a0 + a1 Y + a2 Y^2 ) to prevent overflow errors.
This is described in Section 4.6.2
*/
pub fn bipoly_commit( pp: &PublicParameters,
poly: &Vec<DensePoly<Fp256<FrParameters>>>, deg_x: usize ) -> G1Affine {
let mut poly_formatted = Vec::new();
for i in 0..poly.len() {
let temp = convert_to_bigints(&poly[i].coeffs);
for j in 0..poly[i].len() {
poly_formatted.push(temp[j]);
}
let temp = convert_to_bigints(&[Fr::zero()].to_vec())[0];
for _ in poly[i].len()..deg_x {
poly_formatted.push(temp);
}
}
assert!( pp.poly_ck.powers_of_g.len() >= poly_formatted.len() );
let g1_poly = VariableBaseMSM::multi_scalar_mul(&pp.poly_ck.powers_of_g, poly_formatted.as_slice()).into_affine();
return g1_poly;
}
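// Editor's note: a small illustration (hypothetical helper, not part of the original API) of the
// flattening used above. With deg_x = 3, f = [ [a0, a1, a2], [b0, b1] ] becomes
// [a0, a1, a2, b0, b1, 0], i.e. the coefficient vector of
// f'(X) = a0 + a1 X + a2 X^2 + b0 X^3 + b1 X^4, which is what gets committed.
#[allow(dead_code)]
fn flatten_bipoly_coeffs(poly: &Vec<DensePoly<Fp256<FrParameters>>>, deg_x: usize) -> Vec<Fr> {
    let mut flat = Vec::new();
    for row in poly.iter() {
        // coefficients of this power of X, viewed as a polynomial in Y ...
        flat.extend_from_slice(&row.coeffs);
        // ... padded with zeros up to the fixed stride deg_x
        for _ in row.coeffs.len()..deg_x {
            flat.push(Fr::zero());
        }
    }
    flat
}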
/////////////////////////////////////////////////////////////////////
// Hashing
/////////////////////////////////////////////////////////////////////
// hashing to field copied from
// https://github.com/kobigurk/aggregatable-dkg/blob/main/src/signature/utils/hash.rs
fn rng_from_message(personalization: &[u8], message: &[u8]) -> ChaChaRng {
let hash = Params::new()
.hash_length(32)
.personal(personalization)
.to_state()
.update(message)
.finalize();
let mut seed = [0u8; 32];
seed.copy_from_slice(hash.as_bytes());
let rng = ChaChaRng::from_seed(seed);
rng
}
pub fn hash_to_field<F: PrimeField>(
personalization: &[u8],
message: &[u8],
) -> F {
let mut rng = rng_from_message(personalization, message);
loop {
let bytes: Vec<u8> = (0..F::zero().serialized_size())
.map(|_| rng.gen())
.collect();
if let Some(p) = F::from_random_bytes(&bytes) {
return p;
}
}
}
/* hash function that takes as input:
(1) some state (either equal to the last hash output or zero)
(2) a vector of g1 elements
(3) a vector of g2 elements
(4) a vector of field elements
It returns a field element.
*/
pub fn hash_caulk_multi<F: PrimeField>(
state: Fr,
g1_elements: Option< &Vec< &G1Affine>>,
g2_elements: Option< &Vec< &G2Affine>>,
field_elements: Option< &Vec< &Fr>> ) -> Fr
{
// PERSONALIZATION distinguishes this hash from other hashes that may be in the system
const PERSONALIZATION: &[u8] = b"CAULK2";
///////////////////////////////////////////////////////////
// Handling cases where no g1_elements, no g2_elements, or no field elements are input
///////////////////////////////////////////////////////////
let g1_elements_len: usize;
let g2_elements_len: usize;
let field_elements_len: usize;
if g1_elements == None {
g1_elements_len = 0;
}
else {
g1_elements_len = g1_elements.unwrap().len();
}
if g2_elements == None {
g2_elements_len = 0;
}
else {
g2_elements_len = g2_elements.unwrap().len();
}
if field_elements == None {
field_elements_len = 0;
}
else {
field_elements_len = field_elements.unwrap().len();
}
///////////////////////////////////////////////////////////
// Transform inputs into bytes
///////////////////////////////////////////////////////////
let mut state_bytes = vec![];
state.serialize(&mut state_bytes).ok();
let mut g1_elements_bytes = Vec::new();
for i in 0..g1_elements_len {
let mut temp = vec![];
g1_elements.unwrap()[i].clone().serialize( &mut temp ).ok();
g1_elements_bytes.append( &mut temp.clone() );
}
let mut g2_elements_bytes = Vec::new();
for i in 0..g2_elements_len {
let mut temp = vec![];
g2_elements.unwrap()[i].clone().serialize( &mut temp ).ok();
g2_elements_bytes.append( &mut temp.clone() );
}
let mut field_elements_bytes = Vec::new();
for i in 0..field_elements_len {
let mut temp = vec![];
field_elements.unwrap()[i].clone().serialize( &mut temp ).ok();
field_elements_bytes.append( &mut temp.clone() );
}
// Transform bytes into vector of bytes of the form expected by hash_to_field
let mut hash_input: Vec<u8> = state_bytes.clone();
for i in 0..g1_elements_bytes.len() {
hash_input = [ &hash_input as &[_], &[g1_elements_bytes[i]] ].concat();
}
for i in 0..g2_elements_bytes.len() {
hash_input = [ &hash_input as &[_], &[g2_elements_bytes[i]] ].concat();
}
for i in 0..field_elements_bytes.len() {
hash_input = [ &hash_input as &[_], &[field_elements_bytes[i]] ].concat();
}
// hash_to_field
return hash_to_field::<Fr>(
PERSONALIZATION,
&hash_input
);
}
//////////////////////////////////////////////////
pub fn random_field< F: PrimeField >() -> F {
let mut rng = thread_rng();
loop {
let bytes: Vec<u8> = (0..F::zero().serialized_size())
.map(|_| rng.gen())
.collect();
if let Some(p) = F::from_random_bytes(&bytes) {
return p;
}
}
}
//copied from arkworks
pub fn convert_to_bigints<F: PrimeField>(p: &Vec<F>) -> Vec<F::BigInt> {
let coeffs = ark_std::cfg_iter!(p)
.map(|s| s.into_repr())
.collect::<Vec<_>>();
coeffs
}
////////////////////////////////////////////////
//
/*
KZG.Open( srs_KZG, f(X), deg, (alpha1, alpha2, ..., alphan) )
returns ([f(alpha1), ..., f(alphan)], pi)
Algorithm described in Section 4.6.1, Multiple Openings
*/
pub fn kzg_open_g1_native(poly_ck: &Powers<Bls12_381>,
poly: &DensePoly<Fr>,
max_deg: Option<&usize>,
points: Vec<&Fr>) -> (Vec<Fr>, G1Affine) {
let mut evals = Vec::new();
let mut proofs = Vec::new();
for i in 0..points.len() {
let (eval, pi) = kzg_open_g1_native_single( poly_ck, poly, max_deg, points[i] );
evals.push( eval );
proofs.push( pi );
}
let mut res: G1Projective = G1Projective::zero(); //default value
for j in 0..points.len()
{
let w_j= points[j].clone();
//1. Computing coefficient [1/prod]
let mut prod =Fr::one();
for k in 0..points.len() {
let w_k = points[k];
if k!=j{
prod = prod*(w_j-w_k);
}
}
//2. Summation
let q_add = proofs[j].mul(prod.inverse().unwrap()); //[1/prod]Q_{j}
res = res + q_add;
}
return (evals, res.into_affine());
}
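// Editor's note: a sanity-check sketch (hypothetical helper, not part of the original API) of the
// batching trick above, carried out over plain polynomials instead of group elements. The
// aggregated witness W(X) = sum_j c_j (f(X) - f(x_j))/(X - x_j), with c_j = prod_{k != j}(x_j - x_k)^(-1),
// satisfies f(X) - I(X) = Z(X) W(X), where I interpolates f on the points and Z vanishes on them.
// This is exactly the relation that kzg_verify_g1_native below checks in the exponent.
#[allow(dead_code)]
fn check_batched_kzg_witness(f: &DensePoly<Fr>, points: &Vec<Fr>) -> bool {
    let mut w = DensePoly::from_coefficients_slice(&[Fr::zero()]);
    let mut interp = DensePoly::from_coefficients_slice(&[Fr::zero()]);
    let mut z = DensePoly::from_coefficients_slice(&[Fr::one()]);
    for (j, x_j) in points.iter().enumerate() {
        // c_j = prod_{k != j} (x_j - x_k)^(-1) and tau_j(X) = prod_{k != j} (X - x_k)
        let mut prod = Fr::one();
        let mut tau_j = DensePoly::from_coefficients_slice(&[Fr::one()]);
        for (k, x_k) in points.iter().enumerate() {
            if k != j {
                prod = prod * (*x_j - *x_k);
                tau_j = &tau_j * &DensePoly::from_coefficients_slice(&[-*x_k, Fr::one()]);
            }
        }
        let c_j = prod.inverse().unwrap();
        // q_j(X) = (f(X) - f(x_j)) / (X - x_j), the plain KZG witness for the point x_j
        let divisor = DensePoly::from_coefficients_slice(&[-*x_j, Fr::one()]);
        let q_j = &(f - &DensePoly::from_coefficients_slice(&[f.evaluate(x_j)])) / &divisor;
        w = &w + &(&q_j * &DensePoly::from_coefficients_slice(&[c_j]));
        interp = &interp + &(&tau_j * &DensePoly::from_coefficients_slice(&[c_j * f.evaluate(x_j)]));
        z = &z * &divisor;
    }
    f - &interp == &z * &w
}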
fn kzg_open_g1_native_single(poly_ck: &Powers<Bls12_381>,
poly: &DensePoly<Fr>,
max_deg: Option<&usize>,
point: &Fr) -> (Fr, G1Affine) {
let eval = poly.evaluate( &point);
let global_max_deg = poly_ck.powers_of_g.len();
let mut d: usize = 0;
if max_deg == None {
d += global_max_deg;
}
else {
d += max_deg.unwrap();
}
let divisor = DensePoly::from_coefficients_vec(vec![-point.clone(), Fr::one()]);
let witness_polynomial = poly / &divisor;
assert!( poly_ck.powers_of_g[(global_max_deg - d)..].len() >= witness_polynomial.len());
let proof = VariableBaseMSM::multi_scalar_mul(&poly_ck.powers_of_g[(global_max_deg - d)..], &convert_to_bigints(&witness_polynomial.coeffs).as_slice() ).into_affine();
return (eval, proof)
}
////////////////////////////////////////////////
//
/*
KZG.Open( srs_KZG, f(X, Y), deg, alpha )
returns ([f(alpha, x)]_1, pi)
Algorithm described in Section 4.6.2, KZG for Bivariate Polynomials
*/
pub fn kzg_partial_open_g1_native(pp: &PublicParameters,
poly: &Vec<DensePoly<Fr>>,
deg_x: usize,
point: &Fr) -> (G1Affine, G1Affine, DensePoly<Fr>) {
let mut poly_partial_eval = DensePoly::from_coefficients_vec(vec![Fr::zero()]);
let mut alpha = Fr::one();
for i in 0..poly.len() {
let pow_alpha = DensePoly::from_coefficients_vec(vec![alpha.clone()]);
poly_partial_eval = poly_partial_eval + &pow_alpha * &poly[i];
alpha = alpha * point;
}
let eval = VariableBaseMSM::multi_scalar_mul(&pp.poly_ck.powers_of_g, convert_to_bigints(&poly_partial_eval.coeffs).as_slice()).into_affine();
let mut witness_bipolynomial = Vec::new();
let poly_reverse: Vec<_> = poly.into_iter().rev().collect();
witness_bipolynomial.push( poly_reverse[ 0 ].clone() );
let alpha = DensePoly::from_coefficients_vec(vec![point.clone()]);
for i in 1..(poly_reverse.len() - 1) {
witness_bipolynomial.push( poly_reverse[ i ].clone() + &alpha * &witness_bipolynomial[i-1] );
}
witness_bipolynomial.reverse();
let proof = bipoly_commit( pp, &witness_bipolynomial, deg_x );
return (eval, proof, poly_partial_eval)
}
/*
// KZG.Verify( srs_KZG, F, deg, (alpha1, alpha2, ..., alphan), (v1, ..., vn), pi )
Algorithm described in Section 4.6.1, Multiple Openings
*/
pub fn kzg_verify_g1_native( //Verify that @c_com is a commitment to C(X) such that C(points[i]) = evals[i] for all i
srs: &PublicParameters,
c_com: G1Affine, //commitment
max_deg: Option<&usize>, // max degree
points: Vec<Fr>, // x such that eval = C(x)
evals: Vec<Fr>, //evaluation
pi: G1Affine, //proof
)
->bool{
// Interpolation set
// tau_i(X) = lagrange_tau[i] = polynomial equal to 0 at point[j] for j!= i and 1 at points[i]
let mut lagrange_tau = DensePoly::from_coefficients_slice(&[Fr::zero()]);
for i in 0..points.len() {
let mut temp : UniPoly381 = DensePoly::from_coefficients_slice(&[Fr::one()]);
for j in 0..points.len() {
if i != j {
temp = &temp * (&DensePoly::from_coefficients_slice(&[-points[j] ,Fr::one()]));
}
}
let lagrange_scalar = temp.evaluate(&points[i]).inverse().unwrap() * &evals[i] ;
lagrange_tau = lagrange_tau + &temp * (&DensePoly::from_coefficients_slice(&[lagrange_scalar])) ;
}
// commit to sum evals[i] tau_i(X)
// println!( "lagrange_tau = {:?}", lagrange_tau.evaluate(&points[0]) == evals[0] );
assert!( srs.poly_ck.powers_of_g.len() >= lagrange_tau.len(), "not enough powers of g in kzg_verify_g1_native" );
let g1_tau = VariableBaseMSM::multi_scalar_mul(&srs.poly_ck.powers_of_g[..lagrange_tau.len()], convert_to_bigints(&lagrange_tau.coeffs).as_slice());
// vanishing polynomial
// z_tau[i] = polynomial equal to 0 at point[j]
let mut z_tau = DensePoly::from_coefficients_slice(&[Fr::one()]);
for i in 0..points.len() {
z_tau = &z_tau * (&DensePoly::from_coefficients_slice(&[-points[i] ,Fr::one()]));
}
// commit to z_tau(X) in g2
assert!( srs.g2_powers.len() >= z_tau.len() );
let g2_z_tau = VariableBaseMSM::multi_scalar_mul(&srs.g2_powers[..z_tau.len()], convert_to_bigints(&z_tau.coeffs).as_slice());
let global_max_deg = srs.poly_ck.powers_of_g.len();
let mut d: usize = 0;
if max_deg == None {
d += global_max_deg;
}
else {
d += max_deg.unwrap();
}
let pairing1 = Bls12_381::pairing(
c_com.into_projective()-g1_tau,
srs.g2_powers[global_max_deg - d]
);
let pairing2 =Bls12_381::pairing(
pi,
g2_z_tau
);
return pairing1==pairing2;
}
/*
KZG.Verify( srs_KZG, F, deg, alpha, F_alpha, pi )
Algorithm described in Section 4.6.2, KZG for Bivariate Polynomials
Be very careful here. Verification is only valid if it is paired with a degree check.
*/
pub fn kzg_partial_verify_g1_native(srs: &PublicParameters,
c_com: G1Affine, //commitment
deg_x: usize,
point: Fr,
partial_eval: G1Affine,
pi: G1Affine, //proof
) -> bool {
let pairing1 = Bls12_381::pairing(
c_com.into_projective()-partial_eval.into_projective(),
srs.g2_powers[0]
);
let pairing2 =Bls12_381::pairing(
pi,
srs.g2_powers[deg_x].into_projective() - srs.g2_powers[0].mul(point)
);
return pairing1==pairing2;
}
pub fn kzg_commit_g2(
poly: &DensePoly<Fp256<FrParameters>>,
srs: &PublicParameters
)->G2Affine
{
let mut res=srs.g2_powers[0].mul(poly[0]);
for i in 1..poly.len(){
res = res+srs.g2_powers[i].mul(poly[i])
}
return res.into_affine();
}
//////////////////////////////////////////////////////
pub fn generate_lagrange_polynomials_subset(
positions: &Vec<usize>,
srs: &PublicParameters
)->Vec<DensePoly<Fp256<FrParameters>>>
{
let mut tau_polys = vec![];
let m = positions.len();
for j in 0..m{
let mut tau_j= DensePoly::from_coefficients_slice(&[Fr::one()]); //start from tau_j =1
for k in 0..m{
if k != j { //tau_j = prod_{k\neq j} (X-w^(i_k))/(w^(i_j)-w^(i_k))
let denum = srs.domain_N.element(positions[j])-srs.domain_N.element(positions[k]);
tau_j = &tau_j * &DensePoly::from_coefficients_slice(&[
-srs.domain_N.element(positions[k])/denum, //-w^(i_k)/(w^(i_j)-w^(i_k))
Fr::one()/denum //1/(w^(i_j)-w^(i_k))
]);
}
}
tau_polys.push(tau_j.clone());
}
tau_polys
}
/*
Algorithm for aggregating KZG proofs into a single proof
Described in Section 4.6.3 Subset openings
compute Q = \sum_{j=1}^m \frac{Q_{i_j}}{\prod_{1\leq k\leq m,\; k\neq j}(\omega^{i_j}-\omega^{i_k})}
*/
pub fn aggregate_kzg_proofs_g2(
openings: &Vec<G2Affine>, //Q_i
positions: &Vec<usize>, //i_j
input_domain: &GeneralEvaluationDomain<Fr>
)->G2Affine
{
let m = positions.len();
let mut res: G2Projective = openings[0].into_projective(); //default value
for j in 0..m
{
let i_j = positions[j];
let w_ij=input_domain.element(i_j);
//1. Computing coefficient [1/prod]
let mut prod =Fr::one();
for k in 0..m{
let i_k = positions[k];
let w_ik = input_domain.element(i_k);
if k!=j{
prod = prod*(w_ij-w_ik);
}
}
//2. Summation
let q_add = openings[i_j].mul(prod.inverse().unwrap()); //[1/prod]Q_{j}
if j==0{
res=q_add;
}
else{
res = res + q_add;
}
}
return res.into_affine();
}
//////////////////////////////////////////////////////
#[cfg(test)]
pub mod tests {
use crate::caulk_multi_setup::{setup_multi_lookup};
use crate::tools::{UniPoly381,KzgBls12_381,generate_lagrange_polynomials_subset,aggregate_kzg_proofs_g2};
use crate::multiopen::multiple_open_g2;
use ark_poly::{univariate::DensePolynomial as DensePoly, UVPolynomial, Polynomial,
EvaluationDomain};
use std::time::{Instant};
use ark_bls12_381::{Bls12_381,G2Affine,Fr};
use ark_ec::{AffineCurve,PairingEngine,ProjectiveCurve};
use ark_std::{ One,Zero};
#[allow(non_snake_case)]
#[test]
pub fn test_lagrange()
{
let p: usize =8;//bitlength of poly degree
let max_degree: usize = (1<<p) +2;
let m: usize = 8;
let N: usize = 1<<p;
let pp =setup_multi_lookup(&max_degree,&N,&m,&p);
let now = Instant::now();
println!("time to setup {:?}", now.elapsed());
let mut positions: Vec<usize> = vec![];
for i in 0..m{ //generate positions evenly distributed in the set
let i_j: usize = i*(max_degree/m);
positions.push(i_j);
};
let tau_polys=generate_lagrange_polynomials_subset(&positions, &pp);
for j in 0..m{
for k in 0..m{
if k==j
{
assert_eq!(tau_polys[j].evaluate(&pp.domain_N.element(positions[k])),Fr::one())
}
else{
assert_eq!(tau_polys[j].evaluate(&pp.domain_N.element(positions[k])),Fr::zero())
}
}
}
}
#[allow(non_snake_case)]
#[test]
pub fn test_Q_g2(){
// the current KZG setup should be replaced with the output of a setup ceremony
let p: usize =6;//bitlength of poly degree
let max_degree: usize = (1<<p) +2;
let actual_degree: usize = (1<<p)-1;
let m: usize = 1<<(p/2);
let N: usize = 1<<p;
let pp =setup_multi_lookup(&max_degree,&N,&m,&p);
// Setting up test instance to run evaluate on.
// test randomness for c_poly is the same every time.
// test index equals 5 every time
// g_c = g^(c(x))
let rng = &mut ark_std::test_rng();
let c_poly = UniPoly381::rand(actual_degree, rng);
let (c_com, _) = KzgBls12_381::commit( &pp.poly_ck, &c_poly, None, None).unwrap();
let now = Instant::now();
let openings = multiple_open_g2(&pp.g2_powers, &c_poly, p);
println!("Multi advanced computed in {:?}", now.elapsed());
let mut positions: Vec<usize> = vec![];
for i in 0..m{
let i_j: usize = i*(max_degree/m);
positions.push(i_j);
};
let now = Instant::now();
//Compute proof
let Q:G2Affine =aggregate_kzg_proofs_g2(&openings, &positions, &pp.domain_N);
println!("Full proof for {:?} positions computed in {:?}", m, now.elapsed());
//Compute commitment to C_I
let mut C_I = DensePoly::from_coefficients_slice(&[Fr::zero()]); //C_I = sum_j c_j*tau_j
let tau_polys = generate_lagrange_polynomials_subset(&positions, &pp);
for j in 0..m{
C_I = &C_I + &(&tau_polys[j]*c_poly.evaluate(&pp.domain_N.element(positions[j]))); //sum_j c_j*tau_j
}
let (c_I_com, _c_I_com_open) = KzgBls12_381::commit( &pp.poly_ck, &C_I, None, None).unwrap();
//Compute commitment to z_I
let mut z_I = DensePoly::from_coefficients_slice(
&[Fr::one()]);
for j in 0..m {
z_I = &z_I * &DensePoly::from_coefficients_slice(
&[
-pp.domain_N.element(positions[j]) ,
Fr::one()]);
}
let (z_I_com, _) =KzgBls12_381::commit( &pp.poly_ck, &z_I, None, None).unwrap();
//pairing check
let pairing1=Bls12_381::pairing((c_com.0.into_projective()-c_I_com.0.into_projective()).into_affine(), pp.g2_powers[0]);
let pairing2 = Bls12_381::pairing(z_I_com.0, Q);
assert_eq!(pairing1,pairing2);
}
}

View File

@@ -1,171 +0,0 @@
/*
This file includes the Caulk prover and verifier for single openings.
The protocol is described in Figure 1.
*/
use ark_bls12_381::{Bls12_381, Fr, G1Affine, G2Affine};
use ark_ff::{PrimeField, Field};
use ark_ec::{AffineCurve, ProjectiveCurve, PairingEngine};
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain};
use ark_std::{One, Zero};
use crate::caulk_single_setup::{PublicParameters, VerifierPublicParameters};
use crate::caulk_single_unity::{caulk_single_unity_prove, caulk_single_unity_verify,
PublicParametersUnity,CaulkProofUnity, VerifierPublicParametersUnity};
use crate::pedersen::{prove_pedersen, verify_pedersen, ProofPed};
use crate::tools::{random_field, hash_caulk_single};
// Structure of opening proofs output by prove.
#[allow(non_snake_case)]
pub struct CaulkProof {
pub g2_z: G2Affine,
pub g1_T: G1Affine,
pub g2_S: G2Affine,
pub pi_ped: ProofPed,
pub pi_unity: CaulkProofUnity,
}
//Proves knowledge of (i, Q, v, r) such that
// 1) Q is a KZG opening proof that g1_C opens to v at position i
// 2) cm = g^v h^r
//Takes as input opening proof Q. Does not need knowledge of contents of C = g1_C.
#[allow(non_snake_case)]
pub fn caulk_single_prove(pp: &PublicParameters, g1_C: &G1Affine,
cm: &G1Affine, index: usize, g1_q: &G1Affine, v: &Fr, r: &Fr ) -> CaulkProof {
// provers blinders for zero-knowledge
let a: Fr = random_field::<Fr>();
let s: Fr = random_field::<Fr>();
let domain_H: GeneralEvaluationDomain<Fr> = GeneralEvaluationDomain::new( pp.domain_H_size ).unwrap();
///////////////////////////////
// Compute [z]_2, [T]_1, and [S]_2
///////////////////////////////
// [z]_2 = [ a (x - omega^i) ]_2
let g2_z = ( pp.poly_vk.beta_h.mul( a ) + pp.poly_vk.h.mul( - a * domain_H.element(index) ) ).into_affine();
// [T]_1 = [ a^(-1) Q + s h ]_1 for Q precomputed KZG opening.
let g1_T = (g1_q.mul( a.inverse().unwrap() ) + pp.ped_h.mul(s)).into_affine();
// [S]_2 = [ - r - s z ]_2
let g2_S =( pp.poly_vk.h.mul( (-*r).into_repr() )+ g2_z.mul((-s).into_repr())).into_affine();
///////////////////////////////
// Pedersen prove
///////////////////////////////
// hash the instance and the proof elements to determine hash inputs for Pedersen prover
let mut hash_input = hash_caulk_single::<Fr>(Fr::zero(),
Some(& [g1_C.clone(), g1_T.clone()].to_vec() ),
Some( & [g2_z.clone(), g2_S.clone()].to_vec() ), None );
// proof that cm = g^v h^r
let pi_ped = prove_pedersen( &pp.ped_g, &pp.ped_h, &mut hash_input, &cm, v, r );
///////////////////////////////
// Unity prove
///////////////////////////////
// hash the last round of the pedersen proof to determine hash input to the unity prover
hash_input = hash_caulk_single::<Fr>( hash_input,
None,
None,
Some( &[ pi_ped.t1.clone(), pi_ped.t2.clone()].to_vec() ) );
// Setting up the public parameters for the unity prover
let pp_unity = PublicParametersUnity {
poly_ck: pp.poly_ck.clone(),
gxd: pp.poly_ck_d.clone(),
gxpen: pp.poly_ck_pen.clone(),
lagrange_polynomials_Vn: pp.lagrange_polynomials_Vn.clone(),
poly_prod: pp.poly_prod.clone(),
logN: pp.logN.clone(),
domain_Vn: pp.domain_Vn.clone(),
};
// proof that A = [a x - b ]_2 for a^n = b^n
let pi_unity = caulk_single_unity_prove(&pp_unity,
&mut hash_input,
g2_z, a, a * domain_H.element(index) );
let proof = CaulkProof {
g2_z: g2_z, g1_T: g1_T, g2_S: g2_S, pi_ped: pi_ped, pi_unity: pi_unity,
};
proof
}
//Verifies that the prover knows (i, Q, v, r) such that
// 1) Q is a KZG opening proof that g1_C opens to v at position i
// 2) cm = g^v h^r
#[allow(non_snake_case)]
pub fn caulk_single_verify( vk: &VerifierPublicParameters,
g1_C: &G1Affine, cm: &G1Affine, proof: &CaulkProof) -> bool {
///////////////////////////////
// Pairing check
///////////////////////////////
// check that e( - C + cm, [1]_2) * e( [T]_1, [z]_2 ) * e( [h]_1, [S]_2 ) = 1
let eq1: Vec<(ark_ec::bls12::G1Prepared<ark_bls12_381::Parameters>, ark_ec::bls12::G2Prepared<ark_bls12_381::Parameters>)>
= vec![
( ( g1_C.mul( -Fr::one()) + cm.into_projective() ).into_affine().into(), vk.poly_vk.prepared_h.clone()),
( ( proof.g1_T ).into(), proof.g2_z.into() ),
( vk.ped_h.into(), proof.g2_S.into() )
];
let check1 = Bls12_381::product_of_pairings(&eq1).is_one();
///////////////////////////////
// Pedersen check
///////////////////////////////
// hash the instance and the proof elements to determine hash inputs for Pedersen prover
let mut hash_input = hash_caulk_single::<Fr>(Fr::zero(),
Some(& [g1_C.clone(), proof.g1_T.clone()].to_vec() ),
Some( & [proof.g2_z.clone(), proof.g2_S.clone()].to_vec() ), None );
// verify that cm = g^v h^r
let check2 = verify_pedersen(&vk.ped_g, &vk.ped_h, &mut hash_input, &cm, &proof.pi_ped );
///////////////////////////////
// Unity check
///////////////////////////////
// hash the last round of the pedersen proof to determine hash input to the unity prover
hash_input = hash_caulk_single::<Fr>( hash_input,
None,
None,
Some( &[ proof.pi_ped.t1.clone(), proof.pi_ped.t2.clone()].to_vec() ) );
let vk_unity = VerifierPublicParametersUnity {
poly_vk: vk.poly_vk.clone(),
gxpen: vk.poly_ck_pen.clone(),
g1: vk.ped_g.clone(),
g1_x: vk.g1_x.clone(),
lagrange_scalars_Vn: vk.lagrange_scalars_Vn.clone(),
poly_prod: vk.poly_prod.clone(),
logN: vk.logN.clone(),
domain_Vn: vk.domain_Vn.clone(),
powers_of_g2: vk.powers_of_g2.clone(),
};
// Verify that g2_z = [ ax - b ]_2 for (a/b)**N = 1
let check3 = caulk_single_unity_verify(
&vk_unity,
&mut hash_input,
&proof.g2_z,
&proof.pi_unity);
return check1 && check2 && check3;
}
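// Editor's note: a minimal end-to-end round trip for the prover and verifier above, added as a
// hedged sketch. It assumes the sibling modules of this crate (caulk_single_setup, and tools with
// kzg_open_g1, KzgBls12_381 and UniPoly381) expose the signatures already used elsewhere in this
// repository; it is illustrative rather than part of the original test suite.
#[cfg(test)]
pub mod tests {
    use super::*;
    use crate::caulk_single_setup::caulk_single_setup;
    use crate::tools::{kzg_open_g1, KzgBls12_381, UniPoly381};
    use ark_poly::{Polynomial, UVPolynomial};

    #[allow(non_snake_case)]
    #[test]
    pub fn test_caulk_single_roundtrip() {
        // setup (deterministic randomness, as elsewhere in this repo; never use in production)
        let n: usize = 1 << 6;
        let pp = caulk_single_setup(n + 2, n - 1);

        // commit to a random polynomial c(X) of degree n - 1
        let rng = &mut ark_std::test_rng();
        let c_poly = UniPoly381::rand(n - 1, rng);
        let (g1_C, _) = KzgBls12_381::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
        let g1_C = g1_C.0;

        // precompute a KZG opening proof for position i, i.e. at the point omega^i of domain_H
        let i: usize = 5;
        let domain_H: GeneralEvaluationDomain<Fr> =
            GeneralEvaluationDomain::new(pp.domain_H_size).unwrap();
        let omega_i = domain_H.element(i);
        let v = c_poly.evaluate(&omega_i);
        let (_evals, g1_q) = kzg_open_g1(&pp.poly_ck, &c_poly, None, [&omega_i].to_vec());

        // Pedersen commitment cm = g^v h^r to the opened value
        let r: Fr = random_field::<Fr>();
        let cm = (pp.ped_g.mul(v) + pp.ped_h.mul(r)).into_affine();

        let proof = caulk_single_prove(&pp, &g1_C, &cm, i, &g1_q, &v, &r);
        assert!(caulk_single_verify(&pp.verifier_pp, &g1_C, &cm, &proof));
    }
}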

View File

@@ -1,222 +0,0 @@
/*
This file includes the setup algorithm for Caulk with single openings.
A full description of the setup is not formally given in the paper.
*/
use ark_ff::{ UniformRand, Fp256, Field};
use ark_poly_commit::kzg10::*;
use ark_ec::{bls12::Bls12, PairingEngine, ProjectiveCurve, AffineCurve};
use ark_poly::{ UVPolynomial, Evaluations as EvaluationsOnDomain, GeneralEvaluationDomain,
EvaluationDomain, univariate::DensePolynomial};
use ark_bls12_381::{Bls12_381, G1Projective, FrParameters, Fr, G1Affine, G2Affine};
use ark_std::{Zero, One, cfg_into_iter};
use std::cmp::max;
use crate::tools::{UniPoly381, KzgBls12_381};
use std::time::{Instant};
// structure of public parameters
#[allow(non_snake_case)]
pub struct PublicParameters {
pub poly_ck: Powers<'static, Bls12<ark_bls12_381::Parameters> >,
pub poly_ck_d: G1Affine,
pub poly_ck_pen: G1Affine,
pub lagrange_polynomials_Vn: Vec< UniPoly381>,
pub poly_prod: UniPoly381,
pub poly_vk: VerifierKey<Bls12<ark_bls12_381::Parameters>>,
pub ped_g: G1Affine,
pub ped_h: G1Affine,
pub domain_H_size: usize,
pub logN: usize,
pub domain_Vn: GeneralEvaluationDomain<Fr>,
pub domain_Vn_size: usize,
pub verifier_pp: VerifierPublicParameters,
pub actual_degree: usize,
}
// smaller set of public parameters used by verifier
#[allow(non_snake_case)]
pub struct VerifierPublicParameters {
pub poly_ck_pen: G1Affine,
pub lagrange_scalars_Vn: Vec<Fr>,
pub poly_prod: UniPoly381,
pub poly_vk: VerifierKey<Bls12<ark_bls12_381::Parameters>>,
pub ped_g: G1Affine,
pub g1_x: G1Affine,
pub ped_h: G1Affine,
pub domain_H_size: usize,
pub logN: usize,
pub domain_Vn: GeneralEvaluationDomain<Fr>,
pub domain_Vn_size: usize,
pub powers_of_g2: Vec<G2Affine>,
}
// Reduces full srs down to smaller srs for smaller polynomials
// Copied from arkworks library (where same function is private)
fn trim<E: PairingEngine, P: UVPolynomial<E::Fr>>(
srs: UniversalParams<E>,
mut supported_degree: usize,
) -> (Powers<'static, E>, VerifierKey<E>) {
if supported_degree == 1 {
supported_degree += 1;
}
let pp = srs.clone();
let powers_of_g = pp.powers_of_g[..=supported_degree].to_vec();
let powers_of_gamma_g = (0..=supported_degree)
.map(|i| pp.powers_of_gamma_g[&i])
.collect();
let powers = Powers {
powers_of_g: ark_std::borrow::Cow::Owned(powers_of_g),
powers_of_gamma_g: ark_std::borrow::Cow::Owned(powers_of_gamma_g),
};
let vk = VerifierKey {
g: pp.powers_of_g[0],
gamma_g: pp.powers_of_gamma_g[&0],
h: pp.h,
beta_h: pp.beta_h,
prepared_h: pp.prepared_h.clone(),
prepared_beta_h: pp.prepared_beta_h.clone(),
};
(powers, vk)
}
// setup algorithm for Caulk with single openings
// also includes a bunch of precomputation.
#[allow(non_snake_case)]
pub fn caulk_single_setup(max_degree: usize, actual_degree: usize) -> PublicParameters
{
// deterministic randomness. Should never be used in practice.
let rng = &mut ark_std::test_rng();
// domain where vector commitment is defined
let domain_H: GeneralEvaluationDomain<Fr> = GeneralEvaluationDomain::new( actual_degree ).unwrap();
let logN: usize = ((actual_degree as f32).log(2.0)).ceil() as usize;
// smaller domain for unity proofs with generator w
let domain_Vn: GeneralEvaluationDomain<Fr> = GeneralEvaluationDomain::new( 6 + logN ).unwrap();
// Determining how big an srs we need.
// Need an srs of size actual_degree to commit to the polynomial.
// Need an srs of size 2 * domain_Vn_size + 3 to run the unity prover.
// We take the larger of the two.
let poly_ck_size = max( actual_degree, 2 * domain_Vn.size() + 3);
// Setup algorithm. To be replaced by output of a universal setup before being production ready.
let now = Instant::now();
let srs = KzgBls12_381::setup(max(max_degree,poly_ck_size), true, rng).unwrap();
println!("time to setup powers = {:?}", now.elapsed());
// trim down to size.
let (poly_ck, poly_vk) = trim::<Bls12_381, UniPoly381>(srs.clone(), poly_ck_size.clone());
// g^x^d = maximum power given in setup
let poly_ck_d = srs.powers_of_g[ srs.powers_of_g.len() - 1 ];
// g^x^(d-1) = penultimate power given in setup
let poly_ck_pen = srs.powers_of_g[ srs.powers_of_g.len() - 2 ];
// random Pedersen commitment generator
let ped_h: G1Affine = G1Projective::rand(rng).into_affine();
// precomputation to speed up prover
// lagrange_polynomials_Vn[i] = polynomial equal to 0 at w^j for j!= i and 1 at w^i
let mut lagrange_polynomials_Vn: Vec< UniPoly381 > = Vec::new();
// precomputation to speed up verifier.
// scalars such that lagrange_scalars_Vn[i] = prod_(j != i) (w^i - w^j)^(-1)
let mut lagrange_scalars_Vn: Vec<Fr> = Vec::new();
for i in 0..domain_Vn.size() {
let evals: Vec<Fp256<FrParameters>> = cfg_into_iter!(0..domain_Vn.size())
.map(|k| {
if k == i { Fr::one() }
else { Fr::zero() }
}).collect();
lagrange_polynomials_Vn.push(EvaluationsOnDomain::from_vec_and_domain(evals, domain_Vn).interpolate());
}
for i in 0..5 {
let mut temp = Fr::one();
for j in 0..domain_Vn.size() {
if j != i {
temp = temp * ( domain_Vn.element(i) - domain_Vn.element(j) );
}
}
lagrange_scalars_Vn.push(temp.inverse().unwrap());
}
// also want lagrange_scalars_Vn[logN + 5]
let mut temp = Fr::one();
for j in 0..domain_Vn.size() {
if j != (logN + 5) {
temp = temp * ( domain_Vn.element(logN + 5) - domain_Vn.element(j) );
}
}
lagrange_scalars_Vn.push(temp.inverse().unwrap());
// poly_prod = (X - 1) (X - w) (X - w^2) (X - w^3) (X - w^4) (X - w^(5 + logN)) (X - w^(6 + logN))
// for efficiency not including (X - w^i) for i > 6 + logN
// prover sets these evaluations to 0 anyway.
let mut poly_prod = DensePolynomial::from_coefficients_slice(&[Fr::one()]);
for i in 0..domain_Vn.size() {
if i < 5 {
poly_prod = &poly_prod * (& DensePolynomial::from_coefficients_slice(&[-domain_Vn.element(i) ,Fr::one()]))
}
if i == (5 + logN) {
poly_prod = &poly_prod * (& DensePolynomial::from_coefficients_slice(&[-domain_Vn.element(i) ,Fr::one()]))
}
if i == (6 + logN) {
poly_prod = &poly_prod * (& DensePolynomial::from_coefficients_slice(&[-domain_Vn.element(i) ,Fr::one()]))
}
}
// ped_g = g^x^0 from kzg commitment key.
let ped_g = poly_ck.powers_of_g[0];
// need some powers of g2
// arkworks setup doesn't give these powers but the setup does use a fixed randomness to generate them.
// so we can generate powers of g2 directly.
let rng = &mut ark_std::test_rng();
let beta: Fp256<FrParameters> = Fr::rand(rng);
let mut temp = poly_vk.h.clone();
let mut powers_of_g2: Vec<G2Affine> = Vec::new();
for _ in 0..3.clone() {
powers_of_g2.push( temp.clone() );
temp = temp.mul( beta ).into_affine();
}
let verifier_pp = VerifierPublicParameters {
poly_ck_pen: poly_ck_pen, lagrange_scalars_Vn: lagrange_scalars_Vn,
poly_prod: poly_prod.clone(), poly_vk: poly_vk.clone(),
ped_g: ped_g,
g1_x: srs.powers_of_g[ 1 ],
ped_h: ped_h,
domain_H_size: domain_H.size(),
logN: logN,
domain_Vn: domain_Vn.clone(),
domain_Vn_size: domain_Vn.size(),
powers_of_g2: powers_of_g2.clone()
};
let pp = PublicParameters {
poly_ck: poly_ck, poly_ck_d: poly_ck_d, poly_ck_pen: poly_ck_pen,
lagrange_polynomials_Vn: lagrange_polynomials_Vn,
poly_prod: poly_prod, ped_g: ped_g, ped_h: ped_h,
domain_H_size: domain_H.size(),
logN: logN, poly_vk: poly_vk,
domain_Vn_size: domain_Vn.size(),
domain_Vn: domain_Vn,
verifier_pp: verifier_pp,
actual_degree: actual_degree.clone(),
};
return pp
}

View File

@@ -1,379 +0,0 @@
/*
This file includes Caulk's unity prover and verifier for single openings.
The protocol is described in Figure 2.
*/
use ark_ec::{bls12::Bls12, AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::{Fp256, Field};
use ark_poly::{GeneralEvaluationDomain, EvaluationDomain, UVPolynomial,
Evaluations as EvaluationsOnDomain, univariate::DensePolynomial, Polynomial};
use ark_poly_commit::kzg10::*;
use ark_bls12_381::{Bls12_381, FrParameters, Fr, G1Affine, G2Affine};
use ark_std::{cfg_into_iter, One, Zero};
use crate::tools::{UniPoly381, KzgBls12_381, hash_caulk_single, random_field,
kzg_open_g1, kzg_verify_g1};
// prover public parameters structure for caulk_single_unity_prove
#[allow(non_snake_case)]
pub struct PublicParametersUnity {
pub poly_ck: Powers<'static, Bls12<ark_bls12_381::Parameters> >,
pub gxd: G1Affine,
pub gxpen: G1Affine,
pub lagrange_polynomials_Vn: Vec< UniPoly381>,
pub poly_prod: UniPoly381,
pub logN: usize,
pub domain_Vn: GeneralEvaluationDomain<Fr>,
}
// verifier parameters structure for caulk_single_unity_verify
#[allow(non_snake_case)]
pub struct VerifierPublicParametersUnity {
pub poly_vk: VerifierKey<Bls12<ark_bls12_381::Parameters>>,
pub gxpen: G1Affine,
pub g1: G1Affine,
pub g1_x: G1Affine,
pub lagrange_scalars_Vn: Vec<Fr>,
pub poly_prod: UniPoly381,
pub logN: usize,
pub domain_Vn: GeneralEvaluationDomain<Fr>,
pub powers_of_g2: Vec<G2Affine>,
}
// output structure of caulk_single_unity_prove
#[allow(non_snake_case)]
pub struct CaulkProofUnity {
pub g1_F: G1Affine,
pub g1_H: G1Affine,
pub v1: Fp256<FrParameters>,
pub v2: Fp256<FrParameters>,
pub pi1: G1Affine,
pub pi2: G1Affine,
// pub g1_q3: G1Affine,
}
// Prove knowledge of a, b such that g2_z = [ax - b]_2 and a^n = b^n
#[allow(non_snake_case)]
pub fn caulk_single_unity_prove(
pp: &PublicParametersUnity,
hash_input: &mut Fr,
g2_z: G2Affine,
a: Fp256<FrParameters>,
b: Fp256<FrParameters>,
) -> CaulkProofUnity {
// a_poly = a X - b
let a_poly = DensePolynomial::from_coefficients_slice(&[-b, a]);
// provers blinders for zero-knowledge
let r0: Fp256<FrParameters> = random_field::<Fr>();
let r1: Fp256<FrParameters> = random_field::<Fr>();
let r2: Fp256<FrParameters> = random_field::<Fr>();
let r3: Fp256<FrParameters> = random_field::<Fr>();
let r_poly = DensePolynomial::from_coefficients_slice(&[r1, r2, r3]);
// roots of unity in domain of size m = log_2(n) + 6
let sigma = pp.domain_Vn.element(1);
// X^n - 1
let z: UniPoly381 = pp.domain_Vn.vanishing_polynomial().into();
// computing [ (a/b), (a/b)^2, (a/b)^4, ..., (a/b)^(2^logN) = (a/b)^n ]
let mut a_div_b = a * (b.inverse()).unwrap();
let mut vec_a_div_b: Vec< Fp256<FrParameters> > = Vec::new();
for _ in 0..(pp.logN+1) {
vec_a_div_b.push( a_div_b.clone() );
a_div_b = a_div_b * a_div_b;
}
////////////////////////////
// computing f(X). First compute in domain.
////////////////////////////
let f_evals: Vec<Fp256<FrParameters>> =cfg_into_iter!(0..pp.domain_Vn.size())
.map(|k| {
if k == 0 { a - b }
else if k == 1 { a * sigma - b }
else if k == 2 { a }
else if k == 3 { b }
else if k > 3 && k < (pp.logN + 5) { vec_a_div_b[ k - 4] }
else if k == pp.logN + 5 { r0 }
else {
Fr::zero()
}
}).collect();
let f_poly = &EvaluationsOnDomain::from_vec_and_domain(f_evals, pp.domain_Vn)
.interpolate()
+ &(&r_poly * &z);
// computing f( sigma^(-1) X) and f( sigma^(-2) X)
let mut f_poly_shift_1 = f_poly.clone();
let mut f_poly_shift_2 = f_poly.clone();
let mut shift_1 = Fr::one();
let mut shift_2 = Fr::one();
for i in 0..f_poly.len() {
f_poly_shift_1[i] = f_poly_shift_1[i] * shift_1 ;
f_poly_shift_2[i] = f_poly_shift_2[i] * shift_2 ;
shift_1 = shift_1 * pp.domain_Vn.element( pp.domain_Vn.size() - 1 );
shift_2 = shift_2 * pp.domain_Vn.element( pp.domain_Vn.size() - 2 );
}
////////////////////////////
// computing h(X). First compute p(X) then divide.
////////////////////////////
// p(X) = p(X) + (f(X) - a(X)) (rho_1(X) + rho_2(X))
let mut p_poly = &(&f_poly - &a_poly) * &(&pp.lagrange_polynomials_Vn[0] + &pp.lagrange_polynomials_Vn[1]) ;
// p(X) = p(X) + ( (1 - sigma) f(X) - f(sigma^(-2)X) + f(sigma^(-1) X) ) rho_3(X)
p_poly = &p_poly +
&( &(&( &(&DensePolynomial::from_coefficients_slice(&[(Fr::one() - sigma)]) * &f_poly)
- &f_poly_shift_2 )
+ &f_poly_shift_1 ) * &pp.lagrange_polynomials_Vn[2] ) ;
// p(X) = p(X) + ( -sigma f(sigma^(-1) X) + f(sigma^(-2)X) + f(X) ) rho_4(X)
p_poly = &p_poly +
&( &(&( &(&DensePolynomial::from_coefficients_slice(&[ - sigma]) * &f_poly_shift_1)
+ &f_poly_shift_2 )
+ &f_poly ) * &pp.lagrange_polynomials_Vn[3] ) ;
// p(X) = p(X) + ( f(X) f(sigma^(-1) X) - f(sigma^(-2)X) ) rho_5(X)
p_poly = &p_poly +
&( &( &(&f_poly * &f_poly_shift_1) - &f_poly_shift_2 ) * &pp.lagrange_polynomials_Vn[4] ) ;
// p(X) = p(X) + ( f(X) - f(sigma^(-1) X) * f(sigma^(-1)X) ) prod_(i not in [5, .. , logN + 4]) (X - sigma^i)
p_poly = &p_poly +
&(&( &f_poly - &(&f_poly_shift_1 * &f_poly_shift_1) ) * &pp.poly_prod ) ;
// p(X) = p(X) + ( f(sigma^(-1) X) - 1 ) rho_(logN + 6)(X)
p_poly = &p_poly +
&( &(&f_poly_shift_1 -
&(DensePolynomial::from_coefficients_slice(&[ Fr::one()]) )) * &pp.lagrange_polynomials_Vn[pp.logN + 5] ) ;
// Compute h_hat_poly = p(X) / z_Vn(X) and abort if division is not perfect
let (h_hat_poly, remainder) = p_poly.divide_by_vanishing_poly(pp.domain_Vn).unwrap();
assert!(remainder.is_zero(), "z_Vn(X) does not divide p(X)");
////////////////////////////
// Commit to f(X) and h(X)
////////////////////////////
let (g1_F, _) = KzgBls12_381::commit( &pp.poly_ck, &f_poly, None, None).unwrap();
let g1_F: G1Affine = g1_F.0;
let (h_hat_com, _ ) = KzgBls12_381::commit( &pp.poly_ck, &h_hat_poly, None, None).unwrap();
// g1_H is a commitment to h_hat_poly + X^(d-1) z(X)
let g1_H = h_hat_com.0 + (pp.gxd.mul(-a) + pp.gxpen.mul(b) ).into_affine();
////////////////////////////
// alpha = Hash([z]_2, [F]_1, [H]_1)
////////////////////////////
let alpha = hash_caulk_single::<Fr>(
hash_input.clone(),
Some(& [g1_F, g1_H].to_vec()),
Some(& [g2_z].to_vec()),
None );
*hash_input = alpha.clone();
////////////////////////////
// v1 = f(sigma^(-1) alpha) and v2 = f(sigma^(-2) alpha)
////////////////////////////
let alpha1 = alpha * pp.domain_Vn.element( pp.domain_Vn.size() - 1 );
let alpha2 = alpha * pp.domain_Vn.element( pp.domain_Vn.size() - 2 );
let v1 = f_poly.evaluate(&alpha1);
let v2 = f_poly.evaluate(&alpha2);
////////////////////////////
// Compute polynomial p_alpha(X) that opens at alpha to 0
////////////////////////////
// restating some field elements as polynomials so that can multiply polynomials
let pz_alpha = DensePolynomial::from_coefficients_slice(&[ - z.evaluate(&alpha)]);
let pv1 = DensePolynomial::from_coefficients_slice(&[ v1 ]);
let pv2 = DensePolynomial::from_coefficients_slice(&[ v2 ]);
let prho1_add_2 = DensePolynomial::from_coefficients_slice(&[ pp.lagrange_polynomials_Vn[0].evaluate(&alpha)
+ pp.lagrange_polynomials_Vn[1].evaluate(&alpha)]);
let prho3 = DensePolynomial::from_coefficients_slice(&[ pp.lagrange_polynomials_Vn[2].evaluate(&alpha)] );
let prho4 = DensePolynomial::from_coefficients_slice(&[ pp.lagrange_polynomials_Vn[3].evaluate(&alpha)] );
let prho5 = DensePolynomial::from_coefficients_slice(&[ pp.lagrange_polynomials_Vn[4].evaluate(&alpha)] );
let ppolyprod = DensePolynomial::from_coefficients_slice(&[ pp.poly_prod.evaluate(&alpha)] );
let prhologN6 = DensePolynomial::from_coefficients_slice(&[ pp.lagrange_polynomials_Vn[pp.logN + 5].evaluate(&alpha)] );
// p_alpha(X) = - zVn(alpha) h(X)
let mut p_alpha_poly = &pz_alpha * &h_hat_poly;
// p_alpha(X) = p_alpha(X) + ( f(X) - z(X) )(rho1(alpha) + rho2(alpha))
p_alpha_poly = &p_alpha_poly + &(&(&f_poly - &a_poly) * &prho1_add_2 ) ;
// p_alpha(X) = p_alpha(X) + ( (1-sigma) f(X) - v2 + v1 ) rho3(alpha)
p_alpha_poly = &p_alpha_poly +
&( &(&( &(&DensePolynomial::from_coefficients_slice(&[(Fr::one() - sigma)]) * &f_poly)
- &pv2 )
+ &pv1 ) * &prho3 ) ;
// p_alpha(X) = p_alpha(X) + ( f(X) + v2 - sigma v1 ) rho4(alpha)
p_alpha_poly = &p_alpha_poly +
&( &(&( &(&DensePolynomial::from_coefficients_slice(&[ - sigma]) * &pv1)
+ &pv2 )
+ &f_poly ) * &prho4 ) ;
// p_alpha(X) = p_alpha(X) + ( v1 f(X) - v2 ) rho5(alpha)
p_alpha_poly = &p_alpha_poly +
&( &( &(&f_poly * &pv1) - &pv2 ) * &prho5 ) ;
// p_alpha(X) = p_alpha(X) + ( f(X) - v1^2 ) prod_(i not in [5, .. , logN + 4]) (alpha - sigma^i)
p_alpha_poly = &p_alpha_poly +
&(&( &f_poly - &(&pv1 * &pv1) ) * &ppolyprod ) ;
/*
Differing slightly from paper
Paper uses p_alpha(X) = p_alpha(X) + ( v1 - 1 ) rho_(n)(alpha) assuming that logN = n - 6
We use p_alpha(X) = p_alpha(X) + ( v1 - 1 ) rho_(logN + 6)(alpha) to allow for any value of logN
*/
p_alpha_poly = &p_alpha_poly +
&( &(&pv1 -
&(DensePolynomial::from_coefficients_slice(&[ Fr::one()]) )) * &prhologN6 ) ;
////////////////////////////
// Compute opening proofs
////////////////////////////
// KZG.Open(srs_KZG, f(X), deg = bot, (alpha1, alpha2))
let (_evals1, pi1) = kzg_open_g1(
&pp.poly_ck,
&f_poly,
None,
[&alpha1, &alpha2].to_vec()
);
// KZG.Open(srs_KZG, p_alpha(X), deg = bot, alpha)
let (evals2, pi2) = kzg_open_g1(
& pp.poly_ck,
& p_alpha_poly,
None,
[&alpha].to_vec()
);
// abort if p_alpha( alpha) != 0
assert!( evals2[0] == Fr::zero(), "p_alpha(X) does not equal 0 at alpha" );
let proof = CaulkProofUnity {
g1_F: g1_F,
g1_H: g1_H,
v1: v1,
v2: v2,
pi1: pi1,
pi2: pi2,
};
proof
}
// Verify that the prover knows a, b such that g2_z = g2^(a x - b) and a^n = b^n
#[allow(non_snake_case)]
pub fn caulk_single_unity_verify(
vk: &VerifierPublicParametersUnity,
hash_input: &mut Fr,
g2_z: &G2Affine,
proof: &CaulkProofUnity
) -> bool {
// g2_z must not be the identity
assert!( g2_z.is_zero() == false, "g2_z is the identity");
// roots of unity in domain of size m = log_2(n) + 6
let sigma = vk.domain_Vn.element(1);
let v1 = proof.v1; let v2 = proof.v2;
////////////////////////////
// alpha = Hash(A, F, H)
////////////////////////////
let alpha = hash_caulk_single::<Fr>( hash_input.clone(), Some(& [proof.g1_F, proof.g1_H].to_vec()), Some(& [g2_z.clone()].to_vec()), None );
*hash_input = alpha.clone();
// alpha1 = sigma^(-1) alpha and alpha2 = sigma^(-2) alpha
let alpha1: Fr = alpha * vk.domain_Vn.element( vk.domain_Vn.size() - 1 );
let alpha2: Fr = alpha * vk.domain_Vn.element( vk.domain_Vn.size() - 2 );
///////////////////////////////
// Compute P = commitment to p_alpha(X)
///////////////////////////////
// Useful field elements.
// zalpha = z(alpha) = alpha^n - 1,
let zalpha = vk.domain_Vn.vanishing_polynomial().evaluate(&alpha);
// rhoi = L_i(alpha) = ls_i * [(X^m - 1) / (alpha - w^i) ]
// where ls_i = lagrange_scalars_Vn[i] = prod_{j neq i} (w_i - w_j)^(-1)
let rho0 = zalpha * (alpha - vk.domain_Vn.element(0)).inverse().unwrap() * vk.lagrange_scalars_Vn[0];
let rho1 = zalpha * (alpha - vk.domain_Vn.element(1)).inverse().unwrap() * vk.lagrange_scalars_Vn[1];
let rho2 = zalpha * (alpha - vk.domain_Vn.element(2)).inverse().unwrap() * vk.lagrange_scalars_Vn[2];
let rho3 = zalpha * (alpha - vk.domain_Vn.element(3)).inverse().unwrap() * vk.lagrange_scalars_Vn[3];
let rho4 = zalpha * (alpha - vk.domain_Vn.element(4)).inverse().unwrap() * vk.lagrange_scalars_Vn[4];
let rhologN5 = zalpha * (alpha - vk.domain_Vn.element(vk.logN + 5)).inverse().unwrap() * vk.lagrange_scalars_Vn[5];
// pprod = prod_(i not in [5,..,logN+4]) (alpha - w^i)
let pprod = vk.poly_prod.evaluate(&alpha);
// P = H^(-z(alpha)) * F^(rho0(alpha) + L_1(alpha) + (1 - w)L_2(alpha) + L_3(alpha) + v1 L_4(alpha)
// + prod_(i not in [5,..,logN+4]) (alpha - w^i))
// * g^( (v1 -v2)L_2(alpha) + (v2 - w v1)L_3(alpha) - v2 L_4(alpha) + (v1 - 1)L_(logN+5)(alpha)
// - v1^2 * prod_(i not in [5,..,logN+4]) (alpha - w^i) )
let g1_p = proof.g1_H.mul( -zalpha )
+ proof.g1_F.mul(rho0 + rho1 + (Fr::one() - sigma) * rho2 + rho3 + v1 * rho4 + pprod)
+ vk.g1.mul( (v1 - v2) * rho2 + (v2 - sigma * v1) * rho3 - v2 * rho4 + (v1 - Fr::one()) * rhologN5 - v1 * v1 * pprod )
;
///////////////////////////////
// Pairing checks
///////////////////////////////
///////////////////////////////
// KZG opening check
///////////////////////////////
let check1 = kzg_verify_g1(
& [vk.g1, vk.g1_x].to_vec(), & vk.powers_of_g2,
proof.g1_F,
None,
[alpha1, alpha2].to_vec(),
[proof.v1, proof.v2].to_vec(),
proof.pi1
);
let g1_q = proof.pi2.clone();
// check that e( P Q^(alpha), g2 ) e( g^( -(rho0 + rho1) - z(alpha) x^(d-1) ), A ) e( Q^(-1), g2^x ) = 1
// where Q = pi2 and A = g2_z.
// A is mapped from affine to projective and back to affine before being prepared;
// this round trip is only there to satisfy type inference for the pairing inputs.
let eq1 = vec![
( (g1_p + g1_q.mul( alpha ) ).into_affine().into(), vk.poly_vk.prepared_h.clone() ),
((( vk.g1.mul(-rho0 - rho1) + vk.gxpen.mul(-zalpha) ).into_affine() ).into(), g2_z.into_projective().into_affine().into() ),
( (- g1_q).into(), vk.poly_vk.prepared_beta_h.clone() )
];
let check2 = Bls12_381::product_of_pairings(&eq1).is_one();
return check1 && check2
}
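// Illustrative sketch (not from the repository) of the pairing-product pattern used in
// eq1 above, on bare generators rather than proof elements: it checks
// e([a]_1, [b]_2) * e([-ab]_1, [1]_2) = 1.
fn pairing_product_sketch() {
use ark_bls12_381::{Bls12_381, Fr, G1Affine, G2Affine};
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::One;
let g1 = G1Affine::prime_subgroup_generator();
let g2 = G2Affine::prime_subgroup_generator();
let (a, b) = (Fr::from(3u64), Fr::from(5u64));
let pairs = vec![
(g1.mul(a).into_affine().into(), g2.mul(b).into_affine().into()),
(g1.mul(-(a * b)).into_affine().into(), g2.into()),
];
assert!(Bls12_381::product_of_pairings(&pairs).is_one());
}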


@@ -1,104 +0,0 @@
use ark_bls12_381::{Bls12_381, Fr, G1Affine};
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain, UVPolynomial, Polynomial};
use ark_poly_commit::kzg10::KZG10;
use ark_ec::{AffineCurve,ProjectiveCurve};
use std::{time::{Instant}};
mod tools;
mod caulk_single_setup;
mod caulk_single_unity;
mod pedersen;
mod caulk_single;
mod multiopen;
use crate::tools::{read_line, kzg_open_g1, random_field,UniPoly381};
use crate::caulk_single_setup::{caulk_single_setup};
use crate::caulk_single::{caulk_single_prove, caulk_single_verify};
use crate::multiopen::{multiple_open};
pub type KzgBls12_381 = KZG10<Bls12_381, UniPoly381>;
#[allow(non_snake_case)]
fn main() {
// setting public parameters
// current kzg setup should be changed with output from a setup ceremony
println!("What is the bitsize of the degree of the polynomial inside the commitment? ");
let p: usize = read_line();
let max_degree: usize = (1<<p) +2;
let actual_degree: usize = (1<<p)-1;
// run the setup
let now = Instant::now();
let pp = caulk_single_setup(max_degree, actual_degree);
println!("Time to setup single openings of table size {:?} = {:?}", actual_degree + 1, now.elapsed());
//polynomial and commitment
let now = Instant::now();
// deterministic randomness. Should never be used in practice.
let rng = &mut ark_std::test_rng();
let c_poly = UniPoly381::rand(actual_degree, rng);
let (g1_C, _) = KzgBls12_381::commit( &pp.poly_ck, &c_poly, None, None).unwrap();
let g1_C = g1_C.0;
println!("Time to KZG commit one element from table size {:?} = {:?}", actual_degree + 1, now.elapsed());
//point at which we will open c_com
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(actual_degree).unwrap();
println!("Which position in the vector should we open at? ");
let position: usize = read_line();
assert!(0 < position, "This position does not exist in this vector.");
assert!(position <= (actual_degree+1), "This position does not exist in this vector.");
let omega_i = input_domain.element(position);
//Deciding whether to open all positions or just the one position.
println!("Should we open all possible positions? Opening all possible positions is slow. Please input either YES or NO" );
let open_all: String = read_line();
let g1_q: G1Affine;
if (open_all == "NO") || (open_all == "No") || (open_all == "no") {
// Q = g1_q = g^( (c(x) - c(w_i)) / (x - w_i) )
let now = Instant::now();
let a = kzg_open_g1(&pp.poly_ck, & c_poly, None, [& omega_i].to_vec() );
println!("Time to KZG open one element from table size {:?} = {:?}", actual_degree + 1, now.elapsed());
g1_q = a.1;
}
else {
assert!( (open_all == "YES") || (open_all == "Yes") || (open_all == "yes") , "Console input is invalid");
//compute all openings
let now = Instant::now();
let g1_qs = multiple_open(&c_poly, &pp.poly_ck, p);
g1_q = g1_qs[position];
println!("Time to compute all KZG openings {:?}", now.elapsed());
}
// z = c(w_i) and cm = g^z h^r for random r
let z = c_poly.evaluate(&omega_i);
let r = random_field::<Fr>();
let cm = (pp.ped_g.mul( z ) + pp.ped_h.mul( r )).into_affine();
// run the prover
println!("We are now ready to run the prover. How many times should we run it?" );
let number_of_openings: usize = read_line();
let now = Instant::now();
let mut proof_evaluate = caulk_single_prove(&pp, &g1_C, &cm, position, &g1_q, &z, &r );
for _ in 1..(number_of_openings-1) {
proof_evaluate = caulk_single_prove(&pp, &g1_C, &cm, position, &g1_q, &z, &r );
}
println!("Time to evaluate {} single openings of table size {:?} = {:?}", number_of_openings,actual_degree + 1, now.elapsed());
// run the verifier
println!( "The proof verifies = {:?}", caulk_single_verify(&pp.verifier_pp, &g1_C, &cm, &proof_evaluate) );
let now = Instant::now();
for _ in 0..(number_of_openings-1) {
caulk_single_verify(&pp.verifier_pp, &g1_C, &cm, &proof_evaluate);
}
println!("Time to verify {} single openings of table size {:?} = {:?}", number_of_openings, actual_degree + 1, now.elapsed());
}


@@ -1,408 +0,0 @@
/*
This file includes an algorithm for calculating n openings of a KZG vector commitment of size n in n log(n) time.
The algorithm is by Feist and Khovratovich.
It is useful for preprocessing.
The full algorithm is described here https://github.com/khovratovich/Kate/blob/master/Kate_amortized.pdf
*/
use std::str::FromStr;
//use std::time::{Instant};
use std::vec::Vec;
use ark_bls12_381::{Bls12_381,Fr,FrParameters,G1Affine,G1Projective};
use ark_poly::univariate::DensePolynomial;
use ark_ff::{PrimeField, Fp256, Field};
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain, UVPolynomial};
use ark_ec::{AffineCurve,ProjectiveCurve};
use ark_poly_commit::kzg10::*;
//compute all pre-proofs using DFT
// h_i= c_d[x^{d-i-1}]+c_{d-1}[x^{d-i-2}]+c_{d-2}[x^{d-i-3}]+\cdots + c_{i+2}[x]+c_{i+1}[1]
pub fn compute_h(
c_poly: &DensePolynomial<Fp256<FrParameters>>, //c(X) degree up to d<2^p , i.e. c_poly has at most d+1 coeffs non-zero
poly_ck: &Powers<Bls12_381>, //SRS
p: usize
)->Vec<G1Projective>
{
let mut coeffs = c_poly.coeffs().to_vec();
let dom_size = 1<<p;
let fpzero = Fp256::from_str("0").unwrap();
coeffs.resize(dom_size,fpzero);
//let now = Instant::now();
//1. x_ext = [ [x^(d-1)], [x^(d-2)], ..., [x], [1], d+2 [0]'s ]
let mut x_ext = vec![];
for i in 0..=dom_size-2{
x_ext.push( poly_ck.powers_of_g[dom_size-2-i].into_projective());
}
let g1inf = poly_ck.powers_of_g[0].mul(fpzero);
x_ext.resize(2*dom_size,g1inf); //filling 2d+2 neutral elements
let y = dft_g1(&x_ext, p+1);
//println!("Step 1 computed in {:?}", now.elapsed());
//2. c_ext = [c_d, d zeroes, c_d,c_{0},c_1,...,c_{d-2},c_{d-1}]
//let now = Instant::now();
let mut c_ext = vec![];
c_ext.push(coeffs[coeffs.len()-1]);
c_ext.resize(dom_size,fpzero);
c_ext.push(coeffs[coeffs.len()-1]);
for i in 0..coeffs.len()-1{
c_ext.push(coeffs[i]);
}
assert_eq!(c_ext.len(),2*dom_size);
let v = dft_opt(&c_ext, p+1);
//println!("Step 2 computed in {:?}", now.elapsed());
//3. u = y o v
//let now = Instant::now();
let u = y.into_iter()
.zip(v.into_iter())
.map(|(a,b)|{a.mul(b.into_repr())})
.collect();
// println!("Step 3 computed in {:?}", now.elapsed());
//4. h_ext = idft_{2d+2}(u)
//let now = Instant::now();
let h_ext = idft_g1(&u, p+1);
//println!("Step 4 computed in {:?}", now.elapsed());
return h_ext[0..dom_size].to_vec();
}
//compute DFT of size @dom_size over vector of G1 elements
//q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for 0<= i< dom_size=2^p
pub fn dft_g1(
h: &Vec<G1Projective>,
p: usize
)->Vec<G1Projective>
{
let dom_size = 1<<p;
assert_eq!(h.len(),dom_size); //we do not support inputs of size not power of 2
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
let mut l = dom_size/2;
let mut m: usize=1;
//Stockham FFT
let mut xvec = vec![h.to_vec()];
for t in 1..=p{
let mut xt= xvec[t-1].clone();
for j in 0..l{
for k in 0..m{
let c0 = xvec[t-1][k+j*m].clone();
let c1 = &xvec[t-1][k+j*m+l*m];
xt[k+2*j*m] = c0+c1;
let wj_2l=input_domain.element((j*dom_size/(2*l))%dom_size);
xt[k+2*j*m+m]= (c0-c1).mul(wj_2l.into_repr());
}
}
l = l/2;
m = m*2;
xvec.push(xt.to_vec());
}
return xvec[p].to_vec();
}
//compute DFT of size @dom_size over vector of Fr elements
//q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for 0<= i< dom_size=2^p
pub fn dft_opt(
h: &Vec<Fr>,
p: usize
)->Vec<Fr>
{
let dom_size = 1<<p;
assert_eq!(h.len(),dom_size); //we do not support inputs of size not power of 2
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
let mut l = dom_size/2;
let mut m: usize=1;
//Stockham FFT
let mut xvec = vec![h.to_vec()];
for t in 1..=p{
let mut xt= xvec[t-1].clone();
for j in 0..l{
for k in 0..m{
let c0 = xvec[t-1][k+j*m].clone();
let c1 = &xvec[t-1][k+j*m+l*m];
xt[k+2*j*m] = c0+c1;
let wj_2l=input_domain.element((j*dom_size/(2*l))%dom_size);
xt[k+2*j*m+m]= (c0-c1)*(wj_2l);
}
}
l = l/2;
m = m*2;
xvec.push(xt.to_vec());
}
return xvec[p].to_vec();
}
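// Sanity sketch (illustrative only, assuming p = 2): the output of dft_opt should agree
// with naive evaluation of q_i = h_0 + h_1 w^i + ... + h_{dom_size-1} w^{(dom_size-1)i}.
fn dft_opt_sanity_sketch() {
use ark_std::Zero;
let p: usize = 2;
let dom_size = 1 << p;
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
let h: Vec<Fr> = (0..dom_size).map(|k| Fr::from(k as u64 + 1)).collect();
let q = dft_opt(&h, p);
for i in 0..dom_size {
let mut naive = Fr::zero();
for k in 0..dom_size {
naive = naive + h[k] * input_domain.element((i * k) % dom_size);
}
assert_eq!(q[i], naive);
}
}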
//compute all openings to c_poly using a smart formula
pub fn multiple_open(
c_poly: &DensePolynomial<Fp256<FrParameters>>, //c(X)
poly_ck: &Powers<Bls12_381>, //SRS
p: usize
)->Vec<G1Affine>
{
let degree=c_poly.coeffs.len()-1;
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(degree).unwrap();
//let now = Instant::now();
let h2 = compute_h(c_poly,poly_ck,p);
//println!("H2 computed in {:?}", now.elapsed());
//assert_eq!(h,h2);
let dom_size = input_domain.size();
assert_eq!(1<<p,dom_size);
assert_eq!(degree+1,dom_size);
/*let now = Instant::now();
let q = DFTG1(&h,p);
println!("Q computed in {:?}", now.elapsed());*/
//let now = Instant::now();
let q2 = dft_g1(&h2,p);
//println!("Q2 computed in {:?}", now.elapsed());
//assert_eq!(q,q2);
let mut res: Vec<G1Affine> = vec![];
for i in 0..dom_size{
res.push(q2[i].into_affine());
}
return res;
}
//compute idft of size @dom_size over vector of G1 elements
//q_i = (h_0 + h_1w^-i + h_2w^{-2i}+\cdots + h_{dom_size-1}w^{-(dom_size-1)i})/dom_size for 0<= i< dom_size=2^p
pub fn idft_g1(
h: &Vec<G1Projective>,
p: usize
)->Vec<G1Projective>
{
let dom_size = 1<<p;
assert_eq!(h.len(),dom_size); //we do not support inputs of size not power of 2
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
let mut l = dom_size/2;
let mut m: usize=1;
let mut dom_fr = Fr::from_str("1").unwrap();
//Stockham FFT
let mut xvec = vec![h.to_vec()];
for t in 1..=p{
let mut xt= xvec[t-1].clone();
for j in 0..l{
for k in 0..m{
let c0 = xvec[t-1][k+j*m].clone();
let c1 = &xvec[t-1][k+j*m+l*m];
xt[k+2*j*m] = c0+c1;
let wj_2l=input_domain.element((dom_size-(j*dom_size/(2*l))%dom_size)%dom_size);
xt[k+2*j*m+m]= (c0-c1).mul(wj_2l.into_repr()); //Difference #1 to forward DFT
}
}
l = l/2;
m = m*2;
dom_fr = dom_fr+dom_fr;
xvec.push(xt.to_vec());
}
let res = xvec[p]
.to_vec()
.iter()
.map(|x|{x
.mul(dom_fr
.inverse()
.unwrap().into_repr())})
.collect();
return res;
}
#[cfg(test)]
pub mod tests {
use crate::*;
use crate::caulk_single_setup::caulk_single_setup;
use crate::multiopen::*;
use crate::tools::{kzg_open_g1};
use ark_poly::univariate::DensePolynomial;
use ark_ff::Fp256;
pub fn commit_direct(
c_poly: &DensePolynomial<Fp256<FrParameters>>, //c(X)
poly_ck: &Powers<Bls12_381>, //SRS
)-> G1Affine
{
assert!(c_poly.coeffs.len()<=poly_ck.powers_of_g.len());
let mut com = poly_ck.powers_of_g[0].mul(c_poly.coeffs[0]);
for i in 1..c_poly.coeffs.len()
{
com = com + poly_ck.powers_of_g[i].mul(c_poly.coeffs[i]);
}
return com.into_affine();
}
//compute all openings to c_poly by mere calling `open` N times
pub fn multiple_open_naive(
c_poly: &DensePolynomial<Fp256<FrParameters>>,
c_com_open: &Randomness< Fp256<FrParameters>, DensePolynomial<Fp256<FrParameters>> >,
poly_ck: &Powers<Bls12_381>,
degree: usize
)
->Vec<G1Affine>
{
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(degree).unwrap();
let mut res: Vec<G1Affine> = vec![];
for i in 0..input_domain.size(){
let omega_i = input_domain.element(i);
res.push( kzg_open_g1_test(&c_poly, &omega_i, &c_com_open, &poly_ck).w);
}
return res;
}
////////////////////////////////////////////////
pub fn kzg_open_g1_test(
p: &DensePolynomial<Fp256<FrParameters>>,
omega_5: &Fp256<FrParameters>,
polycom_open: &Randomness< Fp256<FrParameters>, DensePolynomial<Fp256<FrParameters>> >,
poly_ck: &Powers<Bls12_381>
) -> Proof< Bls12_381 > {
let rng = &mut ark_std::test_rng();
let (witness_polynomial, _random_witness_polynomial) =
KzgBls12_381::compute_witness_polynomial(p, omega_5.clone(), polycom_open).unwrap();
let (temp0, _temp1) = KZG10::commit(poly_ck, &witness_polynomial, None, Some(rng)).unwrap();
let poly_open: Proof< Bls12_381 > = Proof { w: temp0.0 , random_v: None, };
return poly_open
}
//compute KZG proof Q = g1_q = g^( (c(x) - c(w^i)) / (x - w^i) ) where x is secret, w^i is the point where we open, and c(X) is the committed polynomial
pub fn single_open_default(
c_poly: &DensePolynomial<Fp256<FrParameters>>, //c(X)
c_com_open: &Randomness< Fp256<FrParameters>, DensePolynomial<Fp256<FrParameters>> >, //
poly_ck: &Powers<Bls12_381>,
i: usize, //
degree: usize
)
->G1Affine
{
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(degree).unwrap();
let omega_i = input_domain.element(i);
let c_poly_open = kzg_open_g1_test(&c_poly, &omega_i, &c_com_open, &poly_ck);
return c_poly_open.w ;
}
//KZG proof/opening at point y for c(X) = sum_i c_i X^i
//(1)T_y(X) = sum_i t_i X^i
//(2) t_{deg-1} = c_deg
//(3) t_j = c_{j+1} + y*t_{j+1}
pub fn single_open_fast(
c_poly: &DensePolynomial<Fp256<FrParameters>>, //c(X)
poly_ck: &Powers<Bls12_381>, //SRS
i: usize, //y=w^i
degree: usize //degree of c(X)
)
->G1Affine
{
//computing opening point
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(degree).unwrap();
let y = input_domain.element(i);
//compute quotient
let mut t_poly = c_poly.clone();
t_poly.coeffs.remove(0); //shifting indices
for j in (0..t_poly.len()-1).rev(){
t_poly.coeffs[j] = c_poly.coeffs[j+1] + y*t_poly.coeffs[j+1]
}
//commit
let (t_com,_) = KzgBls12_381::commit( &poly_ck, &t_poly, None, None).unwrap();
return t_com.0;
}
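// A minimal sketch (hypothetical extra test) of the recurrence above on a toy polynomial:
// for c(X) = 1 + 2X + 3X^2 and y = 5 the quotient (c(X) - c(5))/(X - 5) equals 17 + 3X,
// i.e. t_1 = c_2 = 3 and t_0 = c_1 + y*t_1 = 17.
pub fn test_quotient_recurrence_sketch() {
use ark_ff::One;
use ark_poly::{Polynomial, UVPolynomial};
let c = DensePolynomial::from_coefficients_slice(&[Fr::from(1u64), Fr::from(2u64), Fr::from(3u64)]);
let y = Fr::from(5u64);
let numerator = &c - &DensePolynomial::from_coefficients_slice(&[c.evaluate(&y)]);
let divisor = DensePolynomial::from_coefficients_slice(&[-y, Fr::one()]);
let t_poly = &numerator / &divisor;
assert_eq!(t_poly.coeffs, vec![Fr::from(17u64), Fr::from(3u64)]);
}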
pub fn test_single()
{
// setting public parameters
// current kzg setup should be changed with output from a setup ceremony
let max_degree: usize = 100;
let actual_degree: usize = 63;
let pp = caulk_single_setup(max_degree, actual_degree);
// Setting up test instance to run evaluate on.
// test randomness for c_poly is the same every time.
// the test index is fixed every time
// g_c = g^(c(x))
let rng = &mut ark_std::test_rng();
let c_poly = UniPoly381::rand(actual_degree, rng);
let (c_com, c_com_open) = KzgBls12_381::commit( &pp.poly_ck, &c_poly, None, None).unwrap();
let i: usize = 6;
let q = single_open_default(&c_poly,&c_com_open,&pp.poly_ck,i,actual_degree);
let q2 = single_open_fast(&c_poly,&pp.poly_ck,i,actual_degree);
assert_eq!(q,q2);
}
pub fn test_dft(
h: &Vec<G1Projective>,
p: usize)
{
let c_dft = dft_g1(h,p);
let c_back = idft_g1(&c_dft,p);
assert_eq!(h,&c_back);
println!("DFT test passed");
}
pub fn test_commit()
{
// current kzg setup should be changed with output from a setup ceremony
let max_degree: usize = 100;
let actual_degree: usize = 63;
let pp = caulk_single_setup(max_degree, actual_degree);
// Setting up test instance to run evaluate on.
// test randomness for c_poly is the same every time.
// g_c = g^(c(x))
let rng = &mut ark_std::test_rng();
let c_poly = UniPoly381::rand(actual_degree, rng);
let (c_com, c_com_open) = KzgBls12_381::commit( &pp.poly_ck, &c_poly, None, None).unwrap();
let g_c1 = c_com.0;
let g_c2 = commit_direct(&c_poly, &pp.poly_ck);
assert_eq!(g_c1,g_c2);
println!("commit test passed")
}
#[test]
pub fn test_multi()
{
// current kzg setup should be changed with output from a setup ceremony
let p: usize = 9;
let max_degree: usize = 1<<p+1;
let actual_degree: usize = (1<<p)-1;
let pp = caulk_single_setup(max_degree, actual_degree);
// Setting up test instance to run evaluate on.
// test randomness for c_poly is the same every time.
// the test index is fixed every time
// g_c = g^(c(x))
let rng = &mut ark_std::test_rng();
let c_poly = UniPoly381::rand(actual_degree, rng);
let (c_com, c_com_open) = KzgBls12_381::commit( &pp.poly_ck, &c_poly, None, None).unwrap();
let g_c = c_com.0;
let now = Instant::now();
let q = multiple_open_naive(&c_poly,&c_com_open,&pp.poly_ck,actual_degree);
println!("Multi naive computed in {:?}", now.elapsed());
let now = Instant::now();
let q2 = multiple_open(&c_poly,&pp.poly_ck,p);
println!("Multi advanced computed in {:?}", now.elapsed());
assert_eq!(q,q2);
}
}


@@ -1,71 +0,0 @@
/*
This file includes a prover and verifier for demonstrating knowledge of an opening of a Pedersen commitment.
The protocol is informally described in Appendix A.2, Proof of Opening of a Pedersen Commitment
*/
use ark_ec::{ProjectiveCurve, AffineCurve};
use ark_ff::{Fp256, PrimeField};
use ark_bls12_381::{G1Affine, FrParameters, Fr};
use ark_std::Zero;
use crate::tools::{hash_caulk_single, random_field};
// Structure of proof output by prove_pedersen
pub struct ProofPed {
pub g1_r: G1Affine,
pub t1: Fp256<FrParameters>,
pub t2: Fp256<FrParameters>,
}
// prove knowledge of a and b such that cm = g^a h^b
pub fn prove_pedersen(
g1: &G1Affine,
h1: &G1Affine,
hash_input: &mut Fr,
cm: &G1Affine,
a: &Fp256<FrParameters>,
b: &Fp256<FrParameters>,
) -> ProofPed {
// R = g^s1 h^s2
let s1: Fr = random_field::<Fr>();
let s2: Fr = random_field::<Fr>();
let g1_r = (g1.mul( s1.into_repr() ) + h1.mul( s2.into_repr() )).into_affine();
// c = Hash(cm, R)
let c = hash_caulk_single::<Fr>( hash_input.clone(), Some(& [cm.clone(), g1_r].to_vec()), None, None );
*hash_input = c.clone();
let t1 = s1 + c * a;
let t2 = s2 + c * b;
let proof = ProofPed {
g1_r: g1_r, t1: t1, t2: t2
};
return proof
}
// Verify that prover knows a and b such that cm = g^a h^b
pub fn verify_pedersen(
g1: &G1Affine,
h1: &G1Affine,
hash_input: &mut Fr,
cm: &G1Affine,
proof: &ProofPed,
) -> bool {
// compute c = Hash(cm, R)
let c = hash_caulk_single::<Fr>( hash_input.clone(), Some(& [cm.clone(), proof.g1_r.clone()].to_vec()), None, None );
*hash_input = c.clone();
// check that R g^(-t1) h^(-t2) cm^(c) = 1
let check = proof.g1_r.into_projective() + g1.mul( - proof.t1 )
+ h1.mul( - proof.t2 ) + cm.mul( c );
return check.is_zero()
}
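// A minimal usage sketch (illustrative only): the generators below are made up on the spot,
// whereas in the protocol g and h come from the public parameters and the hash state is the
// running Fiat-Shamir transcript rather than zero.
fn pedersen_round_trip_sketch() {
let g1 = G1Affine::prime_subgroup_generator();
let h1 = g1.mul(random_field::<Fr>()).into_affine();
let (a, b) = (random_field::<Fr>(), random_field::<Fr>());
let cm = (g1.mul(a) + h1.mul(b)).into_affine();
let mut prover_state = Fr::zero();
let proof = prove_pedersen(&g1, &h1, &mut prover_state, &cm, &a, &b);
let mut verifier_state = Fr::zero();
assert!(verify_pedersen(&g1, &h1, &mut verifier_state, &cm, &proof));
}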


@@ -1,337 +0,0 @@
/*
This file includes backend tools:
(1) read_line() is for taking inputs from the user
(2) kzg_open_g1 is for opening KZG commitments
(3) kzg_verify_g1 is for verifying KZG commitments
(4) hash_caulk_single is for hashing group and field elements into a field element
(5) random_field is for generating random field elements
*/
use ark_bls12_381::{Bls12_381, Fr, G1Affine, G2Affine, G1Projective};
use ark_ff::{PrimeField, Field};
use ark_poly::{univariate::DensePolynomial, UVPolynomial, Polynomial};
use ark_serialize::CanonicalSerialize;
use ark_std::{One, Zero};
use blake2s_simd::Params;
use rand::{Rng, SeedableRng, thread_rng};
use rand_chacha::ChaChaRng;
use std::{io, str::FromStr, error::Error};
use ark_poly_commit::kzg10::*;
use ark_poly::univariate::DensePolynomial as DensePoly;
use ark_ec::{PairingEngine, AffineCurve, ProjectiveCurve, msm::VariableBaseMSM};
pub type UniPoly381 = DensePoly<<Bls12_381 as PairingEngine>::Fr>;
pub type KzgBls12_381 = KZG10<Bls12_381, UniPoly381>;
// Function for reading inputs from the command line.
pub fn read_line<T: FromStr>() -> T
where <T as FromStr>::Err: Error + 'static
{
let mut input = String::new();
io::stdin().read_line(&mut input).expect("Failed to get console input.");
let output: T = input.trim().parse().expect("Console input is invalid.");
output
}
////////////////////////////////////////////////
//
//copied from arkworks
fn convert_to_bigints<F: PrimeField>(p: &Vec<F>) -> Vec<F::BigInt> {
let coeffs = ark_std::cfg_iter!(p)
.map(|s| s.into_repr())
.collect::<Vec<_>>();
coeffs
}
/////////////////////////////////////////////////////////////////////
// KZG opening and verifying
/////////////////////////////////////////////////////////////////////
/*
KZG.Open( srs_KZG, f(X), deg, (alpha1, alpha2, ..., alphan) )
returns ([f(alpha1), ..., f(alphan)], pi)
Algorithm described in Section 4.6.1, Multiple Openings
*/
pub fn kzg_open_g1(poly_ck: &Powers<Bls12_381>,
poly: &DensePolynomial<Fr>,
max_deg: Option<&usize>,
points: Vec<&Fr>) -> (Vec<Fr>, G1Affine) {
let mut evals = Vec::new();
let mut proofs = Vec::new();
for i in 0..points.len() {
let (eval, pi) = kzg_open_g1_single( poly_ck, poly, max_deg, points[i] );
evals.push( eval );
proofs.push( pi );
}
let mut res: G1Projective = G1Projective::zero(); //default value
for j in 0..points.len()
{
let w_j= points[j].clone();
//1. Computing coefficient [1/prod]
let mut prod =Fr::one();
for k in 0..points.len() {
let w_k = points[k];
if k!=j{
prod = prod*(w_j-w_k);
}
}
//2. Summation
let q_add = proofs[j].mul(prod.inverse().unwrap()); //[1/prod]Q_{j}
res = res + q_add;
}
return (evals, res.into_affine());
}
//KZG.Open( srs_KZG, f(X), deg, alpha ) returns (f(alpha), pi)
fn kzg_open_g1_single(poly_ck: &Powers<Bls12_381>,
poly: &DensePolynomial<Fr>,
max_deg: Option<&usize>,
point: &Fr) -> (Fr, G1Affine) {
let eval = poly.evaluate( &point);
let global_max_deg = poly_ck.powers_of_g.len();
let mut d: usize = 0;
if max_deg == None {
d += global_max_deg;
}
else {
d += max_deg.unwrap();
}
let divisor = DensePolynomial::from_coefficients_vec(vec![-point.clone(), Fr::one()]);
let witness_polynomial = poly / &divisor;
assert!( poly_ck.powers_of_g[(global_max_deg - d)..].len() >= witness_polynomial.len());
let proof = VariableBaseMSM::multi_scalar_mul(&poly_ck.powers_of_g[(global_max_deg - d)..], &convert_to_bigints(&witness_polynomial.coeffs).as_slice() ).into_affine();
return (eval, proof)
}
/*
// KZG.Verify( srs_KZG, F, deg, (alpha1, alpha2, ..., alphan), (v1, ..., vn), pi )
Algorithm described in Section 4.6.1, Multiple Openings
*/
pub fn kzg_verify_g1( //Verify that @c_com is a commitment to C(X) such that C(x)=z
powers_of_g1: &Vec<G1Affine>, // generator of G1
powers_of_g2: &Vec<G2Affine>, // [1]_2, [x]_2, [x^2]_2, ...
c_com: G1Affine, //commitment
max_deg: Option<&usize>, // max degree
points: Vec<Fr>, // x such that eval = C(x)
evals: Vec<Fr>, //evaluation
pi: G1Affine, //proof
)
->bool{
// Interpolation set
// tau_i(X) = lagrange_tau[i] = polynomial equal to 0 at point[j] for j!= i and 1 at points[i]
let mut lagrange_tau = DensePolynomial::from_coefficients_slice(&[Fr::zero()]);
for i in 0..points.len() {
let mut temp : UniPoly381 = DensePolynomial::from_coefficients_slice(&[Fr::one()]);
for j in 0..points.len() {
if i != j {
temp = &temp * (&DensePolynomial::from_coefficients_slice(&[-points[j] ,Fr::one()]));
}
}
let lagrange_scalar = temp.evaluate(&points[i]).inverse().unwrap() * &evals[i] ;
lagrange_tau = lagrange_tau + &temp * (&DensePolynomial::from_coefficients_slice(&[lagrange_scalar])) ;
}
// commit to sum evals[i] tau_i(X)
assert!( powers_of_g1.len() >= lagrange_tau.len(), "KZG verifier doesn't have enough g1 powers" );
let g1_tau = VariableBaseMSM::multi_scalar_mul(&powers_of_g1[..lagrange_tau.len()], convert_to_bigints(&lagrange_tau.coeffs).as_slice());
// vanishing polynomial
// z_tau = polynomial equal to 0 at every points[j]
let mut z_tau = DensePolynomial::from_coefficients_slice(&[Fr::one()]);
for i in 0..points.len() {
z_tau = &z_tau * (&DensePolynomial::from_coefficients_slice(&[-points[i] ,Fr::one()]));
}
// commit to z_tau(X) in g2
assert!( powers_of_g2.len() >= z_tau.len(), "KZG verifier doesn't have enough g2 powers" );
let g2_z_tau = VariableBaseMSM::multi_scalar_mul(&powers_of_g2[..z_tau.len()], convert_to_bigints(&z_tau.coeffs).as_slice());
let global_max_deg = powers_of_g1.len();
let mut d: usize = 0;
if max_deg == None {
d += global_max_deg;
}
else {
d += max_deg.unwrap();
}
let pairing1 = Bls12_381::pairing(
c_com.into_projective()-g1_tau,
powers_of_g2[global_max_deg - d]
);
let pairing2 =Bls12_381::pairing(
pi,
g2_z_tau
);
return pairing1==pairing2;
}
/////////////////////////////////////////////////////////////////////
// Hashing
/////////////////////////////////////////////////////////////////////
// hashing to field copied from
// https://github.com/kobigurk/aggregatable-dkg/blob/main/src/signature/utils/hash.rs
fn rng_from_message(personalization: &[u8], message: &[u8]) -> ChaChaRng {
let hash = Params::new()
.hash_length(32)
.personal(personalization)
.to_state()
.update(message)
.finalize();
let mut seed = [0u8; 32];
seed.copy_from_slice(hash.as_bytes());
let rng = ChaChaRng::from_seed(seed);
rng
}
fn hash_to_field<F: PrimeField>(
personalization: &[u8],
message: &[u8],
) -> F {
let mut rng = rng_from_message(personalization, message);
loop {
let bytes: Vec<u8> = (0..F::zero().serialized_size())
.map(|_| rng.gen())
.collect();
if let Some(p) = F::from_random_bytes(&bytes) {
return p;
}
}
}
/* hash function that takes as input:
(1) some state (either equal to the last hash output or zero)
(2) a vector of g1 elements
(3) a vector of g2 elements
(4) a vector of field elements
It returns a field element.
*/
pub fn hash_caulk_single<F: PrimeField>(
state: Fr,
g1_elements: Option< &Vec<G1Affine>>,
g2_elements: Option< &Vec<G2Affine>>,
field_elements: Option< &Vec<Fr>> ) -> Fr
{
// PERSONALIZATION distinguishes this hash from other hashes that may be in the system
const PERSONALIZATION: &[u8] = b"CAULK1";
///////////////////////////////////////////////////////////
// Handling cases where no g1_elements, no g2_elements, or no field elements are input
///////////////////////////////////////////////////////////
let g1_elements_len: usize;
let g2_elements_len: usize;
let field_elements_len: usize;
if g1_elements == None {
g1_elements_len = 0;
}
else {
g1_elements_len = g1_elements.unwrap().len();
}
if g2_elements == None {
g2_elements_len = 0;
}
else {
g2_elements_len = g2_elements.unwrap().len();
}
if field_elements == None {
field_elements_len = 0;
}
else {
field_elements_len = field_elements.unwrap().len();
}
///////////////////////////////////////////////////////////
// Transform inputs into bytes
///////////////////////////////////////////////////////////
let mut state_bytes = vec![];
state.serialize(&mut state_bytes).ok();
let mut g1_elements_bytes = Vec::new();
for i in 0..g1_elements_len {
let mut temp = vec![];
g1_elements.unwrap()[i].serialize( &mut temp ).ok();
g1_elements_bytes.append( &mut temp.clone() );
}
let mut g2_elements_bytes = Vec::new();
for i in 0..g2_elements_len {
let mut temp = vec![];
g2_elements.unwrap()[i].serialize( &mut temp ).ok();
g2_elements_bytes.append( &mut temp.clone() );
}
let mut field_elements_bytes = Vec::new();
for i in 0..field_elements_len {
let mut temp = vec![];
field_elements.unwrap()[i].serialize( &mut temp ).ok();
field_elements_bytes.append( &mut temp.clone() );
}
// Transform bytes into vector of bytes of the form expected by hash_to_field
let mut hash_input: Vec<u8> = state_bytes.clone();
for i in 0..g1_elements_bytes.len() {
hash_input = [ &hash_input as &[_], &[g1_elements_bytes[i]] ].concat();
}
for i in 0..g2_elements_bytes.len() {
hash_input = [ &hash_input as &[_], &[g2_elements_bytes[i]] ].concat();
}
for i in 0..field_elements_bytes.len() {
hash_input = [ &hash_input as &[_], &[field_elements_bytes[i]] ].concat();
}
// hash_to_field
return hash_to_field::<Fr>(
PERSONALIZATION,
&hash_input
);
}
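// Sketch of the intended state threading (illustrative only): each challenge is fed back in
// as the state of the next call, so later challenges depend on everything hashed so far.
fn hash_chaining_sketch() {
let mut state = Fr::zero();
state = hash_caulk_single::<Fr>(state, None, None, Some(&vec![Fr::from(7u64)]));
let next_challenge = hash_caulk_single::<Fr>(state, None, None, Some(&vec![Fr::from(11u64)]));
assert_ne!(state, next_challenge);
}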
/////////////////////////////////////////////////////////////////////
// Random field element
/////////////////////////////////////////////////////////////////////
// generating a random field element
pub fn random_field< F: PrimeField >() -> F {
let mut rng = thread_rng();
loop {
let bytes: Vec<u8> = (0..F::zero().serialized_size())
.map(|_| rng.gen())
.collect();
if let Some(p) = F::from_random_bytes(&bytes) {
return p;
}
}
}

138
examples/multi_lookup.rs Normal file

@@ -0,0 +1,138 @@
use ark_bls12_381::{Bls12_381, Fr};
use ark_poly::{univariate::DensePolynomial, EvaluationDomain};
use ark_poly_commit::{Polynomial, UVPolynomial};
use ark_std::{test_rng, time::Instant, UniformRand};
use caulk::{
multi::{
compute_lookup_proof, get_poly_and_g2_openings, verify_lookup_proof, LookupInstance,
LookupProverInput,
},
KZGCommit, PublicParameters,
};
use rand::Rng;
use std::{cmp::max, error::Error, io, str::FromStr};
// Function for reading inputs from the command line.
fn read_line<T: FromStr>() -> T
where
<T as FromStr>::Err: Error + 'static,
{
let mut input = String::new();
io::stdin()
.read_line(&mut input)
.expect("Failed to get console input.");
let output: T = input.trim().parse().expect("Console input is invalid.");
output
}
#[allow(non_snake_case)]
fn main() {
let mut rng = test_rng();
// 1. Setup
// setting public parameters
// current kzg setup should be changed with output from a setup ceremony
println!("What is the bitsize of the degree of the polynomial inside the commitment? ");
let n: usize = read_line();
println!("How many positions m do you want to open the polynomial at? ");
let m: usize = read_line();
let N: usize = 1 << n;
let powers_size: usize = max(N + 2, 1024);
let actual_degree = N - 1;
let temp_m = n; // dummy
let now = Instant::now();
let mut pp = PublicParameters::<Bls12_381>::setup(&powers_size, &N, &temp_m, &n);
println!(
"Time to setup multi openings of table size {:?} = {:?}",
actual_degree + 1,
now.elapsed()
);
// 2. Poly and openings
let now = Instant::now();
let table = get_poly_and_g2_openings(&pp, actual_degree);
println!("Time to generate commitment table = {:?}", now.elapsed());
// 3. Setup
pp.regenerate_lookup_params(m);
// 4. Positions
// let mut rng = rand::thread_rng();
let mut positions: Vec<usize> = vec![];
for _ in 0..m {
// generate positions randomly in the set
// let i_j: usize = j*(actual_degree/m);
let i_j: usize = rng.gen_range(0..actual_degree);
positions.push(i_j);
}
println!("positions = {:?}", positions);
// 5. generating phi
let blinder = Fr::rand(&mut rng);
let a_m = DensePolynomial::from_coefficients_slice(&[blinder]);
let mut phi_poly = a_m.mul_by_vanishing_poly(pp.domain_m);
let c_poly_local = table.c_poly.clone();
for j in 0..m {
phi_poly = &phi_poly
+ &(&pp.lagrange_polynomials_m[j]
* c_poly_local.evaluate(&pp.domain_N.element(positions[j]))); // adding c(w^{i_j})*mu_j(X)
}
for j in m..pp.domain_m.size() {
phi_poly = &phi_poly
+ &(&pp.lagrange_polynomials_m[j] * c_poly_local.evaluate(&pp.domain_N.element(0)));
// padding the remaining slots with c(w^0)*mu_j(X)
}
// 6. Running proofs
let now = Instant::now();
let c_com = KZGCommit::<Bls12_381>::commit_g1(&pp.poly_ck, &table.c_poly);
let phi_com = KZGCommit::<Bls12_381>::commit_g1(&pp.poly_ck, &phi_poly);
println!("Time to generate inputs = {:?}", now.elapsed());
let lookup_instance = LookupInstance { c_com, phi_com };
let prover_input = LookupProverInput {
c_poly: table.c_poly.clone(),
phi_poly,
positions,
openings: table.openings.clone(),
};
println!("We are now ready to run the prover. How many times should we run it?");
let number_of_openings: usize = read_line();
let now = Instant::now();
let (proof, unity_proof) = compute_lookup_proof(&lookup_instance, &prover_input, &pp, &mut rng);
for _ in 1..number_of_openings {
_ = compute_lookup_proof(&lookup_instance, &prover_input, &pp, &mut rng);
}
println!(
"Time to evaluate {} times {} multi-openings of table size {:?} = {:?} ",
number_of_openings,
m,
N,
now.elapsed()
);
let now = Instant::now();
for _ in 0..number_of_openings {
verify_lookup_proof(&table.c_com, &phi_com, &proof, &unity_proof, &pp, &mut rng);
}
println!(
"Time to verify {} times {} multi-openings of table size {:?} = {:?} ",
number_of_openings,
m,
N,
now.elapsed()
);
assert!(
verify_lookup_proof(&table.c_com, &phi_com, &proof, &unity_proof, &pp, &mut rng),
"Result does not verify"
);
}

174
examples/single_opening.rs Normal file

@@ -0,0 +1,174 @@
use ark_bls12_381::{Bls12_381, Fr, G1Affine};
use ark_ec::{AffineCurve, ProjectiveCurve};
use ark_poly::{
univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain, Polynomial,
UVPolynomial,
};
use ark_poly_commit::kzg10::KZG10;
use ark_std::{test_rng, UniformRand};
use caulk::{
caulk_single_prove, caulk_single_setup, caulk_single_verify, CaulkTranscript, KZGCommit,
};
use std::{error::Error, io, str::FromStr, time::Instant};
type UniPoly381 = DensePolynomial<Fr>;
type KzgBls12_381 = KZG10<Bls12_381, UniPoly381>;
// Function for reading inputs from the command line.
fn read_line<T: FromStr>() -> T
where
<T as FromStr>::Err: Error + 'static,
{
let mut input = String::new();
io::stdin()
.read_line(&mut input)
.expect("Failed to get console input.");
let output: T = input.trim().parse().expect("Console input is invalid.");
output
}
#[allow(non_snake_case)]
fn main() {
let mut rng = test_rng();
// setting public parameters
// current kzg setup should be changed with output from a setup ceremony
println!("What is the bitsize of the degree of the polynomial inside the commitment? ");
let p: usize = read_line();
let max_degree: usize = (1 << p) + 2;
let actual_degree: usize = (1 << p) - 1;
// run the setup
let now = Instant::now();
let pp = caulk_single_setup(max_degree, actual_degree, &mut rng);
println!(
"Time to setup single openings of table size {:?} = {:?}",
actual_degree + 1,
now.elapsed()
);
// polynomial and commitment
let now = Instant::now();
// deterministic randomness. Should never be used in practice.
let c_poly = UniPoly381::rand(actual_degree, &mut rng);
let (g1_C, _) = KzgBls12_381::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
let g1_C = g1_C.0;
println!(
"Time to KZG commit one element from table size {:?} = {:?}",
actual_degree + 1,
now.elapsed()
);
// point at which we will open c_com
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(actual_degree).unwrap();
println!("Which position in the vector should we open at? ");
let position: usize = read_line();
assert!(0 < position, "This position does not exist in this vector.");
assert!(
position <= (actual_degree + 1),
"This position does not exist in this vector."
);
let omega_i = input_domain.element(position);
// Deciding whether to open all positions or just the one position.
println!("Should we open all possible positions? Opening all possible positions is slow. Please input either YES or NO" );
let open_all: String = read_line();
let g1_q: G1Affine;
if (open_all == "NO") || (open_all == "No") || (open_all == "no") {
// Q = g1_q = g^( (c(x) - c(w_i)) / (x - w_i) )
let now = Instant::now();
let a = KZGCommit::open_g1_batch(&pp.poly_ck, &c_poly, None, &[omega_i]);
println!(
"Time to KZG open one element from table size {:?} = {:?}",
actual_degree + 1,
now.elapsed()
);
g1_q = a.1;
} else {
assert!(
(open_all == "YES") || (open_all == "Yes") || (open_all == "yes"),
"Console input is invalid"
);
// compute all openings
let now = Instant::now();
let g1_qs =
KZGCommit::<Bls12_381>::multiple_open::<G1Affine>(&c_poly, &pp.poly_ck.powers_of_g, p);
g1_q = g1_qs[position];
println!("Time to compute all KZG openings {:?}", now.elapsed());
}
// z = c(w_i) and cm = g^z h^r for random r
let z = c_poly.evaluate(&omega_i);
let r = Fr::rand(&mut rng);
let cm = (pp.verifier_pp.pedersen_param.g.mul(z) + pp.verifier_pp.pedersen_param.h.mul(r))
.into_affine();
let mut prover_transcript = CaulkTranscript::<Fr>::new();
let mut verifier_transcript = CaulkTranscript::<Fr>::new();
// run the prover
println!("We are now ready to run the prover. How many times should we run it?");
let number_of_openings: usize = read_line();
let now = Instant::now();
let mut proof_evaluate = caulk_single_prove(
&pp,
&mut prover_transcript,
&g1_C,
&cm,
position,
&g1_q,
&z,
&r,
&mut rng,
);
for _ in 1..(number_of_openings - 1) {
proof_evaluate = caulk_single_prove(
&pp,
&mut prover_transcript,
&g1_C,
&cm,
position,
&g1_q,
&z,
&r,
&mut rng,
);
}
println!(
"Time to evaluate {} single openings of table size {:?} = {:?}",
number_of_openings,
actual_degree + 1,
now.elapsed()
);
// run the verifier
println!(
"The proof verifies = {:?}",
caulk_single_verify(
&pp.verifier_pp,
&mut verifier_transcript,
&g1_C,
&cm,
&proof_evaluate,
)
);
let now = Instant::now();
for _ in 0..(number_of_openings - 1) {
caulk_single_verify(
&pp.verifier_pp,
&mut verifier_transcript,
&g1_C,
&cm,
&proof_evaluate,
);
}
println!(
"Time to verify {} single openings of table size {:?} = {:?}",
number_of_openings,
actual_degree + 1,
now.elapsed()
);
}

9
rustfmt.toml Normal file

@@ -0,0 +1,9 @@
reorder_imports = true
wrap_comments = true
normalize_comments = true
use_try_shorthand = true
match_block_trailing_comma = true
use_field_init_shorthand = true
edition = "2018"
condense_wildcard_suffixes = true
imports_granularity = "Crate"

254
src/dft.rs Normal file

@@ -0,0 +1,254 @@
// This file includes an algorithm for calculating n openings of a KZG vector
// commitment of size n in n log(n) time. The algorithm is by Feist and
// khovratovich. It is useful for preprocessing.
// The full algorithm is described here https://github.com/khovratovich/Kate/blob/master/Kate_amortized.pdf
use ark_ec::ProjectiveCurve;
use ark_ff::PrimeField;
use ark_poly::{
univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain, UVPolynomial,
};
use ark_std::{end_timer, start_timer};
use std::vec::Vec;
// compute all pre-proofs using DFT
// h_i= c_d[x^{d-i-1}]+c_{d-1}[x^{d-i-2}]+c_{d-2}[x^{d-i-3}]+\cdots +
// c_{i+2}[x]+c_{i+1}[1]
pub fn compute_h<F, G>(
c_poly: &DensePolynomial<F>, /* c(X) degree up to d<2^p , i.e. c_poly has at most d+1 coeffs
* non-zero */
powers: &[G], // SRS
p: usize,
) -> Vec<G>
where
F: PrimeField,
G: ProjectiveCurve,
{
let timer = start_timer!(|| "compute h");
let mut coeffs = c_poly.coeffs().to_vec();
let dom_size = 1 << p;
let fpzero = F::zero();
coeffs.resize(dom_size, fpzero);
// 1. x_ext = [ [x^(d-1)], [x^(d-2)], ..., [x], [1], d+2 [0]'s ]
let step1_timer = start_timer!(|| "step 1");
let mut x_ext: Vec<G> = powers.iter().take(dom_size - 1).rev().copied().collect();
x_ext.resize(2 * dom_size, G::zero()); // filling 2d+2 neutral elements
let y = group_dft::<F, G>(&x_ext, p + 1);
end_timer!(step1_timer);
// 2. c_ext = [c_d, d zeroes, c_d,c_{0},c_1,...,c_{d-2},c_{d-1}]
let step2_timer = start_timer!(|| "step 2");
let mut c_ext = vec![coeffs[coeffs.len() - 1]];
c_ext.resize(dom_size, fpzero);
c_ext.push(coeffs[coeffs.len() - 1]);
for &e in coeffs.iter().take(coeffs.len() - 1) {
c_ext.push(e);
}
assert_eq!(c_ext.len(), 2 * dom_size);
let v = field_dft::<F>(&c_ext, p + 1);
end_timer!(step2_timer);
// 3. u = y o v
let step3_timer = start_timer!(|| "step 3");
let u: Vec<_> = y
.into_iter()
.zip(v.into_iter())
.map(|(a, b)| a.mul(b.into_repr()))
.collect();
end_timer!(step3_timer);
// 4. h_ext = idft_{2d+2}(u)
let step4_timer = start_timer!(|| "step 4");
let h_ext = group_inv_dft::<F, G>(&u, p + 1);
end_timer!(step4_timer);
end_timer!(timer);
h_ext[0..dom_size].to_vec()
}
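// Sanity sketch (hypothetical test, not in this file): on a tiny instance the output of
// compute_h should match the direct definition h_i = sum_{k > i} c_k * powers[k - i - 1].
#[cfg(test)]
mod compute_h_sanity {
use super::*;
use ark_bls12_381::{Fr, G1Projective};
use ark_std::{test_rng, UniformRand, Zero};
#[test]
fn h_matches_direct_sum() {
let mut rng = test_rng();
let p = 2usize;
let n: usize = 1 << p;
let c_poly = DensePolynomial::from_coefficients_vec((1..=n as u64).map(|k| Fr::from(k)).collect());
let powers: Vec<G1Projective> = (0..n).map(|_| G1Projective::rand(&mut rng)).collect();
let h = compute_h::<Fr, G1Projective>(&c_poly, &powers, p);
for i in 0..n {
let mut expected = G1Projective::zero();
for k in (i + 1)..n {
expected += powers[k - i - 1].mul(c_poly.coeffs[k].into_repr());
}
assert_eq!(h[i], expected);
}
}
}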
// compute DFT of size @dom_size over vector of group elements
// q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for
// 0<= i< dom_size=2^p
pub fn group_dft<F, G>(h: &[G], p: usize) -> Vec<G>
where
F: PrimeField,
G: ProjectiveCurve,
{
let dom_size = 1 << p;
let timer = start_timer!(|| format!("size {} group dft", dom_size));
assert_eq!(h.len(), dom_size); // we do not support inputs of size not power of 2
let input_domain: GeneralEvaluationDomain<F> = EvaluationDomain::new(dom_size).unwrap();
let mut l = dom_size / 2;
let mut m: usize = 1;
// Stockham FFT
let mut xvec = h.to_vec();
for _ in 0..p {
let mut xt = xvec.clone();
for j in 0..l {
for k in 0..m {
let c0 = xvec[k + j * m];
let c1 = xvec[k + j * m + l * m];
xt[k + 2 * j * m] = c0 + c1;
let wj_2l = input_domain.element((j * dom_size / (2 * l)) % dom_size);
xt[k + 2 * j * m + m] = (c0 - c1).mul(wj_2l.into_repr());
}
}
l /= 2;
m *= 2;
xvec = xt;
}
end_timer!(timer);
xvec
}
// compute DFT of size @dom_size over vector of Fr elements
// q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for
// 0<= i< dom_size=2^p
pub fn field_dft<F: PrimeField>(h: &[F], p: usize) -> Vec<F> {
let dom_size = 1 << p;
let timer = start_timer!(|| format!("size {} field dft", dom_size));
assert_eq!(h.len(), dom_size); // we do not support inputs of size not power of 2
let input_domain: GeneralEvaluationDomain<F> = EvaluationDomain::new(dom_size).unwrap();
let mut l = dom_size / 2;
let mut m: usize = 1;
// Stockham FFT
let mut xvec = h.to_vec();
for _ in 0..p {
let mut xt = xvec.clone();
for j in 0..l {
for k in 0..m {
let c0 = xvec[k + j * m];
let c1 = xvec[k + j * m + l * m];
xt[k + 2 * j * m] = c0 + c1;
let wj_2l = input_domain.element((j * dom_size / (2 * l)) % dom_size);
xt[k + 2 * j * m + m] = (c0 - c1) * (wj_2l);
}
}
l /= 2;
m *= 2;
xvec = xt;
}
end_timer!(timer);
xvec
}
// compute idft of size @dom_size over vector of group elements
// q_i = (h_0 + h_1w^-i + h_2w^{-2i}+\cdots +
// h_{dom_size-1}w^{-(dom_size-1)i})/dom_size for 0<= i< dom_size=2^p
pub fn group_inv_dft<F, G>(h: &[G], p: usize) -> Vec<G>
where
F: PrimeField,
G: ProjectiveCurve,
{
let dom_size = 1 << p;
let timer = start_timer!(|| format!("size {} group inverse dft", dom_size));
assert_eq!(h.len(), dom_size); // we do not support inputs of size not power of 2
let input_domain: GeneralEvaluationDomain<F> = EvaluationDomain::new(dom_size).unwrap();
let mut l = dom_size / 2;
let mut m: usize = 1;
// Stockham FFT
let mut xvec = h.to_vec();
for _ in 0..p {
let mut xt = xvec.clone();
for j in 0..l {
for k in 0..m {
let c0 = xvec[k + j * m];
let c1 = xvec[k + j * m + l * m];
xt[k + 2 * j * m] = c0 + c1;
let wj_2l = input_domain
.element((dom_size - (j * dom_size / (2 * l)) % dom_size) % dom_size);
xt[k + 2 * j * m + m] = (c0 - c1).mul(wj_2l.into_repr()); // Difference #1 to forward DFT
}
}
l /= 2;
m *= 2;
xvec = xt;
}
let domain_inverse = F::from(1u64 << p).inverse().unwrap().into_repr();
let res = xvec.iter().map(|x| x.mul(domain_inverse)).collect();
end_timer!(timer);
res
}
// compute idft of size @dom_size over vector of field elements
// q_i = (h_0 + h_1w^-i + h_2w^{-2i}+\cdots +
// h_{dom_size-1}w^{-(dom_size-1)i})/dom_size for 0<= i< dom_size=2^p
pub fn field_inv_dft<F: PrimeField>(h: &[F], p: usize) -> Vec<F> {
let dom_size = 1 << p;
let timer = start_timer!(|| format!("size {} field inverse dft", dom_size));
assert_eq!(h.len(), dom_size); // we do not support inputs of size not power of 2
let input_domain: GeneralEvaluationDomain<F> = EvaluationDomain::new(dom_size).unwrap();
let mut l = dom_size / 2;
let mut m: usize = 1;
// Stockham FFT
let mut xvec = h.to_vec();
for _ in 0..p {
let mut xt = xvec.clone();
for j in 0..l {
for k in 0..m {
let c0 = xvec[k + j * m];
let c1 = xvec[k + j * m + l * m];
xt[k + 2 * j * m] = c0 + c1;
let wj_2l = input_domain
.element((dom_size - (j * dom_size / (2 * l)) % dom_size) % dom_size);
xt[k + 2 * j * m + m] = (c0 - c1) * wj_2l; // Difference #1 to forward DFT
}
}
l /= 2;
m *= 2;
xvec = xt;
}
let domain_inverse = F::from(1u64 << p).inverse().unwrap();
let res = xvec.iter().map(|&x| x * domain_inverse).collect();
end_timer!(timer);
res
}
#[cfg(test)]
pub mod tests {
use super::*;
use ark_bls12_377::Bls12_377;
use ark_bls12_381::Bls12_381;
use ark_ec::PairingEngine;
use ark_std::{test_rng, UniformRand};
#[test]
fn test_dft() {
test_dft_helper::<Bls12_381>();
test_dft_helper::<Bls12_377>();
}
fn test_dft_helper<E: PairingEngine>() {
let mut rng = test_rng();
for i in 2..6 {
let size = 1 << i;
let h: Vec<E::Fr> = (0..size).map(|_| E::Fr::rand(&mut rng)).collect();
let c_dft = field_dft::<E::Fr>(&h, i);
let c_back = field_inv_dft::<E::Fr>(&c_dft, i);
assert_eq!(h, c_back);
let h: Vec<E::G1Projective> =
(0..size).map(|_| E::G1Projective::rand(&mut rng)).collect();
let c_dft = group_dft::<E::Fr, E::G1Projective>(&h, i);
let c_back = group_inv_dft::<E::Fr, E::G1Projective>(&c_dft, i);
assert_eq!(h, c_back);
let h: Vec<E::G2Projective> =
(0..size).map(|_| E::G2Projective::rand(&mut rng)).collect();
let c_dft = group_dft::<E::Fr, E::G2Projective>(&h, i);
let c_back = group_inv_dft::<E::Fr, E::G2Projective>(&c_dft, i);
assert_eq!(h, c_back);
}
}
}

789
src/kzg.rs Normal file

@@ -0,0 +1,789 @@
// This file includes backend tools:
// (1) read_line() is for taking inputs from the user
// (2) kzg_open_g1 is for opening KZG commitments
// (3) kzg_verify_g1 is for verifying KZG commitments
// (4) hash_caulk_single is for hashing group and field elements into a field element
// (5) random_field is for generating random field elements
use crate::{compute_h, group_dft, util::convert_to_bigints};
use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::{Field, PrimeField};
use ark_poly::{
univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain, Polynomial,
UVPolynomial,
};
use ark_poly_commit::kzg10::*;
use ark_std::{end_timer, start_timer, One, Zero};
use std::marker::PhantomData;
/////////////////////////////////////////////////////////////////////
// KZG opening and verifying
/////////////////////////////////////////////////////////////////////
pub struct KZGCommit<E: PairingEngine> {
phantom: PhantomData<E>,
}
impl<E: PairingEngine> KZGCommit<E> {
pub fn commit_g1(powers: &Powers<E>, polynomial: &DensePolynomial<E::Fr>) -> E::G1Affine {
let timer = start_timer!(|| "kzg g1 commit");
let (com, _randomness) = KZG10::<E, _>::commit(powers, polynomial, None, None).unwrap();
end_timer!(timer);
com.0
}
pub fn commit_g2(g2_powers: &[E::G2Affine], poly: &DensePolynomial<E::Fr>) -> E::G2Affine {
let timer = start_timer!(|| "kzg g2 commit");
let poly_coeffs: Vec<<E::Fr as PrimeField>::BigInt> =
poly.coeffs.iter().map(|&x| x.into_repr()).collect();
let res = VariableBaseMSM::multi_scalar_mul(g2_powers, &poly_coeffs).into_affine();
end_timer!(timer);
res
}
// Function to commit to f(X,Y)
// here f = [ [a0, a1, a2], [b0, b1, b2] ] represents (a0 + a1 Y + a2 Y^2 ) + X
// (b0 + b1 Y + b2 Y^2)
//
// First we unwrap to get a vector of form [a0, a1, a2, b0, b1, b2]
// Then we commit to f as a commitment to f'(X) = a0 + a1 X + a2 X^2 + b0 X^3 +
// b1 X^4 + b2 X^5
//
// We also need to know the maximum degree of (a0 + a1 Y + a2 Y^2 ) to prevent
// overflow errors.
//
// This is described in Section 4.6.2
pub fn bipoly_commit(
pp: &crate::multi::PublicParameters<E>,
polys: &[DensePolynomial<E::Fr>],
deg_x: usize,
) -> E::G1Affine {
let timer = start_timer!(|| "kzg bipoly commit");
let mut poly_formatted = Vec::new();
for poly in polys {
let temp = convert_to_bigints(&poly.coeffs);
poly_formatted.extend_from_slice(&temp);
for _ in poly.len()..deg_x {
poly_formatted.push(E::Fr::zero().into_repr());
}
}
assert!(pp.poly_ck.powers_of_g.len() >= poly_formatted.len());
let g1_poly =
VariableBaseMSM::multi_scalar_mul(&pp.poly_ck.powers_of_g, poly_formatted.as_slice())
.into_affine();
end_timer!(timer);
g1_poly
}
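// A small sketch of the flattening described above (illustrative only, no SRS or commitment
// involved), assuming deg_x = 3: [[a0, a1, a2], [b0, b1]] is laid out as
// [a0, a1, a2, b0, b1, 0], i.e. a0 + a1 X + a2 X^2 + b0 X^3 + b1 X^4.
#[cfg(test)]
fn bipoly_flattening_sketch() {
use ark_bls12_381::Fr;
let deg_x = 3usize;
let polys = vec![
DensePolynomial::from_coefficients_slice(&[Fr::from(1u64), Fr::from(2u64), Fr::from(3u64)]),
DensePolynomial::from_coefficients_slice(&[Fr::from(4u64), Fr::from(5u64)]),
];
let mut flat = Vec::new();
for poly in &polys {
flat.extend_from_slice(&poly.coeffs);
for _ in poly.len()..deg_x {
flat.push(Fr::zero());
}
}
let expected = vec![Fr::from(1u64), Fr::from(2u64), Fr::from(3u64), Fr::from(4u64), Fr::from(5u64), Fr::zero()];
assert_eq!(flat, expected);
}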
// compute all openings to c_poly using a smart formula
// This Code implements an algorithm for calculating n openings of a KZG vector
// commitment of size n in n log(n) time. The algorithm is by Feist and
// Khovratovich. It is useful for preprocessing.
// The full algorithm is described here https://github.com/khovratovich/Kate/blob/master/Kate_amortized.pdf
pub fn multiple_open<G>(
c_poly: &DensePolynomial<E::Fr>, // c(X)
powers: &[G], // SRS
p: usize,
) -> Vec<G>
where
G: AffineCurve<ScalarField = E::Fr> + Sized,
{
let timer = start_timer!(|| "multiple open");
let degree = c_poly.coeffs.len() - 1;
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(degree).unwrap();
let h_timer = start_timer!(|| "compute h");
let powers: Vec<G::Projective> = powers.iter().map(|x| x.into_projective()).collect();
let h2 = compute_h(c_poly, &powers, p);
end_timer!(h_timer);
let dom_size = input_domain.size();
assert_eq!(1 << p, dom_size);
assert_eq!(degree + 1, dom_size);
let dft_timer = start_timer!(|| "G1 dft");
let q2 = group_dft::<E::Fr, G::Projective>(&h2, p);
end_timer!(dft_timer);
let normalization_timer = start_timer!(|| "batch normalization");
let res = G::Projective::batch_normalization_into_affine(q2.as_ref());
end_timer!(normalization_timer);
end_timer!(timer);
res
}
////////////////////////////////////////////////
// KZG.Open( srs_KZG, f(X, Y), deg, alpha )
// returns ([f(alpha, x)]_1, pi)
// Algorithm described in Section 4.6.2, KZG for Bivariate Polynomials
pub fn partial_open_g1(
pp: &crate::multi::PublicParameters<E>,
polys: &[DensePolynomial<E::Fr>],
deg_x: usize,
point: &E::Fr,
) -> (E::G1Affine, E::G1Affine, DensePolynomial<E::Fr>) {
let timer = start_timer!(|| "kzg partial open g1");
let mut poly_partial_eval = DensePolynomial::from_coefficients_vec(vec![E::Fr::zero()]);
let mut alpha = E::Fr::one();
for coeff in polys {
let pow_alpha = DensePolynomial::from_coefficients_vec(vec![alpha]);
poly_partial_eval += &(&pow_alpha * coeff);
alpha *= point;
}
let eval = VariableBaseMSM::multi_scalar_mul(
&pp.poly_ck.powers_of_g,
convert_to_bigints(&poly_partial_eval.coeffs).as_slice(),
)
.into_affine();
let mut witness_bipolynomial = Vec::new();
let poly_reverse: Vec<_> = polys.iter().rev().collect();
witness_bipolynomial.push(poly_reverse[0].clone());
let alpha = DensePolynomial::from_coefficients_vec(vec![*point]);
for i in 1..(poly_reverse.len() - 1) {
witness_bipolynomial.push(poly_reverse[i] + &(&alpha * &witness_bipolynomial[i - 1]));
}
witness_bipolynomial.reverse();
let proof = Self::bipoly_commit(pp, &witness_bipolynomial, deg_x);
end_timer!(timer);
(eval, proof, poly_partial_eval)
}
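// The partial evaluation above is f(alpha, Y) = sum_i alpha^i * polys[i](Y). A toy sketch
// (illustrative only, not part of this file): for f(X, Y) = (1 + 2Y) + X (3 + 4Y) and
// alpha = 5, f(5, Y) = 16 + 22Y.
#[cfg(test)]
fn partial_eval_sketch() {
use ark_bls12_381::Fr;
let polys = vec![
DensePolynomial::from_coefficients_slice(&[Fr::from(1u64), Fr::from(2u64)]),
DensePolynomial::from_coefficients_slice(&[Fr::from(3u64), Fr::from(4u64)]),
];
let point = Fr::from(5u64);
let mut partial = DensePolynomial::from_coefficients_vec(vec![Fr::zero()]);
let mut alpha = Fr::one();
for coeff in &polys {
let pow_alpha = DensePolynomial::from_coefficients_vec(vec![alpha]);
partial += &(&pow_alpha * coeff);
alpha *= point;
}
assert_eq!(partial.coeffs, vec![Fr::from(16u64), Fr::from(22u64)]);
}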
// KZG.Open( srs_KZG, f(X), deg, (alpha1, alpha2, ..., alphan) )
// returns ([f(alpha1), ..., f(alphan)], pi)
// Algorithm described in Section 4.6.1, Multiple Openings
pub fn open_g1_batch(
poly_ck: &Powers<E>,
poly: &DensePolynomial<E::Fr>,
max_deg: Option<&usize>,
points: &[E::Fr],
) -> (Vec<E::Fr>, E::G1Affine) {
let timer = start_timer!(|| "kzg batch open g1");
let mut evals = Vec::new();
let mut proofs = Vec::new();
for p in points.iter() {
let (eval, pi) = Self::open_g1_single(poly_ck, poly, max_deg, p);
evals.push(eval);
proofs.push(pi);
}
let mut res = E::G1Projective::zero(); // default value
for j in 0..points.len() {
let w_j = points[j];
// 1. Computing coefficient [1/prod]
let mut prod = E::Fr::one();
for (k, p) in points.iter().enumerate() {
if k != j {
prod *= w_j - p;
}
}
// 2. Summation
let q_add = proofs[j].mul(prod.inverse().unwrap()); //[1/prod]Q_{j}
res += q_add;
}
end_timer!(timer);
(evals, res.into_affine())
}
// KZG.Open( srs_KZG, f(X), deg, alpha ) returns (f(alpha), pi)
fn open_g1_single(
poly_ck: &Powers<E>,
poly: &DensePolynomial<E::Fr>,
max_deg: Option<&usize>,
point: &E::Fr,
) -> (E::Fr, E::G1Affine) {
let timer = start_timer!(|| "kzg open g1");
let eval = poly.evaluate(point);
let global_max_deg = poly_ck.powers_of_g.len();
let mut d: usize = 0;
if max_deg == None {
d += global_max_deg;
} else {
d += max_deg.unwrap();
}
let divisor = DensePolynomial::from_coefficients_vec(vec![-*point, E::Fr::one()]);
let witness_polynomial = poly / &divisor;
assert!(poly_ck.powers_of_g[(global_max_deg - d)..].len() >= witness_polynomial.len());
let proof = VariableBaseMSM::multi_scalar_mul(
&poly_ck.powers_of_g[(global_max_deg - d)..],
convert_to_bigints(&witness_polynomial.coeffs).as_slice(),
)
.into_affine();
end_timer!(timer);
(eval, proof)
}
// KZG.Verify( srs_KZG, F, deg, (alpha1, alpha2, ..., alphan), (v1, ..., vn), pi
// ) Algorithm described in Section 4.6.1, Multiple Openings
pub fn verify_g1(
// Verify that @c_com is a commitment to C(X) such that C(x)=z
powers_of_g1: &[E::G1Affine], // generator of G1
powers_of_g2: &[E::G2Affine], // [1]_2, [x]_2, [x^2]_2, ...
c_com: &E::G1Affine, // commitment
max_deg: Option<&usize>, // max degree
points: &[E::Fr], // x such that eval = C(x)
evals: &[E::Fr], // evaluation
pi: &E::G1Affine, // proof
) -> bool {
let timer = start_timer!(|| "kzg verify g1");
let pairing_inputs = Self::verify_g1_defer_pairing(
powers_of_g1,
powers_of_g2,
c_com,
max_deg,
points,
evals,
pi,
);
let pairing_timer = start_timer!(|| "pairing product");
let prepared_pairing_inputs = vec![
(
E::G1Prepared::from(pairing_inputs[0].0.into_affine()),
E::G2Prepared::from(pairing_inputs[0].1.into_affine()),
),
(
E::G1Prepared::from(pairing_inputs[1].0.into_affine()),
E::G2Prepared::from(pairing_inputs[1].1.into_affine()),
),
];
let res = E::product_of_pairings(prepared_pairing_inputs.iter()).is_one();
end_timer!(pairing_timer);
end_timer!(timer);
res
}
// KZG.Verify( srs_KZG, F, deg, (alpha1, alpha2, ..., alphan), (v1, ..., vn), pi
// ) Algorithm described in Section 4.6.1, Multiple Openings
pub fn verify_g1_defer_pairing(
// Verify that @c_com is a commitment to C(X) such that C(x)=z
powers_of_g1: &[E::G1Affine], // generator of G1
powers_of_g2: &[E::G2Affine], // [1]_2, [x]_2, [x^2]_2, ...
c_com: &E::G1Affine, // commitment
max_deg: Option<&usize>, // max degree
points: &[E::Fr], // x such that eval = C(x)
evals: &[E::Fr], // evaluation
pi: &E::G1Affine, // proof
) -> Vec<(E::G1Projective, E::G2Projective)> {
let timer = start_timer!(|| "kzg verify g1 (deferring pairing)");
// Interpolation set
// tau_i(X) = lagrange_tau[i] = polynomial equal to 0 at point[j] for j!= i and
// 1 at points[i]
let mut lagrange_tau = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
let mut prod = DensePolynomial::from_coefficients_slice(&[E::Fr::one()]);
let mut components = vec![];
for &p in points.iter() {
let poly = DensePolynomial::from_coefficients_slice(&[-p, E::Fr::one()]);
prod = &prod * (&poly);
components.push(poly);
}
for i in 0..points.len() {
let mut temp = &prod / &components[i];
let lagrange_scalar = temp.evaluate(&points[i]).inverse().unwrap() * evals[i];
temp.coeffs.iter_mut().for_each(|x| *x *= lagrange_scalar);
lagrange_tau = lagrange_tau + temp;
}
// commit to sum evals[i] tau_i(X)
assert!(
powers_of_g1.len() >= lagrange_tau.len(),
"KZG verifier doesn't have enough g1 powers"
);
let g1_tau = VariableBaseMSM::multi_scalar_mul(
&powers_of_g1[..lagrange_tau.len()],
convert_to_bigints(&lagrange_tau.coeffs).as_slice(),
);
// vanishing polynomial
let z_tau = prod;
// commit to z_tau(X) in g2
assert!(
powers_of_g2.len() >= z_tau.len(),
"KZG verifier doesn't have enough g2 powers"
);
let g2_z_tau = VariableBaseMSM::multi_scalar_mul(
&powers_of_g2[..z_tau.len()],
convert_to_bigints(&z_tau.coeffs).as_slice(),
);
let global_max_deg = powers_of_g1.len();
let mut d: usize = 0;
if max_deg == None {
d += global_max_deg;
} else {
d += max_deg.unwrap();
}
let res = vec![
(
g1_tau - c_com.into_projective(),
powers_of_g2[global_max_deg - d].into_projective(),
),
(pi.into_projective(), g2_z_tau),
];
end_timer!(timer);
res
}
// KZG.Verify( srs_KZG, F, deg, alpha, F_alpha, pi )
// Algorithm described in Section 4.6.2, KZG for Bivariate Polynomials
// Be very careful here. Verification is only valid if it is paired with a
// degree check.
pub fn partial_verify_g1(
srs: &crate::multi::PublicParameters<E>,
c_com: &E::G1Affine, // commitment
deg_x: usize,
point: &E::Fr,
partial_eval: &E::G1Affine,
pi: &E::G1Affine, // proof
) -> bool {
let timer = start_timer!(|| "kzg partial verify g1");
let pairing_inputs =
Self::partial_verify_g1_defer_pairing(srs, c_com, deg_x, point, partial_eval, pi);
let pairing_timer = start_timer!(|| "pairing product");
let prepared_pairing_inputs = vec![
(
E::G1Prepared::from(pairing_inputs[0].0.into_affine()),
E::G2Prepared::from(pairing_inputs[0].1.into_affine()),
),
(
E::G1Prepared::from(pairing_inputs[1].0.into_affine()),
E::G2Prepared::from(pairing_inputs[1].1.into_affine()),
),
];
let res = E::product_of_pairings(prepared_pairing_inputs.iter()).is_one();
end_timer!(pairing_timer);
end_timer!(timer);
res
}
// KZG.Verify( srs_KZG, F, deg, alpha, F_alpha, pi )
// Algorithm described in Section 4.6.2, KZG for Bivariate Polynomials
// Be very careful here. Verification is only valid if it is paired with a
// degree check.
pub fn partial_verify_g1_defer_pairing(
srs: &crate::multi::PublicParameters<E>,
c_com: &E::G1Affine, // commitment
deg_x: usize,
point: &E::Fr,
partial_eval: &E::G1Affine,
pi: &E::G1Affine, // proof
) -> Vec<(E::G1Projective, E::G2Projective)> {
let timer = start_timer!(|| "kzg partial verify g1 (deferring pairing)");
let res = vec![
(
partial_eval.into_projective() - c_com.into_projective(),
srs.g2_powers[0].into_projective(),
),
(
pi.into_projective(),
srs.g2_powers[deg_x].into_projective() - srs.g2_powers[0].mul(*point),
),
];
end_timer!(timer);
res
}
// Algorithm for aggregating KZG proofs into a single proof
// Described in Section 4.6.3 Subset openings
// compute Q = \sum_{j=1}^m \frac{Q_{i_j}}{\prod_{1\leq k\leq m,\ k\neq j}(\omega^{i_j}-\omega^{i_k})}
pub fn aggregate_proof_g2(
openings: &[E::G2Affine], // Q_i
positions: &[usize], // i_j
input_domain: &GeneralEvaluationDomain<E::Fr>,
) -> E::G2Affine {
let timer = start_timer!(|| "kzg aggregate proof");
let m = positions.len();
let mut res = openings[0].into_projective(); // default value
for j in 0..m {
let i_j = positions[j];
let w_ij = input_domain.element(i_j);
// 1. Computing coefficient [1/prod]
let mut prod = E::Fr::one();
for (k, &pos) in positions.iter().enumerate().take(m) {
let w_ik = input_domain.element(pos);
if k != j {
prod *= w_ij - w_ik;
}
}
// 2. Summation
let q_add = openings[i_j].mul(prod.inverse().unwrap()); // [1/prod] Q_{i_j}
if j == 0 {
res = q_add;
} else {
res += q_add;
}
}
let res = res.into_affine();
end_timer!(timer);
res
}
}
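// Illustrative sketch (not part of the library API): how a caller can fold the
// (G1, G2) pairs returned by the *_defer_pairing functions above into a single
// pairing-product check. The function name is hypothetical.
#[allow(dead_code)]
fn check_deferred_pairings_sketch<E: PairingEngine>(
    pairs: &[(E::G1Projective, E::G2Projective)],
) -> bool {
    // Prepare each pair for the Miller loop.
    let prepared: Vec<(E::G1Prepared, E::G2Prepared)> = pairs
        .iter()
        .map(|(a, b)| {
            (
                E::G1Prepared::from(a.into_affine()),
                E::G2Prepared::from(b.into_affine()),
            )
        })
        .collect();
    // All deferred checks pass iff the product of pairings equals one.
    E::product_of_pairings(prepared.iter()).is_one()
}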
pub fn generate_lagrange_polynomials_subset<E: PairingEngine>(
positions: &[usize],
srs: &crate::multi::PublicParameters<E>,
) -> Vec<DensePolynomial<E::Fr>> {
let timer = start_timer!(|| "generate lagrange poly subset");
let mut tau_polys = vec![];
let m = positions.len();
for j in 0..m {
let mut tau_j = DensePolynomial::from_coefficients_slice(&[E::Fr::one()]); // start from tau_j =1
for k in 0..m {
if k != j {
// tau_j = prod_{k != j} (X - w^(i_k)) / (w^(i_j) - w^(i_k))
let denom = srs.domain_N.element(positions[j]) - srs.domain_N.element(positions[k]);
let denom_inv = E::Fr::one() / denom;
tau_j = &tau_j
    * &DensePolynomial::from_coefficients_slice(&[
        -srs.domain_N.element(positions[k]) * denom_inv, // -w^(i_k) / (w^(i_j) - w^(i_k))
        denom_inv,                                       // 1 / (w^(i_j) - w^(i_k))
    ]);
}
}
tau_polys.push(tau_j.clone());
}
end_timer!(timer);
tau_polys
}
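// Illustrative sketch (not part of the library API): the tau_j above are Lagrange
// polynomials over the subset {w^(i_j)}, so sum_j values[j] * tau_j(X) evaluates to
// values[k] at w^(i_k). The function name is hypothetical.
#[allow(dead_code)]
fn interpolate_over_subset_sketch<E: PairingEngine>(
    values: &[E::Fr],
    positions: &[usize],
    srs: &crate::multi::PublicParameters<E>,
) -> DensePolynomial<E::Fr> {
    assert_eq!(values.len(), positions.len());
    let tau_polys = generate_lagrange_polynomials_subset(positions, srs);
    let mut res = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
    for (tau_j, &v_j) in tau_polys.iter().zip(values.iter()) {
        // res += v_j * tau_j(X)
        res = &res + &(tau_j * v_j);
    }
    res
}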
#[cfg(test)]
pub mod tests {
use super::{generate_lagrange_polynomials_subset, KZGCommit, *};
use crate::caulk_single_setup;
use ark_bls12_377::Bls12_377;
use ark_bls12_381::Bls12_381;
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
use ark_poly::{univariate::DensePolynomial, EvaluationDomain, Polynomial, UVPolynomial};
use ark_poly_commit::kzg10::KZG10;
use ark_std::{test_rng, One, Zero};
use std::time::Instant;
#[test]
fn test_lagrange() {
test_lagrange_helper::<Bls12_377>();
test_lagrange_helper::<Bls12_381>();
}
#[allow(non_snake_case)]
fn test_lagrange_helper<E: PairingEngine>() {
let p: usize = 8; // bitlength of poly degree
let max_degree: usize = (1 << p) + 2;
let m: usize = 8;
let N: usize = 1 << p;
let now = Instant::now();
let pp = crate::multi::PublicParameters::<E>::setup(&max_degree, &N, &m, &p);
println!("time to setup {:?}", now.elapsed());
let mut positions: Vec<usize> = vec![];
for i in 0..m {
// generate positions evenly distributed in the set
let i_j: usize = i * (max_degree / m);
positions.push(i_j);
}
let tau_polys = generate_lagrange_polynomials_subset(&positions, &pp);
for j in 0..m {
for k in 0..m {
if k == j {
assert_eq!(
tau_polys[j].evaluate(&pp.domain_N.element(positions[k])),
E::Fr::one()
)
} else {
assert_eq!(
tau_polys[j].evaluate(&pp.domain_N.element(positions[k])),
E::Fr::zero()
)
}
}
}
}
#[test]
#[allow(non_snake_case)]
pub fn test_Q_g2() {
test_Q_g2_helper::<Bls12_381>();
test_Q_g2_helper::<Bls12_377>();
}
#[allow(non_snake_case)]
pub fn test_Q_g2_helper<E: PairingEngine>() {
let rng = &mut ark_std::test_rng();
// the current KZG setup should be replaced with the output of a setup ceremony
let p: usize = 6; // bitlength of poly degree
let max_degree: usize = (1 << p) + 2;
let actual_degree: usize = (1 << p) - 1;
let m: usize = 1 << (p / 2);
let N: usize = 1 << p;
let pp = crate::multi::PublicParameters::setup(&max_degree, &N, &m, &p);
// Setting up test instance to run evaluate on.
// test randomness for c_poly is the same every time.
// test positions are fixed every time
// g_c = g^(c(x))
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, rng);
let c_com = KZGCommit::<E>::commit_g1(&pp.poly_ck, &c_poly);
let now = Instant::now();
let openings = KZGCommit::<E>::multiple_open::<E::G2Affine>(&c_poly, &pp.g2_powers, p);
println!("Multi advanced computed in {:?}", now.elapsed());
let mut positions: Vec<usize> = vec![];
for i in 0..m {
let i_j: usize = i * (max_degree / m);
positions.push(i_j);
}
let now = Instant::now();
// Compute proof
let Q: E::G2Affine =
KZGCommit::<E>::aggregate_proof_g2(&openings, &positions, &pp.domain_N);
println!(
"Full proof for {:?} positions computed in {:?}",
m,
now.elapsed()
);
// Compute commitment to C_I
let mut C_I = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]); // C_I = sum_j c_j*tau_j
let tau_polys = generate_lagrange_polynomials_subset(&positions, &pp);
for j in 0..m {
C_I = &C_I + &(&tau_polys[j] * c_poly.evaluate(&pp.domain_N.element(positions[j])));
// sum_j c_j*tau_j
}
let c_I_com = KZGCommit::<E>::commit_g1(&pp.poly_ck, &C_I);
// Compute commitment to z_I
let mut z_I = DensePolynomial::from_coefficients_slice(&[E::Fr::one()]);
for j in 0..m {
z_I = &z_I
* &DensePolynomial::from_coefficients_slice(&[
-pp.domain_N.element(positions[j]),
E::Fr::one(),
]);
}
let z_I_com = KZGCommit::<E>::commit_g1(&pp.poly_ck, &z_I);
// pairing check
let pairing1 = E::pairing(
(c_com.into_projective() - c_I_com.into_projective()).into_affine(),
pp.g2_powers[0],
);
let pairing2 = E::pairing(z_I_com, Q);
assert_eq!(pairing1, pairing2);
}
#[test]
fn test_single() {
test_single_helper::<Bls12_381>();
test_single_helper::<Bls12_377>();
}
fn test_single_helper<E: PairingEngine>() {
let mut rng = test_rng();
// setting public parameters
// the current KZG setup should be replaced with the output of a setup ceremony
let max_degree: usize = 100;
let actual_degree: usize = 63;
let pp = caulk_single_setup(max_degree, actual_degree, &mut rng);
// Setting up test instance to run evaluate on.
// test randomness for c_poly is the same every time.
// the test index is fixed every time
// g_c = g^(c(x))
let rng = &mut test_rng();
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, rng);
let (_c_com, c_com_open) = KZG10::<E, _>::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
let i: usize = 6;
let q = single_open_default(&c_poly, &c_com_open, &pp.poly_ck, i, actual_degree);
let q2 = single_open_fast(&c_poly, &pp.poly_ck, i, actual_degree);
assert_eq!(q, q2);
}
#[test]
pub fn test_multi() {
test_multi_helper::<Bls12_381>();
test_multi_helper::<Bls12_377>();
}
pub fn test_multi_helper<E: PairingEngine>() {
let mut rng = test_rng();
// the current KZG setup should be replaced with the output of a setup ceremony
let p: usize = 9;
let max_degree: usize = 1 << (p + 1);
let actual_degree: usize = (1 << p) - 1;
let pp = caulk_single_setup(max_degree, actual_degree, &mut rng);
// Setting up test instance to run evaluate on.
// test randomness for c_poly is the same every time.
// g_c = g^(c(x))
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, &mut rng);
let (c_com, c_com_open) = KZG10::<E, _>::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
let _g_c = c_com.0;
let now = Instant::now();
let q = multiple_open_naive(&c_poly, &c_com_open, &pp.poly_ck, actual_degree);
println!("Multi naive computed in {:?}", now.elapsed());
let now = Instant::now();
let q2 = KZGCommit::<E>::multiple_open::<E::G1Affine>(&c_poly, &pp.poly_ck.powers_of_g, p);
println!("Multi advanced computed in {:?}", now.elapsed());
assert_eq!(q, q2);
}
#[test]
fn test_commit() {
test_commit_helper::<Bls12_381>();
test_commit_helper::<Bls12_377>();
}
pub fn test_commit_helper<E: PairingEngine>() {
let mut rng = test_rng();
// the current KZG setup should be replaced with the output of a setup ceremony
let max_degree: usize = 100;
let actual_degree: usize = 63;
let pp = caulk_single_setup(max_degree, actual_degree, &mut rng);
// Setting up test instance to run evaluate on.
// test randomness for c_poly is the same every time.
// g_c = g^(c(x))
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, &mut rng);
let (c_com, _c_com_open) = KZG10::<E, _>::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
let g_c1 = c_com.0;
let g_c2 = commit_direct(&c_poly, &pp.poly_ck);
assert_eq!(g_c1, g_c2);
println!("commit test passed")
}
/// Various functions that are used for testing
fn commit_direct<E: PairingEngine>(
c_poly: &DensePolynomial<E::Fr>, // c(X)
poly_ck: &Powers<E>, // SRS
) -> E::G1Affine {
assert!(c_poly.coeffs.len() <= poly_ck.powers_of_g.len());
let mut com = poly_ck.powers_of_g[0].mul(c_poly.coeffs[0]);
for i in 1..c_poly.coeffs.len() {
com = com + poly_ck.powers_of_g[i].mul(c_poly.coeffs[i]);
}
com.into_affine()
}
// compute all openings to c_poly by merely calling `open` N times
fn multiple_open_naive<E: PairingEngine>(
c_poly: &DensePolynomial<E::Fr>,
c_com_open: &Randomness<E::Fr, DensePolynomial<E::Fr>>,
poly_ck: &Powers<E>,
degree: usize,
) -> Vec<E::G1Affine> {
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(degree).unwrap();
let mut res: Vec<E::G1Affine> = vec![];
for i in 0..input_domain.size() {
let omega_i = input_domain.element(i);
res.push(kzg_open_g1_test::<E>(&c_poly, &omega_i, &c_com_open, &poly_ck).w);
}
res
}
////////////////////////////////////////////////
fn kzg_open_g1_test<E: PairingEngine>(
p: &DensePolynomial<E::Fr>,
omega_5: &E::Fr,
polycom_open: &Randomness<E::Fr, DensePolynomial<E::Fr>>,
poly_ck: &Powers<E>,
) -> Proof<E> {
let rng = &mut ark_std::test_rng();
let (witness_polynomial, _random_witness_polynomial) =
KZG10::<E, _>::compute_witness_polynomial(p, omega_5.clone(), polycom_open).unwrap();
let (temp0, _temp1) = KZG10::commit(poly_ck, &witness_polynomial, None, Some(rng)).unwrap();
Proof {
w: temp0.0,
random_v: None,
}
}
// compute KZG proof Q = g1_q = g^( (c(x) - c(w^i)) / (x - w^i) ) where x is
// secret, w^i is the point where we open, and c(X) is the committed polynomial
fn single_open_default<E: PairingEngine>(
c_poly: &DensePolynomial<E::Fr>, // c(X)
c_com_open: &Randomness<E::Fr, DensePolynomial<E::Fr>>, //
poly_ck: &Powers<E>,
i: usize, //
degree: usize,
) -> E::G1Affine {
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(degree).unwrap();
let omega_i = input_domain.element(i);
let c_poly_open = kzg_open_g1_test(&c_poly, &omega_i, &c_com_open, &poly_ck);
c_poly_open.w
}
// KZG proof/opening at point y for c(X) = sum_i c_i X^i:
// (1) the quotient T_y(X) = (c(X) - c(y)) / (X - y) = sum_i t_i X^i
// (2) t_{deg-1} = c_deg
// (3) t_j = c_{j+1} + y * t_{j+1}
fn single_open_fast<E: PairingEngine>(
c_poly: &DensePolynomial<E::Fr>, // c(X)
poly_ck: &Powers<E>, // SRS
i: usize, // y=w^i
degree: usize, // degree of c(X)
) -> E::G1Affine {
// computing opening point
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(degree).unwrap();
let y = input_domain.element(i);
// compute quotient
let mut t_poly = c_poly.clone();
t_poly.coeffs.remove(0); // shifting indices
for j in (0..t_poly.len() - 1).rev() {
t_poly.coeffs[j] = c_poly.coeffs[j + 1] + y * t_poly.coeffs[j + 1]
}
// commit
let (t_com, _) = KZG10::commit(&poly_ck, &t_poly, None, None).unwrap();
t_com.0
}
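// Illustrative sketch (not part of the test suite): the same synthetic-division
// recurrence as in `single_open_fast`, over plain field elements, returning the
// coefficients of t(X) = (c(X) - c(y)) / (X - y). The function name is hypothetical.
#[allow(dead_code)]
fn quotient_coeffs_sketch<E: PairingEngine>(c: &[E::Fr], y: E::Fr) -> Vec<E::Fr> {
    let deg = c.len() - 1;
    if deg == 0 {
        // a constant polynomial has a zero quotient
        return vec![];
    }
    let mut t = vec![E::Fr::zero(); deg];
    // t_{deg-1} = c_deg, then t_j = c_{j+1} + y * t_{j+1}, computed from the top down
    t[deg - 1] = c[deg];
    for j in (0..deg - 1).rev() {
        t[j] = c[j + 1] + y * t[j + 1];
    }
    t
}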
}

18
src/lib.rs Normal file
View File

@@ -0,0 +1,18 @@
mod dft;
mod kzg;
pub mod multi;
mod pedersen;
mod single;
mod transcript;
pub(crate) mod util;
pub use dft::*;
pub use kzg::KZGCommit;
pub use multi::{
compute_lookup_proof, prove_multiunity,
setup::{LookupParameters, VerifierPublicParameters},
verify_lookup_proof, verify_multiunity, verify_multiunity_defer_pairing, PublicParameters,
};
pub use pedersen::PedersenParam;
pub use single::{caulk_single_prove, caulk_single_verify, setup::caulk_single_setup};
pub use transcript::CaulkTranscript;
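// Typical end-to-end flow (a sketch mirroring the tests in `multi`):
//   1. build `PublicParameters` with `multi::PublicParameters::setup`,
//   2. commit to the table C(X) and to the looked-up values phi(X) with `KZGCommit::commit_g1`,
//   3. precompute the table openings with `KZGCommit::multiple_open`,
//   4. prove with `compute_lookup_proof` and check with `verify_lookup_proof`.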

919
src/multi/mod.rs Normal file
View File

@@ -0,0 +1,919 @@
// This file includes the Caulk prover and verifier for multi openings (lookups).
// The protocol is described in Figure 3.
pub mod setup;
mod unity;
use crate::{kzg::generate_lagrange_polynomials_subset, CaulkTranscript, KZGCommit};
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::{Field, PrimeField};
use ark_poly::{
univariate::DensePolynomial, EvaluationDomain, Evaluations as EvaluationsOnDomain, Evaluations,
GeneralEvaluationDomain, Polynomial, UVPolynomial,
};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::{cfg_into_iter, end_timer, rand::RngCore, start_timer, One, UniformRand, Zero};
#[cfg(feature = "parallel")]
use rayon::iter::{IntoParallelIterator, ParallelIterator};
pub use setup::PublicParameters;
use std::{
convert::TryInto,
fs::File,
io::{Read, Write},
ops::MulAssign,
time::Instant,
vec::Vec,
};
pub use unity::{
prove_multiunity, verify_multiunity, verify_multiunity_defer_pairing, ProofMultiUnity,
};
pub struct LookupInstance<C: AffineCurve> {
pub c_com: C,   // commitment to the polynomial C(X) that represents a table
pub phi_com: C, // commitment to the polynomial phi(X) that represents the values to look up
}
pub struct LookupProverInput<E: PairingEngine> {
pub c_poly: DensePolynomial<E::Fr>, // polynomial C(X) that represents a table
pub phi_poly: DensePolynomial<E::Fr>, // polynomial phi(X) that represents the values to look up
pub positions: Vec<usize>, //
pub openings: Vec<E::G2Affine>,
}
#[derive(Debug, PartialEq)]
// Data structure to be stored in a file: polynomial, its commitment, and its
// openings (for certain SRS)
pub struct TableInput<E: PairingEngine> {
pub c_poly: DensePolynomial<E::Fr>,
pub c_com: E::G1Affine,
pub openings: Vec<E::G2Affine>,
}
// Lookup proof data structure
#[allow(non_snake_case)]
pub struct LookupProof<E: PairingEngine> {
pub C_I_com: E::G1Affine, // Commitment to C_I(X)
pub H1_com: E::G2Affine, // Commitment to H_1(X)
pub H2_com: E::G1Affine, // Commitment to H_2(X)
pub u_com: E::G1Affine, // Commitment to u(X)
pub z_I_com: E::G1Affine, // Commitment to z_I(X)
pub v1: E::Fr,
pub v2: E::Fr,
pub pi1: E::G1Affine,
pub pi2: E::G1Affine,
pub pi3: E::G1Affine,
}
impl<E: PairingEngine> TableInput<E> {
fn store(&self, path: &str) {
// 1. Polynomial
let mut o_bytes = vec![];
let mut f = File::create(path).expect("Unable to create file");
let len: u32 = self.c_poly.len().try_into().unwrap();
let len_bytes = len.to_be_bytes();
f.write_all(&len_bytes).expect("Unable to write data");
let len32: usize = len.try_into().unwrap();
for i in 0..len32 {
self.c_poly.coeffs[i]
.serialize_uncompressed(&mut o_bytes)
.ok();
}
f.write_all(&o_bytes).expect("Unable to write data");
// 2. Commitment
o_bytes.clear();
self.c_com.serialize_uncompressed(&mut o_bytes).ok();
f.write_all(&o_bytes).expect("Unable to write data");
// 3. Openings
o_bytes.clear();
let len: u32 = self.openings.len().try_into().unwrap();
let len_bytes = len.to_be_bytes();
f.write_all(&len_bytes).expect("Unable to write data");
let len32: usize = len.try_into().unwrap();
for i in 0..len32 {
self.openings[i].serialize_uncompressed(&mut o_bytes).ok();
}
f.write_all(&o_bytes).expect("Unable to write data");
}
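// On-disk layout written by `store` and read by `load` (lengths are big-endian u32,
// group and field elements are uncompressed):
//   [ num coefficients | coefficients of c_poly (Fr) | c_com (G1) | num openings | openings (G2) ]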
fn load(path: &str) -> TableInput<E> {
const FR_UNCOMPR_SIZE: usize = 32;
const G1_UNCOMPR_SIZE: usize = 96;
const G2_UNCOMPR_SIZE: usize = 192;
let mut data = Vec::new();
let mut f = File::open(path).expect("Unable to open file");
f.read_to_end(&mut data).expect("Unable to read data");
// 1. reading c_poly
let mut cur_counter: usize = 0;
let len_bytes: [u8; 4] = (&data[0..4]).try_into().unwrap();
let len: u32 = u32::from_be_bytes(len_bytes);
let mut coeffs = vec![];
let len32: usize = len.try_into().unwrap();
cur_counter += 4;
for i in 0..len32 {
let buf_bytes =
&data[cur_counter + i * FR_UNCOMPR_SIZE..cur_counter + (i + 1) * FR_UNCOMPR_SIZE];
let tmp = E::Fr::deserialize_unchecked(buf_bytes).unwrap();
coeffs.push(tmp);
}
cur_counter += len32 * FR_UNCOMPR_SIZE;
// 2. c_com
let buf_bytes = &data[cur_counter..cur_counter + G1_UNCOMPR_SIZE];
let c_com = E::G1Affine::deserialize_unchecked(buf_bytes).unwrap();
cur_counter += G1_UNCOMPR_SIZE;
// 3 openings
let len_bytes: [u8; 4] = (&data[cur_counter..cur_counter + 4]).try_into().unwrap();
let len: u32 = u32::from_be_bytes(len_bytes);
let mut openings = vec![];
let len32: usize = len.try_into().unwrap();
cur_counter += 4;
for _ in 0..len32 {
let buf_bytes = &data[cur_counter..cur_counter + G2_UNCOMPR_SIZE];
let tmp = E::G2Affine::deserialize_unchecked(buf_bytes).unwrap();
openings.push(tmp);
cur_counter += G2_UNCOMPR_SIZE;
}
TableInput {
c_poly: DensePolynomial { coeffs },
c_com,
openings,
}
}
}
#[allow(non_snake_case)]
pub fn compute_lookup_proof<E: PairingEngine, R: RngCore>(
instance: &LookupInstance<E::G1Affine>,
input: &LookupProverInput<E>,
srs: &PublicParameters<E>,
rng: &mut R,
) -> (LookupProof<E>, ProofMultiUnity<E>) {
let timer = start_timer!(|| "lookup proof generation");
let m = input.positions.len();
///////////////////////////////////////////////////////////////////
// 1. Blinders
///////////////////////////////////////////////////////////////////
let step_1_timer = start_timer!(|| "step 1");
// prover's blinders for zero-knowledge
let r1 = E::Fr::rand(rng);
let r2 = E::Fr::rand(rng);
let r3 = E::Fr::rand(rng);
let r4 = E::Fr::rand(rng);
let r5 = E::Fr::rand(rng);
let r6 = E::Fr::rand(rng);
let r7 = E::Fr::rand(rng);
end_timer!(step_1_timer);
///////////////////////////////////////////////////////////////////
// 2. Compute z_I(X) = r1 prod_{i in I} (X - w^i)
///////////////////////////////////////////////////////////////////
let step_2_timer = start_timer!(|| "step 2");
// z_I includes each position only once.
let mut positions_no_repeats = Vec::new();
for &pos in input.positions.iter() {
    if !positions_no_repeats.contains(&pos) {
        positions_no_repeats.push(pos);
    }
}
// insert 0 into z_I so that we can pad when m is not a power of 2.
if !positions_no_repeats.contains(&0usize) {
    positions_no_repeats.push(0usize);
}
// z_I(X)
let mut z_I = DensePolynomial::from_coefficients_slice(&[r1]);
for &pos in positions_no_repeats.iter() {
z_I = &z_I
* &DensePolynomial::from_coefficients_slice(&[
-srs.domain_N.element(pos),
E::Fr::one(),
]);
}
end_timer!(step_2_timer);
///////////////////////////////////////////////////////////////////
// 3. Compute C_I(X) = (r_2+r_3X + r4X^2)*Z_I(X) + sum_j c_j*tau_j(X)
///////////////////////////////////////////////////////////////////
let step_3_timer = start_timer!(|| "step 3");
let mut c_I_poly = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
// tau_polys[i] = 1 at positions_no_repeats[i] and 0 at positions_no_repeats[j] for j != i
// Takes m^2 time, or 36ms when m = 32. Can be done in m log^2(m) time if this
// ever becomes a bottleneck. See https://people.csail.mit.edu/devadas/pubs/scalable_thresh.pdf
let tau_polys = generate_lagrange_polynomials_subset(&positions_no_repeats, srs);
// C_I(X) = sum_j c_j*tau_j(X)
// Takes m^2 time, or 38ms when m = 32. Can be done faster if we store c_poly
// evaluations.
for j in 0..positions_no_repeats.len() {
c_I_poly = &c_I_poly
+ &(&tau_polys[j]
* input
.c_poly
.evaluate(&srs.domain_N.element(positions_no_repeats[j]))); // sum_j c_j*tau_j
}
// extra_blinder = r2 + r3 X + r4 X^2
let extra_blinder = DensePolynomial::from_coefficients_slice(&[r2, r3, r4]);
// C_I(X) = C_I(X) + z_I(X) * (r2 + r3 X + r4 X^2)
c_I_poly = &c_I_poly + &(&z_I * &extra_blinder);
end_timer!(step_3_timer);
///////////////////////////////////////////////////////////////////
// 4. Compute H1
///////////////////////////////////////////////////////////////////
let step_4_timer = start_timer!(|| "step 4");
// Compute [Q(x)]_2 by aggregating KZG proofs such that
// Q(X) = ( C(X) - sum_{i in I} C(w^i) tau_i(X) ) / ( prod_{i in I} (X - w^i) )
let g2_Q =
KZGCommit::<E>::aggregate_proof_g2(&input.openings, &positions_no_repeats, &srs.domain_N);
// blind_com = [ r2 + r3 x + r4 x^2 ]_2
let blind_com = KZGCommit::<E>::commit_g2(&srs.g2_powers, &extra_blinder);
// H1_com = [ r1^{-1} Q(x) ]_2 - blind_com
let H1_com = (g2_Q.mul(r1.inverse().unwrap()) - blind_com.into_projective()).into_affine();
end_timer!(step_4_timer);
///////////////////////////////////////////////////////////////////
// 5. Compute u(X) = sum_j w^{i_j} mu_j(X) + (r5 + r6 X + r7 X^2) z_{Vm}(X)
///////////////////////////////////////////////////////////////////
let step_5_timer = start_timer!(|| "step 5");
// u(X) = sum_j w^{i_j} mu_j(X)
let mut u_vals = vec![];
for j in 0..m {
u_vals.push(srs.domain_N.element(input.positions[j]));
}
// u(X) = u(X) + (r5 + r6 X + r7 X^2) z_{Vm}(X)
let extra_blinder2 = DensePolynomial::from_coefficients_slice(&[r5, r6, r7]);
let u_poly = &EvaluationsOnDomain::from_vec_and_domain(u_vals.clone(), srs.domain_m)
.interpolate()
+ &(extra_blinder2.mul_by_vanishing_poly(srs.domain_m));
end_timer!(step_5_timer);
///////////////////////////////////////////////////////////////////
// 6. Commitments
///////////////////////////////////////////////////////////////////
let step_6_timer = start_timer!(|| "step 6");
let z_I_com = KZGCommit::<E>::commit_g1(&srs.poly_ck, &z_I);
let C_I_com = KZGCommit::<E>::commit_g1(&srs.poly_ck, &c_I_poly);
let u_com = KZGCommit::<E>::commit_g1(&srs.poly_ck, &u_poly);
end_timer!(step_6_timer);
///////////////////////////////////////////////////////////////////
// 7 Prepare unity proof
///////////////////////////////////////////////////////////////////
let step_7_timer = start_timer!(|| "step 7");
// transcript initialised to zero
let mut transcript = CaulkTranscript::new();
// let now = Instant::now();
let unity_proof = prove_multiunity(srs, &mut transcript, &u_com, &u_vals, extra_blinder2);
// println!("Time to prove unity {:?}", now.elapsed());
// quick test can be uncommented to check if unity proof verifies
// let unity_check = verify_multiunity( &srs, &mut Fr::zero(), u_com.0.clone(),
// &unity_proof ); println!("unity_check = {}", unity_check);
end_timer!(step_7_timer);
///////////////////////////////////////////////////////////////////
// 8. Hash outputs to get chi
///////////////////////////////////////////////////////////////////
let step_8_timer = start_timer!(|| "step 8");
transcript.append_element(b"c_com", &instance.c_com);
transcript.append_element(b"phi_com", &instance.phi_com);
transcript.append_element(b"u_bar_alpha", &unity_proof.g1_u_bar_alpha);
transcript.append_element(b"h2_alpha", &unity_proof.g1_h_2_alpha);
transcript.append_element(b"pi_1", &unity_proof.pi_1);
transcript.append_element(b"pi_2", &unity_proof.pi_2);
transcript.append_element(b"pi_3", &unity_proof.pi_3);
transcript.append_element(b"pi_4", &unity_proof.pi_4);
transcript.append_element(b"pi_5", &unity_proof.pi_5);
transcript.append_element(b"C_I_com", &C_I_com);
transcript.append_element(b"z_I_com", &z_I_com);
transcript.append_element(b"u_com", &u_com);
transcript.append_element(b"h1_com", &H1_com);
transcript.append_element(b"v1", &unity_proof.v1);
transcript.append_element(b"v2", &unity_proof.v2);
transcript.append_element(b"v3", &unity_proof.v3);
let chi = transcript.get_and_append_challenge(b"chi");
end_timer!(step_8_timer);
///////////////////////////////////////////////////////////////////
// 9. Compute z_I( u(X) )
///////////////////////////////////////////////////////////////////
let step_9_timer = start_timer!(|| "step 9");
// Need a bigger domain to compute z_I(u(X)) over.
// Has size O(m^2)
let domain_m_sq: GeneralEvaluationDomain<E::Fr> =
GeneralEvaluationDomain::new(z_I.len() * u_poly.len() + 2).unwrap();
// id_poly(X) = 0 at the first m points of domain_m and 1 at the remaining points;
// used for when m is not a power of 2
let mut id_poly = DensePolynomial::from_coefficients_slice(&[E::Fr::one()]);
id_poly = &id_poly - &srs.id_poly;
// Compute z_I( u(X) + w^0 id(X) )
// Computing z_I(u(X)) is done naively and could be faster. Currently this is
// not a bottleneck
let evals: Vec<E::Fr> = cfg_into_iter!(0..domain_m_sq.size())
.map(|k| {
z_I.evaluate(
&(u_poly.evaluate(&domain_m_sq.element(k))
+ id_poly.evaluate(&domain_m_sq.element(k))),
)
})
.collect();
let z_I_u_poly = Evaluations::from_vec_and_domain(evals, domain_m_sq).interpolate();
end_timer!(step_9_timer);
///////////////////////////////////////////////////////////////////
// 10. Compute C_I(u(X))-phi(X)
///////////////////////////////////////////////////////////////////
let step_10_timer = start_timer!(|| "step 10");
// Compute C_I( u(X) )
// Computing C_I(u(X)) is done naively and could be faster. Currently this is
// not a bottleneck
// Actually compute c_I( u(X) + id(X) ) in case m is not a power of 2
let evals: Vec<E::Fr> = cfg_into_iter!(0..domain_m_sq.size())
.map(|k| {
c_I_poly.evaluate(
&(u_poly.evaluate(&domain_m_sq.element(k))
+ id_poly.evaluate(&domain_m_sq.element(k))),
)
})
.collect();
// c_I_u_poly = C_I( u(X) ) - phi(X)
let c_I_u_poly =
&Evaluations::from_vec_and_domain(evals, domain_m_sq).interpolate() - &input.phi_poly;
end_timer!(step_10_timer);
///////////////////////////////////////////////////////////////////
// 11. Compute H2
///////////////////////////////////////////////////////////////////
let step_11_timer = start_timer!(|| "step 11");
// temp_poly(X) = z_I(u(X)) + chi [ C_I(u(X)) - phi(X) ]
let temp_poly = &z_I_u_poly + &(&c_I_u_poly * chi);
// H2(X) = temp_poly / z_Vm(X)
let (H2_poly, rem) = temp_poly.divide_by_vanishing_poly(srs.domain_m).unwrap();
assert!(
rem == DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]),
"H_2(X) doesn't divide"
);
end_timer!(step_11_timer);
///////////////////////////////////////////////////////////////////
// 12. Compute commitments to H2
///////////////////////////////////////////////////////////////////
let step_12_timer = start_timer!(|| "step 12");
let H2_com = KZGCommit::<E>::commit_g1(&srs.poly_ck, &H2_poly);
// println!("Time to commit to H2 {:?}", now.elapsed());
end_timer!(step_12_timer);
///////////////////////////////////////////////////////////////////
// 13. Hash outputs to get alpha
///////////////////////////////////////////////////////////////////
let step_13_timer = start_timer!(|| "step 13");
transcript.append_element(b"h2", &H2_com);
let alpha = transcript.get_and_append_challenge(b"alpha");
// last hash so don't need to update hash_input
// hash_input = alpha.clone();
end_timer!(step_13_timer);
///////////////////////////////////////////////////////////////////
// 14. Open u at alpha, get v1
///////////////////////////////////////////////////////////////////
let step_14_timer = start_timer!(|| "step 14");
let (evals1, pi1) = KZGCommit::<E>::open_g1_batch(&srs.poly_ck, &u_poly, None, &[alpha]);
let v1 = evals1[0];
end_timer!(step_14_timer);
///////////////////////////////////////////////////////////////////
// 15. Compute p1(X) and open at v1
///////////////////////////////////////////////////////////////////
let step_15_timer = start_timer!(|| "step 15");
// v1_id = u(alpha) + id(alpha) for when m is not a power of 2
let v1_id = v1 + id_poly.evaluate(&alpha);
// p1(X) = z_I(X) + chi * c_I(X)
let p1_poly = &z_I + &(&c_I_poly * chi);
let (evals2, pi2) = KZGCommit::<E>::open_g1_batch(&srs.poly_ck, &p1_poly, None, &[v1_id]);
end_timer!(step_15_timer);
///////////////////////////////////////////////////////////////////
// 16. Compute p2(X) and open p2 at alpha
///////////////////////////////////////////////////////////////////
let step_16_timer = start_timer!(|| "step 16");
// p2(X) = zI(u(alpha)) + chi C_I( u(alpha) )
let mut p2_poly = DensePolynomial::from_coefficients_slice(&[
z_I.evaluate(&v1_id) + chi * c_I_poly.evaluate(&v1_id)
]);
// p2(X) = p2(X) - chi phi(X)
p2_poly = &p2_poly - &(&input.phi_poly * chi);
// p2(X) = p2(X) - zVm(alpha) H2(X)
let zVm: DensePolynomial<E::Fr> = srs.domain_m.vanishing_polynomial().into();
p2_poly = &p2_poly - &(&H2_poly * zVm.evaluate(&alpha));
// Open p2(X) at alpha
let (evals3, pi3) = KZGCommit::<E>::open_g1_batch(&srs.poly_ck, &p2_poly, None, &[alpha]);
// check that p2_poly(alpha) = 0
assert!(evals3[0] == E::Fr::zero(), "p2(alpha) does not equal 0");
end_timer!(step_16_timer);
///////////////////////////////////////////////////////////////////
// 17. Compose proof
///////////////////////////////////////////////////////////////////
let proof = LookupProof {
C_I_com,
H1_com,
H2_com,
z_I_com,
u_com,
v1,
v2: evals2[0],
pi1,
pi2,
pi3,
};
end_timer!(timer);
(proof, unity_proof)
}
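// At a high level (a summary, not a spec), the proof produced above and checked by
// `verify_lookup_proof` below asserts that:
//   (i) every u(nu_j) is a root of unity in the table domain (the unity proof),
//  (ii) C(X) - C_I(X) = z_I(X) H_1(X), so C_I agrees with C on the subset I
//       (the final pairing check), and
// (iii) z_I(u(X)) = 0 and C_I(u(X)) = phi(X) on domain_m (the H_2 identity,
//       combined via the random challenge chi),
// which together imply that each phi(nu_j) equals C at some table point w^i with i in I.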
#[allow(non_snake_case)]
pub fn verify_lookup_proof<E: PairingEngine, R: RngCore>(
c_com: &E::G1Affine,
phi_com: &E::G1Affine,
proof: &LookupProof<E>,
unity_proof: &ProofMultiUnity<E>,
srs: &PublicParameters<E>,
rng: &mut R,
) -> bool {
let timer = start_timer!(|| "lookup proof verification");
///////////////////////////////////////////////////////////////////
// 1. check unity
///////////////////////////////////////////////////////////////////
// transcript initialised to zero
let mut transcript = CaulkTranscript::new();
let unity_check =
verify_multiunity_defer_pairing(srs, &mut transcript, &proof.u_com, unity_proof);
///////////////////////////////////////////////////////////////////
// 2. Hash outputs to get chi
///////////////////////////////////////////////////////////////////
transcript.append_element(b"c_com", c_com);
transcript.append_element(b"phi_com", phi_com);
transcript.append_element(b"u_bar_alpha", &unity_proof.g1_u_bar_alpha);
transcript.append_element(b"h2_alpha", &unity_proof.g1_h_2_alpha);
transcript.append_element(b"pi_1", &unity_proof.pi_1);
transcript.append_element(b"pi_2", &unity_proof.pi_2);
transcript.append_element(b"pi_3", &unity_proof.pi_3);
transcript.append_element(b"pi_4", &unity_proof.pi_4);
transcript.append_element(b"pi_5", &unity_proof.pi_5);
transcript.append_element(b"C_I_com", &proof.C_I_com);
transcript.append_element(b"z_I_com", &proof.z_I_com);
transcript.append_element(b"u_com", &proof.u_com);
transcript.append_element(b"h1_com", &proof.H1_com);
transcript.append_element(b"v1", &unity_proof.v1);
transcript.append_element(b"v2", &unity_proof.v2);
transcript.append_element(b"v3", &unity_proof.v3);
let chi = transcript.get_and_append_challenge(b"chi");
///////////////////////////////////////////////////////////////////
// 3. Hash outputs to get alpha
///////////////////////////////////////////////////////////////////
transcript.append_element(b"h2", &proof.H2_com);
let alpha = transcript.get_and_append_challenge(b"alpha");
// last hash so don't need to update hash_input
// hash_input = alpha.clone();
///////////////////////////////////////////////////////////////////
// 4. Check pi_1
///////////////////////////////////////////////////////////////////
// KZG.Verify(srs_KZG, [u]_1, deg = bot, alpha, v1, pi1)
let check1 = KZGCommit::<E>::verify_g1_defer_pairing(
&srs.poly_ck.powers_of_g,
&srs.g2_powers,
&proof.u_com,
None,
&[alpha],
&[proof.v1],
&proof.pi1,
);
///////////////////////////////////////////////////////////////////
// 5. Check pi_2
///////////////////////////////////////////////////////////////////
// v1_id = u(alpha)+ id(alpha) for when m is not a power of 2
let v1_id = proof.v1 + (E::Fr::one() - srs.id_poly.evaluate(&alpha));
// [P1]_1 = [z_I]_1 + chi [c_I]_1
let p1_com = (proof.z_I_com.into_projective() + proof.C_I_com.mul(chi)).into_affine();
// KZG.Verify(srs_KZG, [P1]_1, deg = bot, v1_id, v2, pi2)
let check2 = KZGCommit::<E>::verify_g1_defer_pairing(
&srs.poly_ck.powers_of_g,
&srs.g2_powers,
&p1_com,
None,
&[v1_id],
&[proof.v2],
&proof.pi2,
);
///////////////////////////////////////////////////////////////////
// 6. Check pi_3
///////////////////////////////////////////////////////////////////
// z_Vm(X)
let zVm: DensePolynomial<E::Fr> = srs.domain_m.vanishing_polynomial().into();
// [P2]_1 = [v2]_1 - chi [phi]_1 - zVm(alpha) [H_2]_1
let p2_com = (srs.poly_ck.powers_of_g[0].mul(proof.v2) // [v2]_1
    - phi_com.mul(chi) // chi [phi]_1
    - proof.H2_com.mul(zVm.evaluate(&alpha))) // zVm(alpha) [H2]_1
.into_affine();
// KZG.Verify(srs_KZG, [P2]_1, deg = bot, alpha, 0, pi3)
let check3 = KZGCommit::<E>::verify_g1_defer_pairing(
&srs.poly_ck.powers_of_g,
&srs.g2_powers,
&p2_com,
None,
&[alpha],
&[E::Fr::zero()],
&proof.pi3,
);
///////////////////////////////////////////////////////////////////
// 7. prepare final pairing
///////////////////////////////////////////////////////////////////
// final pairing check: e([C_I]_1 - [C]_1, [1]_2) * e([z_I]_1, [H_1]_2) == 1
let final_pairing = vec![
(
proof.C_I_com.into_projective() - c_com.into_projective(),
srs.g2_powers[0].into_projective(),
),
(
proof.z_I_com.into_projective(),
proof.H1_com.into_projective(),
),
];
///////////////////////////////////////////////////////////////////
// 8. Check pairing products
///////////////////////////////////////////////////////////////////
let pairing_timer = start_timer!(|| "pairing product");
let mut pairing_inputs: Vec<(E::G1Projective, E::G2Projective)> = [
unity_check.as_slice(),
check1.as_slice(),
check2.as_slice(),
check3.as_slice(),
final_pairing.as_slice(),
]
.concat();
let mut zeta = E::Fr::rand(rng);
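// Batch all deferred pairing equations into one product: the first pair of (G1, G2)
// inputs is left as is, and each subsequent pair is scaled by a fresh power of the
// random challenge zeta (obtained by repeated squaring), so a single
// product_of_pairings == 1 check implies every individual equation holds except
// with negligible probability.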
let mut prepared_pairing_inputs = vec![];
for i in 0..pairing_inputs.len() / 2 {
if i != 0 {
pairing_inputs[i * 2].0.mul_assign(zeta);
pairing_inputs[i * 2 + 1].0.mul_assign(zeta);
}
zeta.square_in_place();
prepared_pairing_inputs.push((
E::G1Prepared::from(pairing_inputs[i * 2].0.into_affine()),
E::G2Prepared::from(pairing_inputs[i * 2].1.into_affine()),
));
prepared_pairing_inputs.push((
E::G1Prepared::from(pairing_inputs[i * 2 + 1].0.into_affine()),
E::G2Prepared::from(pairing_inputs[i * 2 + 1].1.into_affine()),
));
}
let res = E::product_of_pairings(prepared_pairing_inputs.iter()).is_one();
end_timer!(pairing_timer);
end_timer!(timer);
res
}
#[allow(non_snake_case)]
#[allow(dead_code)]
pub fn generate_lookup_input<E: PairingEngine, R: RngCore>(
rng: &mut R,
) -> (
LookupProverInput<E>,
PublicParameters<E>, // SRS
) {
let timer = start_timer!(|| "generate lookup input");
let n: usize = 8; // bitlength of poly degree
let m: usize = 4;
// let m: usize = (1<<(n/2-1)); //should be power of 2
let N: usize = 1 << n;
let max_degree: usize = if N > 2 * m * m { N - 1 } else { 2 * m * m };
let actual_degree = N - 1;
let now = Instant::now();
let pp = PublicParameters::<E>::setup(&max_degree, &N, &m, &n);
println!("Time to setup {:?}", now.elapsed());
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, rng);
let mut positions: Vec<usize> = vec![];
for j in 0..m {
// generate positions evenly distributed in the set
let i_j: usize = j * (N / m);
positions.push(i_j);
}
// generating phi
let blinder = E::Fr::rand(rng);
let a_m = DensePolynomial::from_coefficients_slice(&[blinder]);
let mut phi_poly = a_m.mul_by_vanishing_poly(pp.domain_m);
for (j, &pos) in positions.iter().enumerate().take(m) {
phi_poly += &(&pp.lagrange_polynomials_m[j] * c_poly.evaluate(&pp.domain_N.element(pos)));
// adding c(w^{i_j})*mu_j(X)
}
for j in m..pp.domain_m.size() {
phi_poly =
&phi_poly + &(&pp.lagrange_polynomials_m[j] * c_poly.evaluate(&pp.domain_N.element(0)));
}
let now = Instant::now();
let openings = KZGCommit::<E>::multiple_open::<E::G2Affine>(&c_poly, &pp.g2_powers, n);
println!("Time to generate openings {:?}", now.elapsed());
end_timer!(timer);
(
LookupProverInput {
c_poly,
phi_poly,
positions,
openings,
},
pp,
)
}
pub fn get_poly_and_g2_openings<E: PairingEngine>(
pp: &PublicParameters<E>,
actual_degree: usize,
) -> TableInput<E> {
// try opening the file. If it exists load the setup from there, otherwise
// generate
let path = format!(
"polys/poly_{}_openings_{}_{}.setup",
actual_degree,
pp.N,
E::Fq::size_in_bits()
);
let res = File::open(path.clone());
match res {
Ok(_) => {
let now = Instant::now();
let table = TableInput::load(&path);
println!("Time to load openings = {:?}", now.elapsed());
table
},
Err(_) => {
let rng = &mut ark_std::test_rng();
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, rng);
let c_comx = KZGCommit::<E>::commit_g1(&pp.poly_ck, &c_poly);
let now = Instant::now();
let openings =
KZGCommit::<E>::multiple_open::<E::G2Affine>(&c_poly, &pp.g2_powers, pp.n);
println!("Time to generate openings = {:?}", now.elapsed());
let table = TableInput {
c_poly,
c_com: c_comx,
openings,
};
table.store(&path);
table
},
}
}
#[cfg(test)]
mod tests {
use super::*;
use ark_bls12_377::Bls12_377;
use ark_bls12_381::Bls12_381;
use ark_ff::PrimeField;
#[test]
fn test_store() {
test_store_helper::<Bls12_381>();
test_store_helper::<Bls12_377>();
}
#[allow(non_snake_case)]
pub fn test_store_helper<E: PairingEngine>() {
// 1. Setup
let n: usize = 6;
let N: usize = 1 << n;
let powers_size: usize = N + 2; // SRS SIZE
let temp_m = n; // dummy
let pp = PublicParameters::<E>::setup(&powers_size, &N, &temp_m, &n);
let actual_degree = N - 1;
let path = format!("tmp/poly_openings_{}.log", E::Fq::size_in_bits());
// 2. Store
let rng = &mut ark_std::test_rng();
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, rng);
let c_com = KZGCommit::<E>::commit_g1(&pp.poly_ck, &c_poly);
let openings = KZGCommit::<E>::multiple_open::<E::G2Affine>(&c_poly, &pp.g2_powers, pp.n);
let table = TableInput::<E> {
c_poly,
c_com,
openings,
};
table.store(&path);
// 3. Load
let table_loaded = TableInput::load(&path);
// 4. Test
assert_eq!(table, table_loaded);
std::fs::remove_file(&path).expect("File can not be deleted");
}
#[allow(non_snake_case)]
#[test]
fn test_multiple_lookups() {
do_multiple_lookups::<Bls12_381>();
do_multiple_lookups::<Bls12_377>();
}
#[allow(non_snake_case)]
fn do_multiple_lookups<E: PairingEngine>() {
let mut rng = ark_std::test_rng();
const MIN_LOG_N: usize = 7;
const MAX_LOG_N: usize = 9;
const EPS: usize = 1;
const MIN_LOG_M: usize = 2;
const MAX_LOG_M: usize = 5;
for n in MIN_LOG_N..=MAX_LOG_N {
// 1. Setup
let N: usize = 1 << n;
let powers_size: usize = N + 2; // SRS SIZE
println!("N={}", N);
let temp_m = n; // dummy
let mut pp = PublicParameters::<E>::setup(&powers_size, &N, &temp_m, &n);
let actual_degree = N - EPS;
// println!("time for powers of tau {:?} for N={:?}", now.elapsed(),N);
// 2. Poly and openings
let table = get_poly_and_g2_openings(&pp, actual_degree);
for logm in MIN_LOG_M..=std::cmp::min(MAX_LOG_M, n / 2 - 1) {
// 3. Setup
let now = Instant::now();
let m = (1 << logm) + 1;
println!("m={}", m);
pp.regenerate_lookup_params(m);
println!("Time to generate aux domain {:?}", now.elapsed());
// 4. Positions
let mut positions: Vec<usize> = vec![];
for j in 0..m {
// generate positions evenly distributed in the set
let i_j: usize = j * (actual_degree / m);
positions.push(i_j);
}
// 5. generating phi
let blinder = E::Fr::rand(&mut rng);
let a_m = DensePolynomial::from_coefficients_slice(&[blinder]);
let mut phi_poly = a_m.mul_by_vanishing_poly(pp.domain_m);
let c_poly_local = table.c_poly.clone();
for j in 0..m {
phi_poly = &phi_poly
+ &(&pp.lagrange_polynomials_m[j]
* c_poly_local.evaluate(&pp.domain_N.element(positions[j])));
// adding c(w^{i_j})*mu_j(X)
}
for j in m..pp.domain_m.size() {
phi_poly = &phi_poly
+ &(&pp.lagrange_polynomials_m[j]
* c_poly_local.evaluate(&pp.domain_N.element(0)));
// adding c(w^{i_j})*mu_j(X)
}
// 6. Running proofs
let now = Instant::now();
let c_com = KZGCommit::<E>::commit_g1(&pp.poly_ck, &table.c_poly);
let phi_com = KZGCommit::<E>::commit_g1(&pp.poly_ck, &phi_poly);
println!("Time to generate inputs = {:?}", now.elapsed());
let lookup_instance = LookupInstance { c_com, phi_com };
let prover_input = LookupProverInput {
c_poly: table.c_poly.clone(),
phi_poly,
positions,
openings: table.openings.clone(),
};
let now = Instant::now();
let (proof, unity_proof) =
compute_lookup_proof::<E, _>(&lookup_instance, &prover_input, &pp, &mut rng);
println!("Time to generate proof for = {:?}", now.elapsed());
let now = Instant::now();
let res = verify_lookup_proof(
&table.c_com,
&phi_com,
&proof,
&unity_proof,
&pp,
&mut rng,
);
println!("Time to verify proof for = {:?}", now.elapsed());
assert!(res);
println!("Lookup test succeeded");
}
}
}
#[allow(non_snake_case)]
#[test]
fn test_lookup() {
do_lookup::<Bls12_381>();
do_lookup::<Bls12_377>();
}
fn do_lookup<E: PairingEngine>() {
let mut rng = ark_std::test_rng();
let now = Instant::now();
let (prover_input, srs) = generate_lookup_input(&mut rng);
println!(
"Time to generate parameters for n={:?} = {:?}",
srs.n,
now.elapsed()
);
// kzg_test(&srs);
let c_com = KZGCommit::<E>::commit_g1(&srs.poly_ck, &prover_input.c_poly);
let phi_com = KZGCommit::<E>::commit_g1(&srs.poly_ck, &prover_input.phi_poly);
let lookup_instance = LookupInstance { c_com, phi_com };
let now = Instant::now();
let (proof, unity_proof) =
compute_lookup_proof(&lookup_instance, &prover_input, &srs, &mut rng);
println!(
"Time to generate proof for m={:?} = {:?}",
srs.m,
now.elapsed()
);
let now = Instant::now();
let res = verify_lookup_proof(&c_com, &phi_com, &proof, &unity_proof, &srs, &mut rng);
println!(
"Time to verify proof for n={:?} = {:?}",
srs.n,
now.elapsed()
);
assert!(res);
println!("Lookup test succeeded");
}
}

357
src/multi/setup.rs Normal file
View File

@@ -0,0 +1,357 @@
use crate::util::trim;
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::{PrimeField, UniformRand};
use ark_poly::{
univariate::DensePolynomial, EvaluationDomain, Evaluations as EvaluationsOnDomain,
GeneralEvaluationDomain,
};
use ark_poly_commit::kzg10::*;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::{cfg_into_iter, One, Zero};
#[cfg(feature = "parallel")]
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use std::{
convert::TryInto,
fs::File,
io::{Read, Write},
time::Instant,
};
// structure of public parameters
#[allow(non_snake_case)]
pub struct PublicParameters<E: PairingEngine> {
pub poly_ck: Powers<'static, E>,
pub domain_m: GeneralEvaluationDomain<E::Fr>,
pub domain_n: GeneralEvaluationDomain<E::Fr>,
pub domain_N: GeneralEvaluationDomain<E::Fr>,
pub verifier_pp: VerifierPublicParameters<E>,
pub lagrange_polynomials_n: Vec<DensePolynomial<E::Fr>>,
pub lagrange_polynomials_m: Vec<DensePolynomial<E::Fr>>,
pub id_poly: DensePolynomial<E::Fr>,
pub N: usize,
pub m: usize,
pub n: usize,
pub g2_powers: Vec<E::G2Affine>,
}
pub struct LookupParameters<F: PrimeField> {
m: usize,
lagrange_polynomials_m: Vec<DensePolynomial<F>>,
domain_m: GeneralEvaluationDomain<F>,
id_poly: DensePolynomial<F>,
}
impl<F: PrimeField> LookupParameters<F> {
fn new(m: usize) -> Self {
let domain_m: GeneralEvaluationDomain<F> = GeneralEvaluationDomain::new(m).unwrap();
// id_poly(X) = 1 for omega_m in range and 0 for omega_m not in range.
let mut id_vec = Vec::new();
for _ in 0..m {
id_vec.push(F::one());
}
for _ in m..domain_m.size() {
id_vec.push(F::zero());
}
let id_poly = EvaluationsOnDomain::from_vec_and_domain(id_vec, domain_m).interpolate();
let mut lagrange_polynomials_m: Vec<DensePolynomial<F>> = Vec::new();
for i in 0..domain_m.size() {
let evals: Vec<F> = cfg_into_iter!(0..domain_m.size())
.map(|k| if k == i { F::one() } else { F::zero() })
.collect();
lagrange_polynomials_m
.push(EvaluationsOnDomain::from_vec_and_domain(evals, domain_m).interpolate());
}
Self {
m,
lagrange_polynomials_m,
domain_m,
id_poly,
}
}
}
// smaller set of public parameters used by verifier
pub struct VerifierPublicParameters<E: PairingEngine> {
pub poly_vk: VerifierKey<E>,
pub domain_m_size: usize,
}
impl<E: PairingEngine> PublicParameters<E> {
pub fn regenerate_lookup_params(&mut self, m: usize) {
let lp = LookupParameters::new(m);
self.m = lp.m;
self.lagrange_polynomials_m = lp.lagrange_polynomials_m;
self.domain_m = lp.domain_m;
self.id_poly = lp.id_poly;
}
// store powers of g in a file
pub fn store(&self, path: &str) {
// 1. Powers of g
let mut g_bytes = vec![];
let mut f = File::create(path).expect("Unable to create file");
let deg: u32 = self.poly_ck.powers_of_g.len().try_into().unwrap();
let deg_bytes = deg.to_be_bytes();
f.write_all(&deg_bytes).expect("Unable to write data");
let deg32: usize = deg.try_into().unwrap();
for i in 0..deg32 {
self.poly_ck.powers_of_g[i]
.into_projective()
.into_affine()
.serialize_uncompressed(&mut g_bytes)
.ok();
}
f.write_all(&g_bytes).expect("Unable to write data");
// 2. Powers of gamma g
let deg_gamma: u32 = self.poly_ck.powers_of_gamma_g.len().try_into().unwrap();
let mut gg_bytes = vec![];
let deg_bytes = deg_gamma.to_be_bytes();
f.write_all(&deg_bytes).expect("Unable to write data");
// use deg_gamma here so the number of elements written matches the length header
let deg32: usize = deg_gamma.try_into().unwrap();
for i in 0..deg32 {
self.poly_ck.powers_of_gamma_g[i]
.into_projective()
.into_affine()
.serialize_uncompressed(&mut gg_bytes)
.ok();
}
f.write_all(&gg_bytes).expect("Unable to write data");
// 3. Verifier key
let mut h_bytes = vec![];
self.verifier_pp
.poly_vk
.h
.serialize_uncompressed(&mut h_bytes)
.ok();
self.verifier_pp
.poly_vk
.beta_h
.serialize_uncompressed(&mut h_bytes)
.ok();
f.write_all(&h_bytes).expect("Unable to write data");
// 4. g2 powers
let mut g2_bytes = vec![];
let deg2: u32 = self.g2_powers.len().try_into().unwrap();
let deg2_bytes = deg2.to_be_bytes();
f.write_all(&deg2_bytes).expect("Unable to write data");
let deg2_32: usize = deg2.try_into().unwrap();
for i in 0..deg2_32 {
self.g2_powers[i]
.into_projective()
.into_affine()
.serialize_uncompressed(&mut g2_bytes)
.ok();
}
f.write_all(&g2_bytes).expect("Unable to write data");
}
// load powers of g from a file
pub fn load(path: &str) -> (Powers<'static, E>, VerifierKey<E>, Vec<E::G2Affine>) {
const G1_UNCOMPR_SIZE: usize = 96;
const G2_UNCOMPR_SIZE: usize = 192;
let mut data = Vec::new();
let mut f = File::open(path).expect("Unable to open file");
f.read_to_end(&mut data).expect("Unable to read data");
// 1. reading g powers
let mut cur_counter: usize = 0;
let deg_bytes: [u8; 4] = (&data[0..4]).try_into().unwrap();
let deg: u32 = u32::from_be_bytes(deg_bytes);
let mut powers_of_g = vec![];
let deg32: usize = deg.try_into().unwrap();
cur_counter += 4;
for i in 0..deg32 {
let buf_bytes =
&data[cur_counter + i * G1_UNCOMPR_SIZE..cur_counter + (i + 1) * G1_UNCOMPR_SIZE];
let tmp = E::G1Affine::deserialize_unchecked(buf_bytes).unwrap();
powers_of_g.push(tmp);
}
cur_counter += deg32 * G1_UNCOMPR_SIZE;
// 2. reading gamma g powers
let deg_bytes: [u8; 4] = (&data[cur_counter..cur_counter + 4]).try_into().unwrap();
let deg: u32 = u32::from_be_bytes(deg_bytes);
let mut powers_of_gamma_g = vec![];
let deg32: usize = deg.try_into().unwrap();
cur_counter += 4;
for i in 0..deg32 {
let buf_bytes =
&data[cur_counter + i * G1_UNCOMPR_SIZE..cur_counter + (i + 1) * G1_UNCOMPR_SIZE];
let tmp = E::G1Affine::deserialize_unchecked(buf_bytes).unwrap();
powers_of_gamma_g.push(tmp);
}
cur_counter += deg32 * G1_UNCOMPR_SIZE;
// 3. reading verifier key
let buf_bytes = &data[cur_counter..cur_counter + G2_UNCOMPR_SIZE];
let h = E::G2Affine::deserialize_unchecked(buf_bytes).unwrap();
cur_counter += G2_UNCOMPR_SIZE;
let buf_bytes = &data[cur_counter..cur_counter + G2_UNCOMPR_SIZE];
let beta_h = E::G2Affine::deserialize_unchecked(buf_bytes).unwrap();
cur_counter += G2_UNCOMPR_SIZE;
// 4. reading G2 powers
let deg2_bytes: [u8; 4] = (&data[cur_counter..cur_counter + 4]).try_into().unwrap();
let deg2: u32 = u32::from_be_bytes(deg2_bytes);
let mut g2_powers = vec![];
let deg2_32: usize = deg2.try_into().unwrap();
cur_counter += 4;
for _ in 0..deg2_32 {
let buf_bytes = &data[cur_counter..cur_counter + G2_UNCOMPR_SIZE];
let tmp = E::G2Affine::deserialize_unchecked(buf_bytes).unwrap();
g2_powers.push(tmp);
cur_counter += G2_UNCOMPR_SIZE;
}
let vk = VerifierKey {
g: powers_of_g[0],
gamma_g: powers_of_gamma_g[0],
h,
beta_h,
prepared_h: h.into(),
prepared_beta_h: beta_h.into(),
};
let powers = Powers {
powers_of_g: ark_std::borrow::Cow::Owned(powers_of_g),
powers_of_gamma_g: ark_std::borrow::Cow::Owned(powers_of_gamma_g),
};
(powers, vk, g2_powers)
}
// setup algorithm for index_hiding_polycommit
// also includes a bunch of precomputation.
// @max_degree: max degree of the table polynomial C(X), also the size of the trusted setup
// @N: domain size on which proofs are constructed; should not be smaller than max_degree
// @m: lookup size; can be changed later
// @n: supplementary domain size for the unity proofs; should be at least 6 + log N
#[allow(non_snake_case)]
pub fn setup(max_degree: &usize, N: &usize, m: &usize, n: &usize) -> PublicParameters<E> {
// Setup algorithm. To be replaced by output of a universal setup before being
// production ready.
// let mut srs = KzgBls12_381::setup(4, true, rng).unwrap();
let poly_ck: Powers<'static, E>;
let poly_vk: VerifierKey<E>;
let mut g2_powers: Vec<E::G2Affine> = Vec::new();
// try opening the file. If it exists load the setup from there, otherwise
// generate
let path = format!("srs/srs_{}_{}.setup", max_degree, E::Fq::size_in_bits());
let res = File::open(path.clone());
let store_to_file: bool;
match res {
Ok(_) => {
let now = Instant::now();
let (_poly_ck, _poly_vk, _g2_powers) = PublicParameters::load(&path);
println!("time to load powers = {:?}", now.elapsed());
store_to_file = false;
g2_powers = _g2_powers;
poly_ck = _poly_ck;
poly_vk = _poly_vk;
},
Err(_) => {
let rng = &mut ark_std::test_rng();
let now = Instant::now();
let srs =
KZG10::<E, DensePolynomial<E::Fr>>::setup(*max_degree, true, rng).unwrap();
println!("time to setup powers = {:?}", now.elapsed());
// trim down to size
let (poly_ck2, poly_vk2) = trim::<E, DensePolynomial<E::Fr>>(&srs, *max_degree);
poly_ck = Powers {
powers_of_g: ark_std::borrow::Cow::Owned(poly_ck2.powers_of_g.into()),
powers_of_gamma_g: ark_std::borrow::Cow::Owned(
poly_ck2.powers_of_gamma_g.into(),
),
};
poly_vk = poly_vk2;
// need some powers of g2
// The arkworks setup doesn't output these powers, but it does use a fixed
// randomness to generate them, so we can generate the powers of g2 directly.
let beta = E::Fr::rand(rng);
let mut temp = poly_vk.h;
for _ in 0..poly_ck.powers_of_g.len() {
g2_powers.push(temp);
temp = temp.mul(beta).into_affine();
}
store_to_file = true;
},
}
// domain where openings {w_i}_{i in I} are embedded
let domain_n: GeneralEvaluationDomain<E::Fr> = GeneralEvaluationDomain::new(*n).unwrap();
let domain_N: GeneralEvaluationDomain<E::Fr> = GeneralEvaluationDomain::new(*N).unwrap();
// precomputation to speed up prover
// lagrange_polynomials[i] = polynomial equal to 0 at w^j for j!= i and 1 at
// w^i
let mut lagrange_polynomials_n: Vec<DensePolynomial<E::Fr>> = Vec::new();
for i in 0..domain_n.size() {
let evals: Vec<E::Fr> = cfg_into_iter!(0..domain_n.size())
.map(|k| if k == i { E::Fr::one() } else { E::Fr::zero() })
.collect();
lagrange_polynomials_n
.push(EvaluationsOnDomain::from_vec_and_domain(evals, domain_n).interpolate());
}
let lp = LookupParameters::new(*m);
let verifier_pp = VerifierPublicParameters {
poly_vk,
domain_m_size: lp.domain_m.size(),
};
let pp = PublicParameters {
poly_ck,
domain_m: lp.domain_m,
domain_n,
lagrange_polynomials_n,
lagrange_polynomials_m: lp.lagrange_polynomials_m,
id_poly: lp.id_poly,
domain_N,
verifier_pp,
N: *N,
n: *n,
m: lp.m,
g2_powers,
};
if store_to_file {
pp.store(&path);
}
pp
}
}
#[test]
#[allow(non_snake_case)]
pub fn test_load() {
use ark_bls12_381::Bls12_381;
let n: usize = 4;
let N: usize = 1 << n;
let powers_size: usize = 4 * N; // SRS SIZE
let temp_m = n; // dummy
let pp = PublicParameters::<Bls12_381>::setup(&powers_size, &N, &temp_m, &n);
let path = "powers.log";
pp.store(path);
let loaded = PublicParameters::<Bls12_381>::load(path);
assert_eq!(pp.poly_ck.powers_of_g, loaded.0.powers_of_g);
assert_eq!(pp.poly_ck.powers_of_gamma_g, loaded.0.powers_of_gamma_g);
assert_eq!(pp.verifier_pp.poly_vk.h, loaded.1.h);
assert_eq!(pp.verifier_pp.poly_vk.beta_h, loaded.1.beta_h);
assert_eq!(pp.g2_powers, loaded.2);
std::fs::remove_file(&path).expect("File can not be deleted");
}

564
src/multi/unity.rs Normal file
View File

@@ -0,0 +1,564 @@
// This file includes Caulk's unity prover and verifier for multi openings.
// The protocol is described in Figure 4.
use super::setup::PublicParameters;
use crate::{util::convert_to_bigints, CaulkTranscript, KZGCommit};
use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::Field;
use ark_poly::{
univariate::DensePolynomial, EvaluationDomain, Evaluations as EvaluationsOnDomain, Polynomial,
UVPolynomial,
};
use ark_std::{end_timer, start_timer, One, UniformRand, Zero};
use rand::RngCore;
use std::ops::MulAssign;
// output structure of prove_unity
pub struct ProofMultiUnity<E: PairingEngine> {
pub g1_u_bar: E::G1Affine,
pub g1_h_1: E::G1Affine,
pub g1_h_2: E::G1Affine,
pub g1_u_bar_alpha: E::G1Affine,
pub g1_h_2_alpha: E::G1Affine,
pub v1: E::Fr,
pub v2: E::Fr,
pub v3: E::Fr,
pub pi_1: E::G1Affine,
pub pi_2: E::G1Affine,
pub pi_3: E::G1Affine,
pub pi_4: E::G1Affine,
pub pi_5: E::G1Affine,
}
// Prove knowledge of vec_u_evals such that g1_u = g1^(sum_j u_j mu_j(x)) and
// u_j^N = 1
#[allow(non_snake_case)]
pub fn prove_multiunity<E: PairingEngine>(
pp: &PublicParameters<E>,
transcript: &mut CaulkTranscript<E::Fr>,
g1_u: &E::G1Affine,
vec_u_evals: &[E::Fr],
u_poly_quotient: DensePolynomial<E::Fr>,
) -> ProofMultiUnity<E> {
let timer = start_timer!(|| "prove multiunity");
// The test_rng is deterministic. It should be replaced with an actual random
// generator.
let rng_arkworks = &mut ark_std::test_rng();
let n = pp.n;
let deg_blinders = 11 / n;
let z_Vm: DensePolynomial<E::Fr> = pp.domain_m.vanishing_polynomial().into();
let mut vec_u_evals = vec_u_evals.to_vec();
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 1. Compute polynomials u_s(X) = vec_u_polys[s] such that u_s( nu_i ) =
// w_i^{2^s}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
let step1_timer = start_timer!(|| "step 1");
let mut vec_u_polys = vec![
EvaluationsOnDomain::from_vec_and_domain(vec_u_evals.to_vec(), pp.domain_m).interpolate()
+ (&z_Vm * &u_poly_quotient),
];
for _ in 1..pp.domain_n.size() {
for u_eval in vec_u_evals.iter_mut() {
*u_eval = u_eval.square();
}
vec_u_polys.push(
EvaluationsOnDomain::from_vec_and_domain(vec_u_evals.to_vec(), pp.domain_m)
.interpolate()
+ (&z_Vm * &DensePolynomial::<E::Fr>::rand(deg_blinders, rng_arkworks)),
);
}
end_timer!(step1_timer);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 2. Compute U_bar(X,Y) = sum_{s= 1}^n u_{s-1} rho_s(Y)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
let step2_timer = start_timer!(|| "step 2");
// bivariate polynomials such that bipoly_U_bar[j] = a_j(Y) where U_bar(X,Y) =
// sum_j X^j a_j(Y)
let mut bipoly_U_bar = Vec::new();
// vec_u_polys[0] has an extended degree because it is blinded so use
// vec_u_polys[1] for the length
for j in 0..vec_u_polys[1].len() {
/*
Denoting u_{s-1}(X) = sum_j u_{s-1, j} X^j then
temp is a_j(Y) = sum_{s=1}^n u_{s-1, j} * rho_s(Y)
*/
let mut temp = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
for (s, u_poly) in vec_u_polys.iter().enumerate().take(n).skip(1) {
let u_s_j = DensePolynomial::from_coefficients_slice(&[u_poly[j]]);
temp += &(&u_s_j * &pp.lagrange_polynomials_n[s]);
}
// add a_j(X) to U_bar(X,Y)
bipoly_U_bar.push(temp);
}
end_timer!(step2_timer);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 3. Hs(X) = u_{s-1}^2(X) - u_s(X)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
let step3_timer = start_timer!(|| "step 3");
// id_poly(X) = 1 for omega_m in range and 0 for omega_m not in range.
let id_poly = pp.id_poly.clone();
// Hs(X) = (u_{s-1}^2(X) - u_s(X)) / zVm(X). Abort if doesn't divide.
let mut vec_H_s_polys: Vec<DensePolynomial<E::Fr>> = Vec::new();
for s in 1..n {
let (poly_H_s, remainder) = (&(&vec_u_polys[s - 1] * &vec_u_polys[s - 1])
- &vec_u_polys[s])
.divide_by_vanishing_poly(pp.domain_m)
.unwrap();
assert!(remainder.is_zero());
vec_H_s_polys.push(poly_H_s);
}
// Hn(X) = ( u_{n-1}^2(X) - id(X) ) / zVm(X). Abort if it doesn't divide.
let (poly_H_s, remainder) = (&(&vec_u_polys[n - 1] * &vec_u_polys[n - 1]) - &id_poly)
.divide_by_vanishing_poly(pp.domain_m)
.unwrap();
assert!(remainder.is_zero());
vec_H_s_polys.push(poly_H_s);
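// Taken together, H_1, ..., H_{n-1} force u_s = u_{s-1}^2 on domain_m and H_n forces
// u_{n-1}^2 = id there; at the active points (where id = 1) this gives u_0^(2^n) = 1,
// i.e. each looked-up u value is an N-th root of unity when N = 2^n (as in the tests).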
end_timer!(step3_timer);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 4. h_2(X,Y) = sum_{s=1}^n rho_s(Y) H_s(X)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
let step4_timer = start_timer!(|| "step 4");
// h_2[j] = a_j(Y) where h_2(X,Y) = sum_j X^j a_j(Y)
let mut bipoly_h_2 = Vec::new();
// first add H_1(X) rho_1(Y)
for j in 0..vec_H_s_polys[0].len() {
let h_0_j = DensePolynomial::from_coefficients_slice(&[vec_H_s_polys[0][j]]);
bipoly_h_2.push(&h_0_j * &pp.lagrange_polynomials_n[0]);
}
// In case the lengths of H_1(X) and H_2(X) differ, pad with zeros.
for _ in vec_H_s_polys[0].len()..vec_H_s_polys[1].len() {
let h_0_j = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
bipoly_h_2.push(h_0_j);
}
// h_2(X,Y) = sum_j ( sum_s H_{s,j} * rho_s(Y) ) X^j
for (j, coeff) in bipoly_h_2
.iter_mut()
.enumerate()
.take(vec_H_s_polys[1].len())
{
// h_2[j] = sum_s H_{s,j} * rho_s(Y)
for (s, H_s_poly) in vec_H_s_polys.iter().enumerate().take(n).skip(1) {
let h_s_j = DensePolynomial::from_coefficients_slice(&[H_s_poly[j]]);
// h_2[j] += H_{s,j} * rho_s(Y)
*coeff += &(&h_s_j * &pp.lagrange_polynomials_n[s]);
}
}
end_timer!(step4_timer);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 5. Commit to U_bar(X^n, X) and h_2(X^n, X)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
let step5_timer = start_timer!(|| "step 5");
let g1_u_bar = KZGCommit::<E>::bipoly_commit(pp, &bipoly_U_bar, pp.domain_n.size());
let g1_h_2 = KZGCommit::<E>::bipoly_commit(pp, &bipoly_h_2, pp.domain_n.size());
end_timer!(step5_timer);
////////////////////////////
// 6. alpha = Hash(g1_u, g1_u_bar, g1_h_2)
////////////////////////////
let step6_timer = start_timer!(|| "step 6");
transcript.append_element(b"u", g1_u);
transcript.append_element(b"u_bar", &g1_u_bar);
transcript.append_element(b"h2", &g1_h_2);
let alpha = transcript.get_and_append_challenge(b"alpha");
end_timer!(step6_timer);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 7. Compute h_1(Y)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
let step7_timer = start_timer!(|| "step 7");
// poly_U_alpha = sum_{s=1}^n u_{s-1}(alpha) rho_s(Y)
let mut poly_U_alpha = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
// poly_Usq_alpha = sum_{s=1}^n u_{s-1}^2(alpha) rho_s(Y)
let mut poly_Usq_alpha = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
for (s, u_poly) in vec_u_polys.iter().enumerate().take(n) {
let u_s_alpha = u_poly.evaluate(&alpha);
let mut temp = DensePolynomial::from_coefficients_slice(&[u_s_alpha]);
poly_U_alpha += &(&temp * &pp.lagrange_polynomials_n[s]);
temp = DensePolynomial::from_coefficients_slice(&[u_s_alpha.square()]);
poly_Usq_alpha += &(&temp * &pp.lagrange_polynomials_n[s]);
}
    // h1(Y) = [ U^2(alpha,Y) - sum_{s=1}^n u_{s-1}^2(alpha) rho_s(Y) ] / zVn(Y).
    // Abort if the division fails.
let (poly_h_1, remainder) = (&(&poly_U_alpha * &poly_U_alpha) - &poly_Usq_alpha)
.divide_by_vanishing_poly(pp.domain_n)
.unwrap();
assert!(remainder.is_zero(), "poly_h_1 does not divide");
end_timer!(step7_timer);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 8. Commit to h_1(Y)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
let step8_timer = start_timer!(|| "step 8");
assert!(pp.poly_ck.powers_of_g.len() >= poly_h_1.len());
let g1_h_1 = VariableBaseMSM::multi_scalar_mul(
&pp.poly_ck.powers_of_g,
convert_to_bigints(&poly_h_1.coeffs).as_slice(),
)
.into_affine();
end_timer!(step8_timer);
////////////////////////////
// 9. beta = Hash( g1_h_1 )
////////////////////////////
let step9_timer = start_timer!(|| "step 9");
transcript.append_element(b"h1", &g1_h_1);
let beta = transcript.get_and_append_challenge(b"beta");
end_timer!(step9_timer);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
    // 10. Compute p(Y) = U^2(alpha, beta) - ( u_bar(alpha, beta sigma) + id(alpha) rho_n(beta) )
    //     - h1(Y) zVn(beta) - zVm(alpha) h2(alpha, Y)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
let step10_timer = start_timer!(|| "step 10");
// p(Y) = U^2(alpha, beta)
let u_alpha_beta = poly_U_alpha.evaluate(&beta);
let mut poly_p = DensePolynomial::from_coefficients_slice(&[u_alpha_beta.square()]);
////////////////////////////
// p(Y) = p(Y) - ( u_bar(alpha, beta sigma) + id(alpha) rho_n(beta))
// u_bar_alpha_shiftbeta = u_bar(alpha, beta sigma)
let mut u_bar_alpha_shiftbeta = E::Fr::zero();
let beta_shift = beta * pp.domain_n.element(1);
    for (s, u_poly) in vec_u_polys.iter().enumerate().take(n).skip(1) {
        let u_s_alpha = u_poly.evaluate(&alpha);
u_bar_alpha_shiftbeta += u_s_alpha * pp.lagrange_polynomials_n[s].evaluate(&beta_shift);
}
// temp = u_bar(alpha, beta sigma) + id(alpha) rho_n(beta)
let temp = u_bar_alpha_shiftbeta
+ (id_poly.evaluate(&alpha) * pp.lagrange_polynomials_n[n - 1].evaluate(&beta));
let temp = DensePolynomial::from_coefficients_slice(&[temp]);
poly_p = &poly_p - &temp;
////////////////////////////
// p(Y) = p(Y) - h1(Y) zVn(beta)
let z_Vn: DensePolynomial<E::Fr> = pp.domain_n.vanishing_polynomial().into();
let temp = &DensePolynomial::from_coefficients_slice(&[z_Vn.evaluate(&beta)]) * &poly_h_1;
poly_p = &poly_p - &temp;
////////////////////////////
// p(Y) = p(Y) - z_Vm(alpha) h_2(alpha, Y)
// poly_h_2_alpha = h_2(alpha, Y)
let mut poly_h_2_alpha = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
for (s, H_s_poly) in vec_H_s_polys.iter().enumerate() {
let h_s_j = DensePolynomial::from_coefficients_slice(&[H_s_poly.evaluate(&alpha)]);
poly_h_2_alpha = &poly_h_2_alpha + &(&h_s_j * &pp.lagrange_polynomials_n[s]);
}
let temp =
&DensePolynomial::from_coefficients_slice(&[z_Vm.evaluate(&alpha)]) * &poly_h_2_alpha;
poly_p = &poly_p - &temp;
// check p(beta) = 0
assert!(poly_p.evaluate(&beta) == E::Fr::zero());
end_timer!(step10_timer);
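    // Note: for an honest prover p(beta) = 0 by construction. The verifier does
    // not see p(Y) itself; it reconstructs a commitment [P]_1 from v1, v2, v3 and
    // the proof commitments and checks (via pi_5) that [P]_1 opens to 0 at beta.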
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// 11. Open KZG commitments
//////////////////////////////////////////////////////////////////////////////////////////////////////////
let step11_timer = start_timer!(|| "step 11");
// KZG.Open( srs, u(X), deg = bot, X = alpha )
let (evals_1, pi_1) = KZGCommit::open_g1_batch(&pp.poly_ck, &vec_u_polys[0], None, &[alpha]);
// KZG.Open( srs, U_bar(X,Y), deg = bot, X = alpha )
let (g1_u_bar_alpha, pi_2, poly_u_bar_alpha) =
KZGCommit::partial_open_g1(pp, &bipoly_U_bar, pp.domain_n.size(), &alpha);
// KZG.Open( srs, h_2(X,Y), deg = bot, X = alpha )
let (g1_h_2_alpha, pi_3, _) =
KZGCommit::partial_open_g1(pp, &bipoly_h_2, pp.domain_n.size(), &alpha);
// KZG.Open( srs, U_bar(alpha,Y), deg = bot, Y = [1, beta, beta * sigma] )
// should evaluate to (0, v2, v3)
let (evals_2, pi_4) = KZGCommit::open_g1_batch(
&pp.poly_ck,
&poly_u_bar_alpha,
Some(&(pp.domain_n.size() - 1)),
&[E::Fr::one(), beta, beta * pp.domain_n.element(1)],
);
assert!(evals_2[0] == E::Fr::zero());
// KZG.Open(srs, p(Y), deg = n-1, Y = beta)
let (evals_3, pi_5) = KZGCommit::open_g1_batch(
&pp.poly_ck,
&poly_p,
Some(&(pp.domain_n.size() - 1)),
&[beta],
);
assert!(evals_3[0] == E::Fr::zero());
end_timer!(step11_timer);
end_timer!(timer);
ProofMultiUnity {
g1_u_bar,
g1_h_1,
g1_h_2,
g1_u_bar_alpha,
g1_h_2_alpha,
v1: evals_1[0],
v2: evals_2[1],
v3: evals_2[2],
pi_1,
pi_2,
pi_3,
pi_4,
pi_5,
}
}
// Verify that the prover knows vec_u_evals such that g1_u = g1^(sum_j u_j
// mu_j(x)) and u_j^N = 1
#[allow(non_snake_case)]
pub fn verify_multiunity<E: PairingEngine, R: RngCore>(
pp: &PublicParameters<E>,
transcript: &mut CaulkTranscript<E::Fr>,
g1_u: &E::G1Affine,
pi_unity: &ProofMultiUnity<E>,
rng: &mut R,
) -> bool {
let timer = start_timer!(|| "verify multiunity");
let mut pairing_inputs = verify_multiunity_defer_pairing(pp, transcript, g1_u, pi_unity);
assert_eq!(pairing_inputs.len(), 10);
let pairing_timer = start_timer!(|| "pairing product");
let mut zeta = E::Fr::rand(rng);
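    // The five deferred KZG checks each contribute two pairing inputs. They are
    // batched into a single product of pairings by scaling both G1 inputs of the
    // i-th check (i = 1..4) by zeta^(2^(i-1)) for a random zeta, so one
    // multi-pairing replaces five separate pairing-product checks.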
pairing_inputs[2].0.mul_assign(zeta);
pairing_inputs[3].0.mul_assign(zeta);
zeta.square_in_place();
pairing_inputs[4].0.mul_assign(zeta);
pairing_inputs[5].0.mul_assign(zeta);
zeta.square_in_place();
pairing_inputs[6].0.mul_assign(zeta);
pairing_inputs[7].0.mul_assign(zeta);
zeta.square_in_place();
pairing_inputs[8].0.mul_assign(zeta);
pairing_inputs[9].0.mul_assign(zeta);
let prepared_pairing_inputs: Vec<(E::G1Prepared, E::G2Prepared)> = pairing_inputs
.iter()
.map(|(g1, g2)| {
(
E::G1Prepared::from(g1.into_affine()),
E::G2Prepared::from(g2.into_affine()),
)
})
.collect();
let res = E::product_of_pairings(prepared_pairing_inputs.iter()).is_one();
end_timer!(pairing_timer);
end_timer!(timer);
res
}
// Verify that the prover knows vec_u_evals such that g1_u = g1^(sum_j u_j
// mu_j(x)) and u_j^N = 1
#[allow(non_snake_case)]
pub fn verify_multiunity_defer_pairing<E: PairingEngine>(
pp: &PublicParameters<E>,
transcript: &mut CaulkTranscript<E::Fr>,
g1_u: &E::G1Affine,
pi_unity: &ProofMultiUnity<E>,
) -> Vec<(E::G1Projective, E::G2Projective)> {
let timer = start_timer!(|| "verify multiunity (deferring pairing)");
////////////////////////////
// alpha = Hash(g1_u, g1_u_bar, g1_h_2)
////////////////////////////
transcript.append_element(b"u", g1_u);
transcript.append_element(b"u_bar", &pi_unity.g1_u_bar);
transcript.append_element(b"h2", &pi_unity.g1_h_2);
let alpha = transcript.get_and_append_challenge(b"alpha");
////////////////////////////
// beta = Hash( g1_h_1 )
////////////////////////////
transcript.append_element(b"h1", &pi_unity.g1_h_1);
let beta = transcript.get_and_append_challenge(b"beta");
/////////////////////////////
// Compute [P]_1
////////////////////////////
let u_alpha_beta = pi_unity.v1 * pp.lagrange_polynomials_n[0].evaluate(&beta) + pi_unity.v2;
    // g1_P = [ U^2 - (v3 + id(alpha) * rho_n(beta)) ]_1
let mut g1_P = pp.poly_ck.powers_of_g[0].mul(
u_alpha_beta * u_alpha_beta
- (pi_unity.v3
+ (pp.id_poly.evaluate(&alpha)
* pp.lagrange_polynomials_n[pp.n - 1].evaluate(&beta))),
);
// g1_P = g1_P - h1 zVn(beta)
let zVn = pp.domain_n.vanishing_polynomial();
g1_P -= pi_unity.g1_h_1.mul(zVn.evaluate(&beta));
// g1_P = g1_P - h2_alpha zVm(alpha)
let zVm = pp.domain_m.vanishing_polynomial();
g1_P -= pi_unity.g1_h_2_alpha.mul(zVm.evaluate(&alpha));
/////////////////////////////
// Check the KZG openings
////////////////////////////
let check1 = KZGCommit::<E>::verify_g1_defer_pairing(
&pp.poly_ck.powers_of_g,
&pp.g2_powers,
g1_u,
None,
&[alpha],
&[pi_unity.v1],
&pi_unity.pi_1,
);
let check2 = KZGCommit::partial_verify_g1_defer_pairing(
pp,
&pi_unity.g1_u_bar,
pp.domain_n.size(),
&alpha,
&pi_unity.g1_u_bar_alpha,
&pi_unity.pi_2,
);
let check3 = KZGCommit::partial_verify_g1_defer_pairing(
pp,
&pi_unity.g1_h_2,
pp.domain_n.size(),
&alpha,
&pi_unity.g1_h_2_alpha,
&pi_unity.pi_3,
);
let check4 = KZGCommit::<E>::verify_g1_defer_pairing(
&pp.poly_ck.powers_of_g,
&pp.g2_powers,
&pi_unity.g1_u_bar_alpha,
Some(&(pp.domain_n.size() - 1)),
&[E::Fr::one(), beta, beta * pp.domain_n.element(1)],
&[E::Fr::zero(), pi_unity.v2, pi_unity.v3],
&pi_unity.pi_4,
);
let check5 = KZGCommit::<E>::verify_g1_defer_pairing(
&pp.poly_ck.powers_of_g,
&pp.g2_powers,
&g1_P.into_affine(),
Some(&(pp.domain_n.size() - 1)),
&[beta],
&[E::Fr::zero()],
&pi_unity.pi_5,
);
let res = [
check1.as_slice(),
check2.as_slice(),
check3.as_slice(),
check4.as_slice(),
check5.as_slice(),
]
.concat();
end_timer!(timer);
res
}
#[cfg(test)]
pub mod tests {
use super::{prove_multiunity, verify_multiunity};
use crate::{util::convert_to_bigints, CaulkTranscript};
use ark_bls12_377::Bls12_377;
use ark_bls12_381::Bls12_381;
use ark_ec::{msm::VariableBaseMSM, PairingEngine, ProjectiveCurve};
use ark_poly::{
univariate::DensePolynomial, EvaluationDomain, Evaluations as EvaluationsOnDomain,
UVPolynomial,
};
use ark_std::test_rng;
use rand::Rng;
use std::time::Instant;
#[test]
fn test_unity() {
test_unity_helper::<Bls12_377>();
test_unity_helper::<Bls12_381>();
}
#[allow(non_snake_case)]
fn test_unity_helper<E: PairingEngine>() {
let mut rng = test_rng();
let n: usize = 8; // bitlength of poly degree
let max_degree: usize = (1 << n) + 2;
let N: usize = (1 << n) - 1;
let m_bitsize: usize = 3;
let m: usize = (1 << m_bitsize) - 1;
// run the setup
let now = Instant::now();
let pp = crate::multi::PublicParameters::<E>::setup(&max_degree, &N, &m, &n);
        println!(
            "time to setup multi openings of table size {:?} = {:?}",
N + 1,
now.elapsed()
);
////////////////////////////////////////////////////////////////////////////////////
// generating values for testing
////////////////////////////////////////////////////////////////////////////////////
// choose [u1, ..., um] such that uj**N = 1
let mut vec_u_evals: Vec<E::Fr> = Vec::new();
for _ in 0..m {
let j = rng.gen_range(0..pp.domain_N.size());
vec_u_evals.push(pp.domain_N.element(j));
}
        // choose a random quotient polynomial (here of degree 5) for blinding.
let u_poly_quotient = DensePolynomial::<E::Fr>::rand(5, &mut rng);
// X^m - 1
let z_Vm: DensePolynomial<E::Fr> = pp.domain_m.vanishing_polynomial().into();
// commit to polynomial u(X) = sum_j uj muj(X) + u_quotient(X) z_Vm(X)
let u_poly = &EvaluationsOnDomain::from_vec_and_domain(vec_u_evals.clone(), pp.domain_m)
.interpolate()
+ &(&u_poly_quotient * &z_Vm);
assert!(pp.poly_ck.powers_of_g.len() >= u_poly.len());
let g1_u = VariableBaseMSM::multi_scalar_mul(
&pp.poly_ck.powers_of_g,
convert_to_bigints(&u_poly.coeffs).as_slice(),
)
.into_affine();
////////////////////////////////////////////////////////////////////////////////////
// run the prover
////////////////////////////////////////////////////////////////////////////////////
let mut prover_transcript = CaulkTranscript::new();
let pi_unity = prove_multiunity::<E>(
&pp,
&mut prover_transcript,
&g1_u,
&vec_u_evals,
u_poly_quotient,
);
////////////////////////////////////////////////////////////////////////////////////
// run the verifier
////////////////////////////////////////////////////////////////////////////////////
let mut verifier_transcript = CaulkTranscript::new();
println!(
"unity proof verifies {:?}",
verify_multiunity::<E, _>(&pp, &mut verifier_transcript, &g1_u, &pi_unity, &mut rng)
);
}
}

75
src/pedersen.rs Normal file

@@ -0,0 +1,75 @@
// This file includes a prover and verifier for demonstrating knowledge of an
// opening of a Pedersen commitment. The protocol is informally described in
// Appendix A.2, Proof of Opening of a Pedersen Commitment
use crate::CaulkTranscript;
use ark_ec::{AffineCurve, ProjectiveCurve};
use ark_ff::PrimeField;
use ark_std::{end_timer, rand::RngCore, start_timer, UniformRand};
use std::marker::PhantomData;
// Parameters for pedersen commitment
pub struct PedersenParam<C: AffineCurve> {
pub g: C,
pub h: C,
}
// Structure of proof output by prove_pedersen
pub struct PedersenProof<C: AffineCurve> {
pub g1_r: C,
pub t1: C::ScalarField,
pub t2: C::ScalarField,
}
pub struct PedersenCommit<C: AffineCurve> {
phantom: PhantomData<C>,
}
impl<C: AffineCurve> PedersenCommit<C> {
// prove knowledge of a and b such that cm = g^a h^b
pub fn prove<R: RngCore>(
param: &PedersenParam<C>,
transcript: &mut CaulkTranscript<C::ScalarField>,
cm: &C,
a: &C::ScalarField,
b: &C::ScalarField,
rng: &mut R,
) -> PedersenProof<C> {
let timer = start_timer!(|| "prove pedersen commit");
// R = g^s1 h^s2
let s1 = C::ScalarField::rand(rng);
let s2 = C::ScalarField::rand(rng);
let g1_r = (param.g.mul(s1) + param.h.mul(s2.into_repr())).into_affine();
// c = Hash(cm, R)
transcript.append_element(b"commitment", cm);
transcript.append_element(b"g1_r", &g1_r);
let c = transcript.get_and_append_challenge(b"get c");
let t1 = s1 + c * a;
let t2 = s2 + c * b;
end_timer!(timer);
PedersenProof { g1_r, t1, t2 }
}
// Verify that prover knows a and b such that cm = g^a h^b
pub fn verify(
param: &PedersenParam<C>,
transcript: &mut CaulkTranscript<C::ScalarField>,
cm: &C,
proof: &PedersenProof<C>,
) -> bool {
let timer = start_timer!(|| "verify pedersen commit");
// compute c = Hash(cm, R)
transcript.append_element(b"commitment", cm);
transcript.append_element(b"g1_r", &proof.g1_r);
let c = transcript.get_and_append_challenge(b"get c");
// check that R g^(-t1) h^(-t2) cm^(c) = 1
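        // completeness: for an honest prover t1 = s1 + c*a and t2 = s2 + c*b, so
        // g^t1 h^t2 = (g^s1 h^s2) * (g^a h^b)^c = R * cm^c, which is the equality
        // checked below.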
let res = proof.g1_r.into_projective() + cm.mul(c)
== param.g.mul(proof.t1) + param.h.mul(proof.t2);
end_timer!(timer);
res
}
}
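// ---------------------------------------------------------------------------
// A minimal usage sketch of the prover/verifier above (not part of the
// original file): commit to (a, b), prove knowledge of the opening, verify.
// It assumes the bls12-381 re-exports and `CaulkTranscript` behave as used
// elsewhere in this crate.
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::{PedersenCommit, PedersenParam};
    use crate::CaulkTranscript;
    use ark_bls12_381::{Fr, G1Affine, G1Projective};
    use ark_ec::{AffineCurve, ProjectiveCurve};
    use ark_std::{test_rng, UniformRand};

    #[test]
    fn test_pedersen_opening() {
        let mut rng = test_rng();
        // random commitment key (g, h)
        let param = PedersenParam::<G1Affine> {
            g: G1Projective::rand(&mut rng).into_affine(),
            h: G1Projective::rand(&mut rng).into_affine(),
        };
        // cm = g^a h^b
        let a = Fr::rand(&mut rng);
        let b = Fr::rand(&mut rng);
        let cm = (param.g.mul(a) + param.h.mul(b)).into_affine();
        // prover and verifier must append the same elements to their transcripts
        let mut prover_transcript = CaulkTranscript::<Fr>::new();
        let mut verifier_transcript = CaulkTranscript::<Fr>::new();
        let proof = PedersenCommit::prove(&param, &mut prover_transcript, &cm, &a, &b, &mut rng);
        assert!(PedersenCommit::verify(
            &param,
            &mut verifier_transcript,
            &cm,
            &proof
        ));
    }
}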

295
src/single/mod.rs Normal file

@@ -0,0 +1,295 @@
// This file includes the Caulk prover and verifier for single openings.
// The protocol is described in Figure 1.
pub mod setup;
pub mod unity;
use crate::{
pedersen::{PedersenCommit, PedersenProof},
CaulkTranscript,
};
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::{Field, PrimeField};
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain};
use ark_std::{end_timer, rand::RngCore, start_timer, One, UniformRand, Zero};
use setup::{PublicParameters, VerifierPublicParameters};
use std::ops::Neg;
use unity::{
caulk_single_unity_prove, caulk_single_unity_verify, CaulkProofUnity, PublicParametersUnity,
VerifierPublicParametersUnity,
};
// Structure of opening proofs output by prove.
#[allow(non_snake_case)]
pub struct CaulkProof<E: PairingEngine> {
pub g2_z: E::G2Affine,
pub g1_T: E::G1Affine,
pub g2_S: E::G2Affine,
pub pi_ped: PedersenProof<E::G1Affine>,
pub pi_unity: CaulkProofUnity<E>,
}
// Proves knowledge of (i, Q, z, r) such that
// 1) Q is a KZG opening proof that g1_C opens to z at i
// 2) cm = g^z h^r
// Takes as input opening proof Q. Does not need knowledge of contents of C =
// g1_C.
#[allow(non_snake_case)]
#[allow(clippy::too_many_arguments)]
pub fn caulk_single_prove<E: PairingEngine, R: RngCore>(
pp: &PublicParameters<E>,
transcript: &mut CaulkTranscript<E::Fr>,
g1_C: &E::G1Affine,
cm: &E::G1Affine,
index: usize,
g1_q: &E::G1Affine,
v: &E::Fr,
r: &E::Fr,
rng: &mut R,
) -> CaulkProof<E> {
let timer = start_timer!(|| "single proof");
    // prover's blinders for zero-knowledge
let a = E::Fr::rand(rng);
let s = E::Fr::rand(rng);
let domain_H: GeneralEvaluationDomain<E::Fr> =
GeneralEvaluationDomain::new(pp.verifier_pp.domain_H_size).unwrap();
///////////////////////////////
// Compute [z]_2, [T]_1, and [S]_2
///////////////////////////////
// [z]_2 = [ a (x - omega^i) ]_2
let g2_z = (pp.verifier_pp.poly_vk.beta_h.mul(a)
+ pp.verifier_pp.poly_vk.h.mul(-a * domain_H.element(index)))
.into_affine();
    // [T]_1 = [ a^(-1) Q + s h ]_1 for Q the precomputed KZG opening proof.
let g1_T =
(g1_q.mul(a.inverse().unwrap()) + pp.verifier_pp.pedersen_param.h.mul(s)).into_affine();
// [S]_2 = [ - r - s z ]_2
let g2_S = (pp.verifier_pp.poly_vk.h.mul((-*r).into_repr()) + g2_z.mul((-s).into_repr()))
.into_affine();
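    // Informally, the verifier's pairing check
    //    e(-C + cm, [1]_2) * e([T]_1, [z]_2) * e([h]_1, [S]_2) = 1
    // holds for these choices: writing h = g^eta, the exponents sum to
    //    (-c(x) + v + r*eta) + (a^(-1) q(x) + s*eta) * a(x - w^i) + eta*(-r - s*a(x - w^i))
    //  = (v - c(x)) + q(x)(x - w^i) = 0,
    // using the KZG relation q(x)(x - w^i) = c(x) - v.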
///////////////////////////////
// Pedersen prove
///////////////////////////////
// hash the instance and the proof elements to determine hash inputs for
// Pedersen prover
transcript.append_element(b"0", &E::Fr::zero());
transcript.append_element(b"C", g1_C);
transcript.append_element(b"T", &g1_T);
transcript.append_element(b"z", &g2_z);
transcript.append_element(b"S", &g2_S);
    // proof that cm = g^v h^r
let pi_ped = PedersenCommit::prove(&pp.verifier_pp.pedersen_param, transcript, cm, v, r, rng);
///////////////////////////////
// Unity prove
///////////////////////////////
// hash the last round of the pedersen proof to determine hash input to the
// unity prover
transcript.append_element(b"t1", &pi_ped.t1);
transcript.append_element(b"t2", &pi_ped.t2);
// Setting up the public parameters for the unity prover
let pp_unity = PublicParametersUnity::from(pp);
    // proof that [z]_2 = [ a x - b ]_2 with a^n = b^n
let pi_unity = caulk_single_unity_prove(
&pp_unity,
transcript,
&g2_z,
&a,
&(a * domain_H.element(index)),
rng,
);
end_timer!(timer);
CaulkProof {
g2_z,
g1_T,
g2_S,
pi_ped,
pi_unity,
}
}
// Verifies that the prover knows of (i, Q, z, r) such that
// 1) Q is a KZG opening proof that g1_C opens to z at i
// 2) cm = g^z h^r
#[allow(non_snake_case)]
pub fn caulk_single_verify<E: PairingEngine>(
vk: &VerifierPublicParameters<E>,
transcript: &mut CaulkTranscript<E::Fr>,
g1_C: &E::G1Affine,
cm: &E::G1Affine,
proof: &CaulkProof<E>,
) -> bool {
let timer = start_timer!(|| "single verify");
///////////////////////////////
// Pairing check
///////////////////////////////
// check that e( - C + cm, [1]_2) + e( [T]_1, [z]_2 ) + e( [h]_1, [S]_2 ) = 1
let eq1: Vec<(E::G1Prepared, E::G2Prepared)> = vec![
((g1_C.neg() + *cm).into(), vk.poly_vk.prepared_h.clone()),
((proof.g1_T).into(), proof.g2_z.into()),
(vk.pedersen_param.h.into(), proof.g2_S.into()),
];
let check1 = E::product_of_pairings(&eq1).is_one();
///////////////////////////////
// Pedersen check
///////////////////////////////
// hash the instance and the proof elements to determine hash inputs for
// Pedersen prover
transcript.append_element(b"0", &E::Fr::zero());
transcript.append_element(b"C", g1_C);
transcript.append_element(b"T", &proof.g1_T);
transcript.append_element(b"z", &proof.g2_z);
transcript.append_element(b"S", &proof.g2_S);
    // verify knowledge of an opening (v, r) of cm = g^v h^r
let check2 = PedersenCommit::verify(&vk.pedersen_param, transcript, cm, &proof.pi_ped);
///////////////////////////////
// Unity check
///////////////////////////////
// hash the last round of the pedersen proof to determine hash input to the
// unity prover
transcript.append_element(b"t1", &proof.pi_ped.t1);
transcript.append_element(b"t2", &proof.pi_ped.t2);
let vk_unity = VerifierPublicParametersUnity::from(vk);
    // Verify that g2_z = [ a x - b ]_2 with (a/b)^N = 1
let check3 = caulk_single_unity_verify(&vk_unity, transcript, &proof.g2_z, &proof.pi_unity);
end_timer!(timer);
check1 && check2 && check3
}
#[cfg(test)]
mod tests {
use crate::{
caulk_single_prove, caulk_single_setup, caulk_single_verify, CaulkTranscript, KZGCommit,
};
use ark_bls12_381::{Bls12_381, Fr, G1Affine};
use ark_ec::{AffineCurve, ProjectiveCurve};
use ark_poly::{
univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain, Polynomial,
UVPolynomial,
};
use ark_poly_commit::kzg10::KZG10;
use ark_std::{test_rng, UniformRand};
type UniPoly381 = DensePolynomial<Fr>;
type KzgBls12_381 = KZG10<Bls12_381, UniPoly381>;
#[test]
#[allow(non_snake_case)]
fn test_caulk_single_end_to_end() {
for p in 4..7 {
let mut rng = test_rng();
// setting public parameters
// current kzg setup should be changed with output from a setup ceremony
let max_degree: usize = (1 << p) + 2;
let actual_degree: usize = (1 << p) - 1;
// run the setup
let pp = caulk_single_setup(max_degree, actual_degree, &mut rng);
// polynomial and commitment
// deterministic randomness. Should never be used in practice.
let c_poly = UniPoly381::rand(actual_degree, &mut rng);
let (g1_C, _) = KzgBls12_381::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
let g1_C = g1_C.0;
// point at which we will open c_com
let input_domain: GeneralEvaluationDomain<Fr> =
EvaluationDomain::new(actual_degree).unwrap();
let position = 1;
let omega_i = input_domain.element(position);
// z = c(w_i) and cm = g^z h^r for random r
let z = c_poly.evaluate(&omega_i);
let r = Fr::rand(&mut rng);
let cm = (pp.verifier_pp.pedersen_param.g.mul(z)
+ pp.verifier_pp.pedersen_param.h.mul(r))
.into_affine();
let mut prover_transcript = CaulkTranscript::<Fr>::new();
let mut verifier_transcript = CaulkTranscript::<Fr>::new();
            // open a single position directly (here position 1)
{
let a = KZGCommit::open_g1_batch(&pp.poly_ck, &c_poly, None, &[omega_i]);
let g1_q = a.1;
// run the prover
let proof_evaluate = caulk_single_prove(
&pp,
&mut prover_transcript,
&g1_C,
&cm,
position,
&g1_q,
&z,
&r,
&mut rng,
);
// run the verifier
assert!(caulk_single_verify(
&pp.verifier_pp,
&mut verifier_transcript,
&g1_C,
&cm,
&proof_evaluate,
));
}
// compute all openings
{
let g1_qs = KZGCommit::<Bls12_381>::multiple_open::<G1Affine>(
&c_poly,
&pp.poly_ck.powers_of_g,
p,
);
let g1_q = g1_qs[position];
// run the prover
let proof_evaluate = caulk_single_prove(
&pp,
&mut prover_transcript,
&g1_C,
&cm,
position,
&g1_q,
&z,
&r,
&mut rng,
);
// run the verifier
assert!(caulk_single_verify(
&pp.verifier_pp,
&mut verifier_transcript,
&g1_C,
&cm,
&proof_evaluate,
));
}
}
}
}

187
src/single/setup.rs Normal file

@@ -0,0 +1,187 @@
// This file includes the setup algorithm for Caulk with single openings.
// A full description of the setup is not formally given in the paper.
use crate::{util::trim, PedersenParam};
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::{Field, UniformRand};
use ark_poly::{
univariate::DensePolynomial, EvaluationDomain, Evaluations as EvaluationsOnDomain,
GeneralEvaluationDomain, UVPolynomial,
};
use ark_poly_commit::kzg10::*;
use ark_std::{cfg_into_iter, end_timer, rand::RngCore, start_timer, One, Zero};
#[cfg(feature = "parallel")]
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use std::cmp::max;
// structure of public parameters
#[allow(non_snake_case)]
pub struct PublicParameters<E: PairingEngine> {
pub poly_ck: Powers<'static, E>,
pub poly_ck_d: E::G1Affine,
pub lagrange_polynomials_Vn: Vec<DensePolynomial<E::Fr>>,
pub verifier_pp: VerifierPublicParameters<E>,
}
// smaller set of public parameters used by verifier
#[allow(non_snake_case)]
pub struct VerifierPublicParameters<E: PairingEngine> {
pub poly_ck_pen: E::G1Affine,
pub lagrange_scalars_Vn: Vec<E::Fr>,
pub poly_prod: DensePolynomial<E::Fr>,
pub poly_vk: VerifierKey<E>,
pub pedersen_param: PedersenParam<E::G1Affine>,
pub g1_x: E::G1Affine,
pub domain_H_size: usize,
pub logN: usize,
pub domain_Vn: GeneralEvaluationDomain<E::Fr>,
pub powers_of_g2: Vec<E::G2Affine>,
}
// setup algorithm for Caulk with single openings
// also includes a bunch of precomputation.
#[allow(non_snake_case)]
pub fn caulk_single_setup<E: PairingEngine, R: RngCore>(
max_degree: usize,
actual_degree: usize,
rng: &mut R,
) -> PublicParameters<E> {
let total_time = start_timer!(|| "total srs setup");
// domain where vector commitment is defined
let domain_H: GeneralEvaluationDomain<E::Fr> =
GeneralEvaluationDomain::new(actual_degree).unwrap();
let logN: usize = ((actual_degree as f32).log(2.0)).ceil() as usize;
// smaller domain for unity proofs with generator w
let domain_Vn: GeneralEvaluationDomain<E::Fr> = GeneralEvaluationDomain::new(6 + logN).unwrap();
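    // The unity prover (see single/unity.rs) stores its witness in the first
    // logN + 6 positions of this domain, hence the size 6 + logN (rounded up to
    // the next power of two by GeneralEvaluationDomain::new).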
// Determining how big an srs we need.
// Need an srs of size actual_degree to commit to the polynomial.
// Need an srs of size 2 * domain_Vn_size + 3 to run the unity prover.
// We take the larger of the two.
let poly_ck_size = max(actual_degree, 2 * domain_Vn.size() + 3);
// Setup algorithm. To be replaced by output of a universal setup before being
// production ready.
let powers_time = start_timer!(|| "setup powers");
let srs = KZG10::<E, DensePolynomial<E::Fr>>::setup(max(max_degree, poly_ck_size), true, rng)
.unwrap();
end_timer!(powers_time);
// trim down to size.
let (poly_ck, poly_vk) = trim::<E, DensePolynomial<E::Fr>>(&srs, poly_ck_size);
// g^x^d = maximum power given in setup
let poly_ck_d = srs.powers_of_g[srs.powers_of_g.len() - 1];
// g^x^(d-1) = penultimate power given in setup
let poly_ck_pen = srs.powers_of_g[srs.powers_of_g.len() - 2];
    // random pedersen commitment generator
let ped_h: E::G1Affine = E::G1Projective::rand(rng).into_affine();
// precomputation to speed up prover
// lagrange_polynomials_Vn[i] = polynomial equal to 0 at w^j for j!= i and 1 at
// w^i
let mut lagrange_polynomials_Vn: Vec<DensePolynomial<E::Fr>> = Vec::new();
// precomputation to speed up verifier.
// scalars such that lagrange_scalars_Vn[i] = prod_(j != i) (w^i - w^j)^(-1)
let mut lagrange_scalars_Vn: Vec<E::Fr> = Vec::new();
for i in 0..domain_Vn.size() {
let evals: Vec<E::Fr> = cfg_into_iter!(0..domain_Vn.size())
.map(|k| if k == i { E::Fr::one() } else { E::Fr::zero() })
.collect();
lagrange_polynomials_Vn
.push(EvaluationsOnDomain::from_vec_and_domain(evals, domain_Vn).interpolate());
}
for i in 0..5 {
let mut temp = E::Fr::one();
for j in 0..domain_Vn.size() {
if j != i {
temp *= domain_Vn.element(i) - domain_Vn.element(j);
}
}
lagrange_scalars_Vn.push(temp.inverse().unwrap());
}
    // also want the scalar for w^(logN + 5); it is stored at lagrange_scalars_Vn[5]
let mut temp = E::Fr::one();
for j in 0..domain_Vn.size() {
if j != (logN + 5) {
temp *= domain_Vn.element(logN + 5) - domain_Vn.element(j);
}
}
lagrange_scalars_Vn.push(temp.inverse().unwrap());
    // poly_prod = (X - 1) (X - w) (X - w^2) (X - w^3) (X - w^4) (X - w^(5 + logN)) (X - w^(6 + logN)).
    // For efficiency we do not include (X - w^i) for i > 6 + logN; the prover
    // sets these evaluations to 0 anyway.
let mut poly_prod = DensePolynomial::from_coefficients_slice(&[E::Fr::one()]);
for i in 0..domain_Vn.size() {
if i < 5 {
poly_prod = &poly_prod
* (&DensePolynomial::from_coefficients_slice(&[
-domain_Vn.element(i),
E::Fr::one(),
]))
}
if i == (5 + logN) {
poly_prod = &poly_prod
* (&DensePolynomial::from_coefficients_slice(&[
-domain_Vn.element(i),
E::Fr::one(),
]))
}
if i == (6 + logN) {
poly_prod = &poly_prod
* (&DensePolynomial::from_coefficients_slice(&[
-domain_Vn.element(i),
E::Fr::one(),
]))
}
}
// ped_g = g^x^0 from kzg commitment key.
let ped_g = poly_ck.powers_of_g[0];
    // need some powers of g2
    // arkworks' setup doesn't expose these powers, but it derives them from the
    // randomness it is given; re-running the same fixed randomness (test_rng)
    // here lets us regenerate beta and hence the powers of g2 directly.
let rng = &mut ark_std::test_rng();
let beta = E::Fr::rand(rng);
let mut temp = poly_vk.h;
let mut powers_of_g2: Vec<E::G2Affine> = Vec::new();
for _ in 0..3 {
powers_of_g2.push(temp);
temp = temp.mul(beta).into_affine();
}
let verifier_pp = VerifierPublicParameters {
poly_ck_pen,
lagrange_scalars_Vn,
poly_prod,
poly_vk,
pedersen_param: PedersenParam { g: ped_g, h: ped_h },
g1_x: srs.powers_of_g[1],
domain_H_size: domain_H.size(),
logN,
domain_Vn,
powers_of_g2,
};
let pp = PublicParameters {
poly_ck,
poly_ck_d,
lagrange_polynomials_Vn,
verifier_pp,
};
end_timer!(total_time);
pp
}

421
src/single/unity.rs Normal file

@@ -0,0 +1,421 @@
// This file includes the Caulk's unity prover and verifier for single openings.
// The protocol is described in Figure 2.
use super::setup::{PublicParameters, VerifierPublicParameters};
use crate::{kzg::KZGCommit, CaulkTranscript};
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::Field;
use ark_poly::{
univariate::DensePolynomial, EvaluationDomain, Evaluations as EvaluationsOnDomain,
GeneralEvaluationDomain, Polynomial, UVPolynomial,
};
use ark_poly_commit::kzg10::*;
use ark_std::{cfg_into_iter, end_timer, rand::RngCore, start_timer, One, UniformRand, Zero};
#[cfg(feature = "parallel")]
use rayon::iter::{IntoParallelIterator, ParallelIterator};
// prover public parameters structure for caulk_single_unity_prove
#[allow(non_snake_case)]
pub struct PublicParametersUnity<E: PairingEngine> {
pub poly_ck: Powers<'static, E>,
pub gxd: E::G1Affine,
pub gxpen: E::G1Affine,
pub lagrange_polynomials_Vn: Vec<DensePolynomial<E::Fr>>,
pub poly_prod: DensePolynomial<E::Fr>,
pub logN: usize,
pub domain_Vn: GeneralEvaluationDomain<E::Fr>,
}
// verifier parameters structure for caulk_single_unity_verify
#[allow(non_snake_case)]
pub struct VerifierPublicParametersUnity<E: PairingEngine> {
pub poly_vk: VerifierKey<E>,
pub gxpen: E::G1Affine,
pub g1: E::G1Affine,
pub g1_x: E::G1Affine,
pub lagrange_scalars_Vn: Vec<E::Fr>,
pub poly_prod: DensePolynomial<E::Fr>,
pub logN: usize,
pub domain_Vn: GeneralEvaluationDomain<E::Fr>,
pub powers_of_g2: Vec<E::G2Affine>,
}
// output structure of caulk_single_unity_prove
#[allow(non_snake_case)]
pub struct CaulkProofUnity<E: PairingEngine> {
pub g1_F: E::G1Affine,
pub g1_H: E::G1Affine,
pub v1: E::Fr,
pub v2: E::Fr,
pub pi1: E::G1Affine,
pub pi2: E::G1Affine,
}
impl<E: PairingEngine> From<&PublicParameters<E>> for PublicParametersUnity<E> {
fn from(pp: &PublicParameters<E>) -> Self {
Self {
poly_ck: pp.poly_ck.clone(),
gxd: pp.poly_ck_d,
gxpen: pp.verifier_pp.poly_ck_pen,
lagrange_polynomials_Vn: pp.lagrange_polynomials_Vn.clone(),
poly_prod: pp.verifier_pp.poly_prod.clone(),
logN: pp.verifier_pp.logN,
domain_Vn: pp.verifier_pp.domain_Vn,
}
}
}
impl<E: PairingEngine> From<&VerifierPublicParameters<E>> for VerifierPublicParametersUnity<E> {
fn from(vk: &VerifierPublicParameters<E>) -> Self {
Self {
poly_vk: vk.poly_vk.clone(),
gxpen: vk.poly_ck_pen,
g1: vk.pedersen_param.g,
g1_x: vk.g1_x,
lagrange_scalars_Vn: vk.lagrange_scalars_Vn.clone(),
poly_prod: vk.poly_prod.clone(),
logN: vk.logN,
domain_Vn: vk.domain_Vn,
powers_of_g2: vk.powers_of_g2.clone(),
}
}
}
// Prove knowledge of a, b such that g2_z = [ax - b]_2 and a^n = b^n
#[allow(non_snake_case)]
pub fn caulk_single_unity_prove<E: PairingEngine, R: RngCore>(
pp: &PublicParametersUnity<E>,
transcript: &mut CaulkTranscript<E::Fr>,
g2_z: &E::G2Affine,
a: &E::Fr,
b: &E::Fr,
rng: &mut R,
) -> CaulkProofUnity<E> {
let timer = start_timer!(|| "single unity prove");
// a_poly = a X - b
let a_poly = DensePolynomial::from_coefficients_slice(&[-*b, *a]);
    // prover's blinders for zero-knowledge
let r0 = E::Fr::rand(rng);
let r1 = E::Fr::rand(rng);
let r2 = E::Fr::rand(rng);
let r3 = E::Fr::rand(rng);
let r_poly = DensePolynomial::from_coefficients_slice(&[r1, r2, r3]);
// roots of unity in domain of size m = log_2(n) + 6
let sigma = pp.domain_Vn.element(1);
// X^n - 1
let z: DensePolynomial<E::Fr> = pp.domain_Vn.vanishing_polynomial().into();
// computing [ (a/b), (a/b)^2, (a/b)^4, ..., (a/b)^(2^logN) = (a/b)^n ]
let mut a_div_b = *a * (*b).inverse().unwrap();
let mut vec_a_div_b: Vec<E::Fr> = Vec::new();
for _ in 0..(pp.logN + 1) {
vec_a_div_b.push(a_div_b);
a_div_b = a_div_b * a_div_b;
}
////////////////////////////
// computing f(X). First compute in domain.
////////////////////////////
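    // Layout of the evaluations of f over domain_Vn (matching the closure below):
    //   position 0: a - b,              position 1: a*sigma - b,
    //   position 2: a,                  position 3: b,
    //   positions 4..(logN + 4): the squaring chain (a/b)^(2^j) for j = 0..logN,
    //   position logN + 5: the blinder r0, remaining positions: 0.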
let f_evals: Vec<E::Fr> = cfg_into_iter!(0..pp.domain_Vn.size())
.map(|k| {
if k == 0 {
*a - *b
} else if k == 1 {
*a * sigma - *b
} else if k == 2 {
*a
} else if k == 3 {
*b
} else if k > 3 && k < (pp.logN + 5) {
vec_a_div_b[k - 4]
} else if k == pp.logN + 5 {
r0
} else {
E::Fr::zero()
}
})
.collect();
let f_poly = &EvaluationsOnDomain::from_vec_and_domain(f_evals, pp.domain_Vn).interpolate()
+ &(&r_poly * &z);
// computing f( sigma^(-1) X) and f( sigma^(-2) X)
let mut f_poly_shift_1 = f_poly.clone();
let mut f_poly_shift_2 = f_poly.clone();
let mut shift_1 = E::Fr::one();
let mut shift_2 = E::Fr::one();
for i in 0..f_poly.len() {
f_poly_shift_1[i] *= shift_1;
f_poly_shift_2[i] *= shift_2;
shift_1 *= pp.domain_Vn.element(pp.domain_Vn.size() - 1);
shift_2 *= pp.domain_Vn.element(pp.domain_Vn.size() - 2);
}
////////////////////////////
// computing h(X). First compute p(X) then divide.
////////////////////////////
    // p(X) = (f(X) - a(X)) (rho_1(X) + rho_2(X))
let mut p_poly =
&(&f_poly - &a_poly) * &(&pp.lagrange_polynomials_Vn[0] + &pp.lagrange_polynomials_Vn[1]);
// p(X) = p(X) + ( (1 - sigma) f(X) - f(sigma^(-2)X) + f(sigma^(-1) X) )
// rho_3(X)
p_poly = &p_poly
+ &(&(&(&(&DensePolynomial::from_coefficients_slice(&[(E::Fr::one() - sigma)])
* &f_poly)
- &f_poly_shift_2)
+ &f_poly_shift_1)
* &pp.lagrange_polynomials_Vn[2]);
// p(X) = p(X) + ( -sigma f(sigma^(-1) X) + f(sigma^(-2)X) + f(X) ) rho_4(X)
p_poly = &p_poly
+ &(&(&(&(&DensePolynomial::from_coefficients_slice(&[-sigma]) * &f_poly_shift_1)
+ &f_poly_shift_2)
+ &f_poly)
* &pp.lagrange_polynomials_Vn[3]);
// p(X) = p(X) + ( f(X) f(sigma^(-1) X) - f(sigma^(-2)X) ) rho_5(X)
p_poly = &p_poly
+ &(&(&(&f_poly * &f_poly_shift_1) - &f_poly_shift_2) * &pp.lagrange_polynomials_Vn[4]);
    // p(X) = p(X) + ( f(X) - f(sigma^(-1) X)^2 ) prod_(i not in
    // [5, .. , logN + 4]) (X - sigma^i)
p_poly = &p_poly + &(&(&f_poly - &(&f_poly_shift_1 * &f_poly_shift_1)) * &pp.poly_prod);
// p(X) = p(X) + ( f(sigma^(-1) X) - 1 ) rho_(logN + 6)(X)
p_poly = &p_poly
+ &(&(&f_poly_shift_1 - &(DensePolynomial::from_coefficients_slice(&[E::Fr::one()])))
* &pp.lagrange_polynomials_Vn[pp.logN + 5]);
// Compute h_hat_poly = p(X) / z_Vn(X) and abort if division is not perfect
let (h_hat_poly, remainder) = p_poly.divide_by_vanishing_poly(pp.domain_Vn).unwrap();
assert!(remainder.is_zero(), "z_Vn(X) does not divide p(X)");
////////////////////////////
// Commit to f(X) and h(X)
////////////////////////////
let g1_F = KZGCommit::<E>::commit_g1(&pp.poly_ck, &f_poly);
let h_hat_com = KZGCommit::<E>::commit_g1(&pp.poly_ck, &h_hat_poly);
// g1_H is a commitment to h_hat_poly + X^(d-1) z(X)
let g1_H = (h_hat_com.into_projective() + pp.gxd.mul(-*a) + pp.gxpen.mul(*b)).into_affine();
////////////////////////////
// alpha = Hash([z]_2, [F]_1, [H]_1)
////////////////////////////
transcript.append_element(b"F", &g1_F);
transcript.append_element(b"H", &g1_H);
transcript.append_element(b"z", g2_z);
let alpha = transcript.get_and_append_challenge(b"alpha");
////////////////////////////
    // v1 = f(sigma^(-1) alpha) and v2 = f(sigma^(-2) alpha)
////////////////////////////
let alpha1 = alpha * pp.domain_Vn.element(pp.domain_Vn.size() - 1);
let alpha2 = alpha * pp.domain_Vn.element(pp.domain_Vn.size() - 2);
let v1 = f_poly.evaluate(&alpha1);
let v2 = f_poly.evaluate(&alpha2);
////////////////////////////
// Compute polynomial p_alpha(X) that opens at alpha to 0
////////////////////////////
    // restating some field elements as constant polynomials so that we can multiply them with polynomials
let pz_alpha = DensePolynomial::from_coefficients_slice(&[-z.evaluate(&alpha)]);
let pv1 = DensePolynomial::from_coefficients_slice(&[v1]);
let pv2 = DensePolynomial::from_coefficients_slice(&[v2]);
let prho1_add_2 = DensePolynomial::from_coefficients_slice(&[pp.lagrange_polynomials_Vn[0]
.evaluate(&alpha)
+ pp.lagrange_polynomials_Vn[1].evaluate(&alpha)]);
let prho3 =
DensePolynomial::from_coefficients_slice(&[pp.lagrange_polynomials_Vn[2].evaluate(&alpha)]);
let prho4 =
DensePolynomial::from_coefficients_slice(&[pp.lagrange_polynomials_Vn[3].evaluate(&alpha)]);
let prho5 =
DensePolynomial::from_coefficients_slice(&[pp.lagrange_polynomials_Vn[4].evaluate(&alpha)]);
let ppolyprod = DensePolynomial::from_coefficients_slice(&[pp.poly_prod.evaluate(&alpha)]);
let prhologN6 = DensePolynomial::from_coefficients_slice(&[pp.lagrange_polynomials_Vn
[pp.logN + 5]
.evaluate(&alpha)]);
// p_alpha(X) = - zVn(alpha) h(X)
let mut p_alpha_poly = &pz_alpha * &h_hat_poly;
    // p_alpha(X) = p_alpha(X) + ( f(X) - a(X) ) (rho1(alpha) + rho2(alpha))
p_alpha_poly += &(&(&f_poly - &a_poly) * &prho1_add_2);
// p_alpha(X) = p_alpha(X) + ( (1-sigma) f(X) - v2 + v1 ) rho3(alpha)
p_alpha_poly +=
&(&(&(&(&DensePolynomial::from_coefficients_slice(&[(E::Fr::one() - sigma)]) * &f_poly)
- &pv2)
+ &pv1)
* &prho3);
// p_alpha(X) = p_alpha(X) + ( f(X) + v2 - sigma v1 ) rho4(alpha)
p_alpha_poly += &(&(&(&(&DensePolynomial::from_coefficients_slice(&[-sigma]) * &pv1) + &pv2)
+ &f_poly)
* &prho4);
// p_alpha(X) = p_alpha(X) + ( v1 f(X) - v2 ) rho5(alpha)
p_alpha_poly += &(&(&(&f_poly * &pv1) - &pv2) * &prho5);
// p_alpha(X) = p_alpha(X) + ( f(X) - v1^2 ) prod_(i not in [5, .. , logN +
// 4]) (alpha - sigma^i)
p_alpha_poly += &(&(&f_poly - &(&pv1 * &pv1)) * &ppolyprod);
    // Differing slightly from the paper:
    // the paper uses p_alpha(X) = p_alpha(X) + ( v1 - 1 ) rho_n(alpha), assuming
    // that logN = n - 6. We use p_alpha(X) = p_alpha(X) + ( v1 - 1 ) rho_(logN + 6)(alpha)
    // to allow for any value of logN.
p_alpha_poly +=
&(&(&pv1 - &(DensePolynomial::from_coefficients_slice(&[E::Fr::one()]))) * &prhologN6);
////////////////////////////
// Compute opening proofs
////////////////////////////
// KZG.Open(srs_KZG, f(X), deg = bot, (alpha1, alpha2))
let (_evals1, pi1) =
KZGCommit::open_g1_batch(&pp.poly_ck, &f_poly, None, [alpha1, alpha2].as_ref());
// KZG.Open(srs_KZG, p_alpha(X), deg = bot, alpha)
let (evals2, pi2) = KZGCommit::open_g1_batch(&pp.poly_ck, &p_alpha_poly, None, &[alpha]);
// abort if p_alpha( alpha) != 0
assert!(
evals2[0] == E::Fr::zero(),
"p_alpha(X) does not equal 0 at alpha"
);
end_timer!(timer);
CaulkProofUnity {
g1_F,
g1_H,
v1,
v2,
pi1,
pi2,
}
}
// Verify that the prover knows a, b such that g2_z = g2^(a x - b) and a^n = b^n
#[allow(non_snake_case)]
pub fn caulk_single_unity_verify<E: PairingEngine>(
vk: &VerifierPublicParametersUnity<E>,
transcript: &mut CaulkTranscript<E::Fr>,
g2_z: &E::G2Affine,
proof: &CaulkProofUnity<E>,
) -> bool {
let timer = start_timer!(|| "single unity verify");
// g2_z must not be the identity
assert!(!g2_z.is_zero(), "g2_z is the identity");
    // roots of unity in domain of size m = log_2(n) + 6
let sigma = vk.domain_Vn.element(1);
let v1 = proof.v1;
let v2 = proof.v2;
////////////////////////////
// alpha = Hash(A, F, H)
////////////////////////////
transcript.append_element(b"F", &proof.g1_F);
transcript.append_element(b"H", &proof.g1_H);
transcript.append_element(b"z", g2_z);
let alpha = transcript.get_and_append_challenge(b"alpha");
// alpha1 = sigma^(-1) alpha and alpha2 = sigma^(-2) alpha
let alpha1: E::Fr = alpha * vk.domain_Vn.element(vk.domain_Vn.size() - 1);
let alpha2: E::Fr = alpha * vk.domain_Vn.element(vk.domain_Vn.size() - 2);
///////////////////////////////
// KZG opening check
///////////////////////////////
let check1 = KZGCommit::<E>::verify_g1(
[vk.g1, vk.g1_x].as_ref(),
&vk.powers_of_g2,
&proof.g1_F,
None,
[alpha1, alpha2].as_ref(),
[proof.v1, proof.v2].as_ref(),
&proof.pi1,
);
///////////////////////////////
// Compute P = commitment to p_alpha(X)
///////////////////////////////
// Useful field elements.
// zalpha = z(alpha) = alpha^n - 1,
let zalpha = vk.domain_Vn.vanishing_polynomial().evaluate(&alpha);
    // rho_i = L_i(alpha) = ls_i * [ z(alpha) / (alpha - w^i) ]
    // where ls_i = lagrange_scalars_Vn[i] = prod_{j neq i} (w^i - w^j)^(-1)
let rho0 =
zalpha * (alpha - vk.domain_Vn.element(0)).inverse().unwrap() * vk.lagrange_scalars_Vn[0];
let rho1 =
zalpha * (alpha - vk.domain_Vn.element(1)).inverse().unwrap() * vk.lagrange_scalars_Vn[1];
let rho2 =
zalpha * (alpha - vk.domain_Vn.element(2)).inverse().unwrap() * vk.lagrange_scalars_Vn[2];
let rho3 =
zalpha * (alpha - vk.domain_Vn.element(3)).inverse().unwrap() * vk.lagrange_scalars_Vn[3];
let rho4 =
zalpha * (alpha - vk.domain_Vn.element(4)).inverse().unwrap() * vk.lagrange_scalars_Vn[4];
let rhologN5 = zalpha
* (alpha - vk.domain_Vn.element(vk.logN + 5))
.inverse()
.unwrap()
* vk.lagrange_scalars_Vn[5];
// pprod = prod_(i not in [5,..,logN+4]) (alpha - w^i)
let pprod = vk.poly_prod.evaluate(&alpha);
///////////////////////////////
// Pairing checks
///////////////////////////////
    // P = H^(-z(alpha))
    //     * F^( rho0 + rho1 + (1 - w) rho2 + rho3 + v1 rho4
    //           + prod_(i not in [5,..,logN+4]) (alpha - w^i) )
    //     * g^( (v1 - v2) rho2 + (v2 - w v1) rho3 - v2 rho4 + (v1 - 1) rho_(logN+5)
    //           - v1^2 * prod_(i not in [5,..,logN+4]) (alpha - w^i) )
let g1_p = proof.g1_H.mul(-zalpha)
+ proof
.g1_F
.mul(rho0 + rho1 + (E::Fr::one() - sigma) * rho2 + rho3 + v1 * rho4 + pprod)
+ vk.g1.mul(
(v1 - v2) * rho2 + (v2 - sigma * v1) * rho3 - v2 * rho4
+ (v1 - E::Fr::one()) * rhologN5
- v1 * v1 * pprod,
);
let g1_q = proof.pi2;
    // check that e( P * Q^alpha, [1]_2 ) e( g^(-(rho0 + rho1)) g^(-z(alpha) x^(d-1)), [z]_2 )
    // e( Q^(-1), [x]_2 ) = 1, where Q = pi2.
    // Had to move [z]_2 from affine to projective and back to affine to get it to
    // compile. No idea what difference this makes.
let eq1 = vec![
(
(g1_p + g1_q.mul(alpha)).into_affine().into(),
vk.poly_vk.prepared_h.clone(),
),
(
((vk.g1.mul(-rho0 - rho1) + vk.gxpen.mul(-zalpha)).into_affine()).into(),
(*g2_z).into(),
),
((-g1_q).into(), vk.poly_vk.prepared_beta_h.clone()),
];
let check2 = E::product_of_pairings(&eq1).is_one();
end_timer!(timer);
check1 && check2
}
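// ---------------------------------------------------------------------------
// A self-contained round-trip sketch (not part of the original file) of the
// unity sub-protocol on bls12-381: pick a and b = a * w^i so that a^N = b^N,
// form [z]_2 = [a x - b]_2 exactly as caulk_single_prove does, then prove and
// verify. Like the tests in single/mod.rs, it relies on caulk_single_setup
// being called with a fresh test_rng so that the re-derived powers of g2 in
// the setup match the srs.
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::{
        caulk_single_unity_prove, caulk_single_unity_verify, PublicParametersUnity,
        VerifierPublicParametersUnity,
    };
    use crate::{caulk_single_setup, CaulkTranscript};
    use ark_bls12_381::{Bls12_381, Fr};
    use ark_ec::{AffineCurve, ProjectiveCurve};
    use ark_poly::{EvaluationDomain, GeneralEvaluationDomain};
    use ark_std::{test_rng, UniformRand};

    #[test]
    fn test_single_unity_roundtrip() {
        let mut rng = test_rng();
        let p: usize = 5;
        let max_degree: usize = (1 << p) + 2;
        let actual_degree: usize = (1 << p) - 1;
        let pp = caulk_single_setup::<Bls12_381, _>(max_degree, actual_degree, &mut rng);

        // a and b = a * w^i satisfy a^N = b^N for N the size of domain_H
        let domain_h: GeneralEvaluationDomain<Fr> =
            GeneralEvaluationDomain::new(pp.verifier_pp.domain_H_size).unwrap();
        let a = Fr::rand(&mut rng);
        let b = a * domain_h.element(3);

        // [z]_2 = [a x - b]_2, mirroring caulk_single_prove
        let g2_z = (pp.verifier_pp.poly_vk.beta_h.mul(a) + pp.verifier_pp.poly_vk.h.mul(-b))
            .into_affine();

        let mut prover_transcript = CaulkTranscript::<Fr>::new();
        let mut verifier_transcript = CaulkTranscript::<Fr>::new();
        let proof = caulk_single_unity_prove(
            &PublicParametersUnity::from(&pp),
            &mut prover_transcript,
            &g2_z,
            &a,
            &b,
            &mut rng,
        );
        assert!(caulk_single_unity_verify(
            &VerifierPublicParametersUnity::from(&pp.verifier_pp),
            &mut verifier_transcript,
            &g2_z,
            &proof,
        ));
    }
}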

40
src/transcript.rs Normal file

@@ -0,0 +1,40 @@
use ark_ff::PrimeField;
use ark_serialize::CanonicalSerialize;
use merlin::Transcript;
use std::marker::PhantomData;
pub struct CaulkTranscript<F: PrimeField> {
transcript: Transcript,
phantom: PhantomData<F>,
}
impl<F: PrimeField> Default for CaulkTranscript<F> {
fn default() -> Self {
Self::new()
}
}
impl<F: PrimeField> CaulkTranscript<F> {
pub fn new() -> Self {
Self {
transcript: Transcript::new(b"Init Caulk transcript"),
phantom: PhantomData::default(),
}
}
/// Get a uniform random field element for field size < 384
pub fn get_and_append_challenge(&mut self, label: &'static [u8]) -> F {
let mut bytes = [0u8; 64];
self.transcript.challenge_bytes(label, &mut bytes);
let challenge = F::from_le_bytes_mod_order(bytes.as_ref());
self.append_element(b"append challenge", &challenge);
challenge
}
/// Append a field/group element to the transcript
pub fn append_element<T: CanonicalSerialize>(&mut self, label: &'static [u8], r: &T) {
let mut buf = vec![];
r.serialize(&mut buf).unwrap();
self.transcript.append_message(label, buf.as_ref());
}
}
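// ---------------------------------------------------------------------------
// A small sketch (not in the original file) of the Fiat-Shamir pattern used
// throughout the crate: as long as the prover and verifier append the same
// labeled elements in the same order, they derive the same challenge.
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::CaulkTranscript;
    use ark_bls12_381::{Fr, G1Projective};
    use ark_ec::ProjectiveCurve;
    use ark_std::{test_rng, UniformRand};

    #[test]
    fn transcripts_agree_on_challenges() {
        let mut rng = test_rng();
        let g = G1Projective::rand(&mut rng).into_affine();

        let mut prover = CaulkTranscript::<Fr>::new();
        let mut verifier = CaulkTranscript::<Fr>::new();
        prover.append_element(b"g", &g);
        verifier.append_element(b"g", &g);

        // both sides derive the same alpha from the same transcript state
        assert_eq!(
            prover.get_and_append_challenge(b"alpha"),
            verifier.get_and_append_challenge(b"alpha")
        );
    }
}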

46
src/util.rs Normal file

@@ -0,0 +1,46 @@
use ark_ec::PairingEngine;
use ark_ff::PrimeField;
use ark_poly::UVPolynomial;
use ark_poly_commit::kzg10::*;
#[cfg(feature = "parallel")]
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
// Reduces full srs down to smaller srs for smaller polynomials
// Copied from arkworks library (where same function is private)
pub(crate) fn trim<E: PairingEngine, P: UVPolynomial<E::Fr>>(
srs: &UniversalParams<E>,
mut supported_degree: usize,
) -> (Powers<'static, E>, VerifierKey<E>) {
if supported_degree == 1 {
supported_degree += 1;
}
let powers_of_g = srs.powers_of_g[..=supported_degree].to_vec();
let powers_of_gamma_g = (0..=supported_degree)
.map(|i| srs.powers_of_gamma_g[&i])
.collect();
let powers = Powers {
powers_of_g: ark_std::borrow::Cow::Owned(powers_of_g),
powers_of_gamma_g: ark_std::borrow::Cow::Owned(powers_of_gamma_g),
};
let vk = VerifierKey {
g: srs.powers_of_g[0],
gamma_g: srs.powers_of_gamma_g[&0],
h: srs.h,
beta_h: srs.beta_h,
prepared_h: srs.prepared_h.clone(),
prepared_beta_h: srs.prepared_beta_h.clone(),
};
(powers, vk)
}
////////////////////////////////////////////////
//
// copied from arkworks
pub(crate) fn convert_to_bigints<F: PrimeField>(p: &[F]) -> Vec<F::BigInt> {
ark_std::cfg_iter!(p)
.map(|s| s.into_repr())
.collect::<Vec<_>>()
}