Mirror of https://github.com/caulk-crypto/caulk.git (synced 2026-01-08 21:07:56 -05:00)
Refactoring single and multi open into the same crate
.gitignore (vendored)
@@ -9,5 +9,6 @@ Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk

caulk_multi_lookup/target
caulk_single_opening/target
srs/
polys/
tmp/
Cargo.toml
@@ -1,6 +1,41 @@
[workspace]
[package]
name = "caulk"
version = "0.1.0"
authors = ["mmaller <mary.maller@ethereum.org>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
ark-ff = { version = "^0.3.0", default-features = false }
ark-ec = { version = "^0.3.0", default-features = false }
ark-serialize = { version = "^0.3.0", default-features = false, features = [ "derive" ] }
ark-poly = { version = "^0.3.0", default-features = false }
ark-std = { version = "^0.3.0", default-features = false }
ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true }
ark-bls12-381 = { version = "^0.3.0", features = [ "std" ] }
ark-bls12-377 = { version = "^0.3.0", features = [ "std" ] }
ark-poly-commit = { version = "^0.3.0", default-features = false }

tracing = { version = "0.1", default-features = false, features = [ "attributes" ], optional = true }
derivative = { version = "2.0", features = ["use_core"], optional = true}
rand = "0.8.5"
rand_chacha = { version = "0.3.1" }
thiserror = "1.0.19"
blake2s_simd = "1.0.0"

rayon = { version = "1.5.2", default-features = false, optional = true }
merlin = { version = "3.0.0" }

[features]
asm = [ "ark-ff/asm" ]
parallel = [
"rayon",
"ark-std/parallel",
"ark-ff/parallel",
"ark-poly/parallel"
]
print-trace = [
"ark-std/print-trace"
]

members = [
"caulk_multi_lookup",
"caulk_single_opening"
]
caulk_multi_lookup/Cargo.toml
@@ -1,31 +0,0 @@
[package]
name = "caulk_multi_lookup"
authors = ["mmaller <mary.maller@ethereum.org>", "khovratovich <khovratovich@gmail.com>"]
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
ark-ff = { version = "^0.3.0", default-features = false }
ark-ec = { version = "^0.3.0", default-features = false }
ark-serialize = { version = "^0.3.0", default-features = false, features = [ "derive" ] }
ark-poly = { version = "^0.3.0", default-features = false }
ark-std = { version = "^0.3.0", default-features = false }
ark-relations = { version = "^0.3.0", default-features = false }
ark-crypto-primitives = { version = "^0.3.0", default-features = false }
ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true }
ark-bls12-381 = { version = "^0.3.0", features = [ "std" ] }
ark-poly-commit = { version = "^0.3.0", default-features = false }
ark-marlin = { version = "^0.3.0", default-features = false }

tracing = { version = "0.1", default-features = false, features = [ "attributes" ], optional = true }
derivative = { version = "2.0", features = ["use_core"], optional = true}
rand = "0.7.3"
rand_chacha = { version = "0.2.1" }
thiserror = "1.0.19"
blake2s_simd = "0.5.10"


[features]
asm = [ "ark-ff/asm" ]
caulk_multi_lookup/src/caulk_multi_setup.rs
@@ -1,382 +0,0 @@
|
||||
/*
|
||||
This file includes the setup algorithm for Caulk with multi openings.
|
||||
A full description of the setup is not formally given in the paper.
|
||||
*/
|
||||
|
||||
use ark_bls12_381::{Bls12_381, Fr, FrParameters, G1Affine, G2Affine};
|
||||
use ark_ec::{bls12::Bls12, AffineCurve, PairingEngine, ProjectiveCurve};
|
||||
use ark_ff::{Fp256, UniformRand};
|
||||
use ark_poly::{
|
||||
EvaluationDomain, Evaluations as EvaluationsOnDomain, GeneralEvaluationDomain, UVPolynomial,
|
||||
};
|
||||
use ark_poly_commit::kzg10::*;
|
||||
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
|
||||
|
||||
use crate::tools::{KzgBls12_381, UniPoly381};
|
||||
use ark_std::{cfg_into_iter, One, Zero};
|
||||
use std::{fs::File, io::Read, time::Instant};
|
||||
|
||||
// structure of public parameters
|
||||
#[allow(non_snake_case)]
|
||||
pub struct PublicParameters {
|
||||
pub poly_ck: Powers<'static, Bls12<ark_bls12_381::Parameters>>,
|
||||
pub poly_vk: VerifierKey<Bls12<ark_bls12_381::Parameters>>,
|
||||
pub domain_m: GeneralEvaluationDomain<Fr>,
|
||||
pub domain_n: GeneralEvaluationDomain<Fr>,
|
||||
pub domain_N: GeneralEvaluationDomain<Fr>,
|
||||
pub verifier_pp: VerifierPublicParameters,
|
||||
pub lagrange_polynomials_n: Vec<UniPoly381>,
|
||||
pub lagrange_polynomials_m: Vec<UniPoly381>,
|
||||
pub id_poly: UniPoly381,
|
||||
pub N: usize,
|
||||
pub m: usize,
|
||||
pub n: usize,
|
||||
pub g2_powers: Vec<G2Affine>,
|
||||
}
|
||||
|
||||
pub struct LookupParameters {
|
||||
m: usize,
|
||||
lagrange_polynomials_m: Vec<UniPoly381>,
|
||||
domain_m: GeneralEvaluationDomain<Fr>,
|
||||
id_poly: UniPoly381,
|
||||
}
|
||||
|
||||
impl PublicParameters {
|
||||
pub fn regenerate_lookup_params(&mut self, m: usize) {
|
||||
let lp = generate_lookup_params(m);
|
||||
self.m = lp.m;
|
||||
self.lagrange_polynomials_m = lp.lagrange_polynomials_m;
|
||||
self.domain_m = lp.domain_m;
|
||||
self.id_poly = lp.id_poly;
|
||||
}
|
||||
|
||||
//store powers of g in a file
|
||||
pub fn store(&self, path: &str) {
|
||||
use std::io::Write;
|
||||
|
||||
//1. Powers of g
|
||||
let mut g_bytes = vec![];
|
||||
let mut f = File::create(path).expect("Unable to create file");
|
||||
let deg: u32 = self.poly_ck.powers_of_g.len().try_into().unwrap();
|
||||
let deg_bytes = deg.to_be_bytes();
|
||||
f.write_all(&deg_bytes).expect("Unable to write data");
|
||||
let deg32: usize = deg.try_into().unwrap();
|
||||
for i in 0..deg32 {
|
||||
self.poly_ck.powers_of_g[i]
|
||||
.into_projective()
|
||||
.into_affine()
|
||||
.serialize_uncompressed(&mut g_bytes)
|
||||
.ok();
|
||||
}
|
||||
f.write_all(&g_bytes).expect("Unable to write data");
|
||||
|
||||
//2. Powers of gammag
|
||||
let deg_gamma: u32 = self.poly_ck.powers_of_gamma_g.len().try_into().unwrap();
|
||||
let mut gg_bytes = vec![];
|
||||
let deg_bytes = deg_gamma.to_be_bytes();
|
||||
f.write_all(&deg_bytes).expect("Unable to write data");
|
||||
let deg32: usize = deg_gamma.try_into().unwrap();
|
||||
for i in 0..deg32 {
|
||||
self.poly_ck.powers_of_gamma_g[i]
|
||||
.into_projective()
|
||||
.into_affine()
|
||||
.serialize_uncompressed(&mut gg_bytes)
|
||||
.ok();
|
||||
}
|
||||
f.write_all(&gg_bytes).expect("Unable to write data");
|
||||
|
||||
//3. Verifier key
|
||||
let mut h_bytes = vec![];
|
||||
self.poly_vk.h.serialize_uncompressed(&mut h_bytes).ok();
|
||||
self.poly_vk
|
||||
.beta_h
|
||||
.serialize_uncompressed(&mut h_bytes)
|
||||
.ok();
|
||||
f.write_all(&h_bytes).expect("Unable to write data");
|
||||
|
||||
//4. g2 powers
|
||||
let mut g2_bytes = vec![];
|
||||
let deg2: u32 = self.g2_powers.len().try_into().unwrap();
|
||||
let deg2_bytes = deg2.to_be_bytes();
|
||||
f.write_all(&deg2_bytes).expect("Unable to write data");
|
||||
let deg2_32: usize = deg2.try_into().unwrap();
|
||||
for i in 0..deg2_32 {
|
||||
self.g2_powers[i]
|
||||
.into_projective()
|
||||
.into_affine()
|
||||
.serialize_uncompressed(&mut g2_bytes)
|
||||
.ok();
|
||||
}
|
||||
f.write_all(&g2_bytes).expect("Unable to write data");
|
||||
}
|
||||
|
||||
//load powers of g from a file
|
||||
pub fn load(
|
||||
path: &str,
|
||||
) -> (
|
||||
Powers<'static, Bls12<ark_bls12_381::Parameters>>,
|
||||
VerifierKey<Bls12<ark_bls12_381::Parameters>>,
|
||||
Vec<G2Affine>,
|
||||
) {
|
||||
const G1_UNCOMPR_SIZE: usize = 96;
|
||||
const G2_UNCOMPR_SIZE: usize = 192;
|
||||
let mut data = Vec::new();
|
||||
let mut f = File::open(path).expect("Unable to open file");
|
||||
f.read_to_end(&mut data).expect("Unable to read data");
|
||||
|
||||
//1. reading g powers
|
||||
let mut cur_counter: usize = 0;
|
||||
let deg_bytes: [u8; 4] = (&data[0..4]).try_into().unwrap();
|
||||
let deg: u32 = u32::from_be_bytes(deg_bytes);
|
||||
let mut powers_of_g = vec![];
|
||||
let deg32: usize = deg.try_into().unwrap();
|
||||
cur_counter += 4;
|
||||
for i in 0..deg32 {
|
||||
let buf_bytes =
|
||||
&data[cur_counter + i * G1_UNCOMPR_SIZE..cur_counter + (i + 1) * G1_UNCOMPR_SIZE];
|
||||
let tmp = G1Affine::deserialize_unchecked(buf_bytes).unwrap();
|
||||
powers_of_g.push(tmp);
|
||||
}
|
||||
cur_counter += deg32 * G1_UNCOMPR_SIZE;
|
||||
|
||||
//2. reading gamma g powers
|
||||
let deg_bytes: [u8; 4] = (&data[cur_counter..cur_counter + 4]).try_into().unwrap();
|
||||
let deg: u32 = u32::from_be_bytes(deg_bytes);
|
||||
let mut powers_of_gamma_g = vec![];
|
||||
let deg32: usize = deg.try_into().unwrap();
|
||||
cur_counter += 4;
|
||||
for i in 0..deg32 {
|
||||
let buf_bytes =
|
||||
&data[cur_counter + i * G1_UNCOMPR_SIZE..cur_counter + (i + 1) * G1_UNCOMPR_SIZE];
|
||||
let tmp = G1Affine::deserialize_unchecked(buf_bytes).unwrap();
|
||||
powers_of_gamma_g.push(tmp);
|
||||
}
|
||||
cur_counter += deg32 * G1_UNCOMPR_SIZE;
|
||||
|
||||
//3. reading verifier key
|
||||
let buf_bytes = &data[cur_counter..cur_counter + G2_UNCOMPR_SIZE];
|
||||
let h = G2Affine::deserialize_unchecked(buf_bytes).unwrap();
|
||||
cur_counter += G2_UNCOMPR_SIZE;
|
||||
let buf_bytes = &data[cur_counter..cur_counter + G2_UNCOMPR_SIZE];
|
||||
let beta_h = G2Affine::deserialize_unchecked(buf_bytes).unwrap();
|
||||
cur_counter += G2_UNCOMPR_SIZE;
|
||||
|
||||
//4. reading G2 powers
|
||||
let deg2_bytes: [u8; 4] = (&data[cur_counter..cur_counter + 4]).try_into().unwrap();
|
||||
let deg2: u32 = u32::from_be_bytes(deg2_bytes);
|
||||
let mut g2_powers = vec![];
|
||||
let deg2_32: usize = deg2.try_into().unwrap();
|
||||
cur_counter += 4;
|
||||
for _ in 0..deg2_32 {
|
||||
let buf_bytes = &data[cur_counter..cur_counter + G2_UNCOMPR_SIZE];
|
||||
let tmp = G2Affine::deserialize_unchecked(buf_bytes).unwrap();
|
||||
g2_powers.push(tmp);
|
||||
cur_counter += G2_UNCOMPR_SIZE;
|
||||
}
|
||||
|
||||
let vk = VerifierKey {
|
||||
g: powers_of_g[0].clone(),
|
||||
gamma_g: powers_of_gamma_g[0].clone(),
|
||||
h: h,
|
||||
beta_h: beta_h,
|
||||
prepared_h: h.into(),
|
||||
prepared_beta_h: beta_h.into(),
|
||||
};
|
||||
|
||||
let powers = Powers {
|
||||
powers_of_g: ark_std::borrow::Cow::Owned(powers_of_g),
|
||||
powers_of_gamma_g: ark_std::borrow::Cow::Owned(powers_of_gamma_g),
|
||||
};
|
||||
|
||||
(powers, vk, g2_powers)
|
||||
}
|
||||
}
|
||||
|
||||
// smaller set of public parameters used by verifier
|
||||
pub struct VerifierPublicParameters {
|
||||
pub poly_vk: VerifierKey<Bls12<ark_bls12_381::Parameters>>,
|
||||
pub domain_m_size: usize,
|
||||
}
|
||||
|
||||
fn generate_lookup_params(m: usize) -> LookupParameters {
|
||||
let domain_m: GeneralEvaluationDomain<Fr> = GeneralEvaluationDomain::new(m.clone()).unwrap();
|
||||
|
||||
// id_poly(X) = 1 at the first m elements of domain_m and 0 at the remaining domain elements.
|
||||
let mut id_vec = Vec::new();
|
||||
for _ in 0..m.clone() {
|
||||
id_vec.push(Fr::one());
|
||||
}
|
||||
for _ in m.clone()..domain_m.size() {
|
||||
id_vec.push(Fr::zero());
|
||||
}
|
||||
let id_poly = EvaluationsOnDomain::from_vec_and_domain(id_vec, domain_m).interpolate();
|
||||
let mut lagrange_polynomials_m: Vec<UniPoly381> = Vec::new();
|
||||
|
||||
for i in 0..domain_m.size() {
|
||||
let evals: Vec<Fp256<FrParameters>> = cfg_into_iter!(0..domain_m.size())
|
||||
.map(|k| if k == i { Fr::one() } else { Fr::zero() })
|
||||
.collect();
|
||||
lagrange_polynomials_m
|
||||
.push(EvaluationsOnDomain::from_vec_and_domain(evals, domain_m).interpolate());
|
||||
}
|
||||
|
||||
return LookupParameters {
|
||||
m: m,
|
||||
lagrange_polynomials_m: lagrange_polynomials_m,
|
||||
domain_m: domain_m,
|
||||
id_poly: id_poly,
|
||||
};
|
||||
}
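
// Sanity sketch (an illustrative addition, not part of the original code): id_poly produced by
// generate_lookup_params is the indicator of the first m points of domain_m, i.e. it evaluates
// to 1 on the first m domain elements and to 0 on the rest. m = 6 is an arbitrary example size.
#[cfg(test)]
mod lookup_params_sketch {
    use super::*;
    use ark_poly::Polynomial;

    #[test]
    fn id_poly_is_an_indicator() {
        let m: usize = 6;
        let lp = generate_lookup_params(m);
        for i in 0..lp.domain_m.size() {
            let expected = if i < m { Fr::one() } else { Fr::zero() };
            assert_eq!(lp.id_poly.evaluate(&lp.domain_m.element(i)), expected);
        }
    }
}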
|
||||
|
||||
// Reduces full srs down to smaller srs for smaller polynomials
|
||||
// Copied from the arkworks library (where the same function is private)
|
||||
fn trim<E: PairingEngine, P: UVPolynomial<E::Fr>>(
|
||||
srs: UniversalParams<E>,
|
||||
mut supported_degree: usize,
|
||||
) -> (Powers<'static, E>, VerifierKey<E>) {
|
||||
if supported_degree == 1 {
|
||||
supported_degree += 1;
|
||||
}
|
||||
let pp = srs.clone();
|
||||
let powers_of_g = pp.powers_of_g[..=supported_degree].to_vec();
|
||||
let powers_of_gamma_g = (0..=supported_degree)
|
||||
.map(|i| pp.powers_of_gamma_g[&i])
|
||||
.collect();
|
||||
|
||||
let powers = Powers {
|
||||
powers_of_g: ark_std::borrow::Cow::Owned(powers_of_g),
|
||||
powers_of_gamma_g: ark_std::borrow::Cow::Owned(powers_of_gamma_g),
|
||||
};
|
||||
let vk = VerifierKey {
|
||||
g: pp.powers_of_g[0],
|
||||
gamma_g: pp.powers_of_gamma_g[&0],
|
||||
h: pp.h,
|
||||
beta_h: pp.beta_h,
|
||||
prepared_h: pp.prepared_h.clone(),
|
||||
prepared_beta_h: pp.prepared_beta_h.clone(),
|
||||
};
|
||||
(powers, vk)
|
||||
}
|
||||
|
||||
// setup algorithm for index_hiding_polycommit
|
||||
// also includes a bunch of precomputation.
|
||||
// @max_degree max degree of table polynomial C(X), also the size of the trusted setup
|
||||
// @N domain size on which proofs are constructed. Should not be larger than max_degree
|
||||
// @m lookup size. Can be changed later
|
||||
// @n supplementary domain size for the unity proofs. Should be at least 6 + log N
|
||||
#[allow(non_snake_case)]
|
||||
pub fn setup_multi_lookup(max_degree: &usize, N: &usize, m: &usize, n: &usize) -> PublicParameters {
|
||||
let rng = &mut ark_std::test_rng();
|
||||
|
||||
// Setup algorithm. To be replaced by output of a universal setup before being production ready.
|
||||
|
||||
//let mut srs = KzgBls12_381::setup(4, true, rng).unwrap();
|
||||
let poly_ck: Powers<'static, Bls12<ark_bls12_381::Parameters>>;
|
||||
let poly_vk: VerifierKey<Bls12<ark_bls12_381::Parameters>>;
|
||||
let mut g2_powers: Vec<G2Affine> = Vec::new();
|
||||
|
||||
//try opening the file. If it exists load the setup from there, otherwise generate
|
||||
let path = format!("srs/srs_{}.setup", max_degree);
|
||||
let res = File::open(path.clone());
|
||||
let store_to_file: bool;
|
||||
match res {
|
||||
Ok(_) => {
|
||||
let now = Instant::now();
|
||||
let (_poly_ck, _poly_vk, _g2_powers) = PublicParameters::load(&path);
|
||||
println!("time to load powers = {:?}", now.elapsed());
|
||||
store_to_file = false;
|
||||
g2_powers = _g2_powers;
|
||||
poly_ck = _poly_ck;
|
||||
poly_vk = _poly_vk;
|
||||
}
|
||||
Err(_) => {
|
||||
let now = Instant::now();
|
||||
let srs = KzgBls12_381::setup(max_degree.clone(), true, rng).unwrap();
|
||||
println!("time to setup powers = {:?}", now.elapsed());
|
||||
|
||||
// trim down to size
|
||||
let (poly_ck2, poly_vk2) = trim::<Bls12_381, UniPoly381>(srs, max_degree.clone());
|
||||
poly_ck = Powers {
|
||||
powers_of_g: ark_std::borrow::Cow::Owned(poly_ck2.powers_of_g.into()),
|
||||
powers_of_gamma_g: ark_std::borrow::Cow::Owned(poly_ck2.powers_of_gamma_g.into()),
|
||||
};
|
||||
poly_vk = poly_vk2;
|
||||
|
||||
// need some powers of g2
|
||||
// arkworks setup doesn't give these powers but the setup does use a fixed randomness to generate them.
|
||||
// so we can generate powers of g2 directly.
|
||||
let rng = &mut ark_std::test_rng();
|
||||
let beta: Fp256<FrParameters> = Fr::rand(rng);
|
||||
let mut temp = poly_vk.h.clone();
|
||||
|
||||
for _ in 0..poly_ck.powers_of_g.len() {
|
||||
g2_powers.push(temp.clone());
|
||||
temp = temp.mul(beta).into_affine();
|
||||
}
|
||||
|
||||
store_to_file = true;
|
||||
}
|
||||
}
|
||||
|
||||
// domain where openings {w_i}_{i in I} are embedded
|
||||
let domain_n: GeneralEvaluationDomain<Fr> = GeneralEvaluationDomain::new(n.clone()).unwrap();
|
||||
let domain_N: GeneralEvaluationDomain<Fr> = GeneralEvaluationDomain::new(N.clone()).unwrap();
|
||||
|
||||
// precomputation to speed up prover
|
||||
// lagrange_polynomials[i] = polynomial equal to 0 at w^j for j!= i and 1 at w^i
|
||||
let mut lagrange_polynomials_n: Vec<UniPoly381> = Vec::new();
|
||||
|
||||
for i in 0..domain_n.size() {
|
||||
let evals: Vec<Fp256<FrParameters>> = cfg_into_iter!(0..domain_n.size())
|
||||
.map(|k| if k == i { Fr::one() } else { Fr::zero() })
|
||||
.collect();
|
||||
lagrange_polynomials_n
|
||||
.push(EvaluationsOnDomain::from_vec_and_domain(evals, domain_n).interpolate());
|
||||
}
|
||||
|
||||
let lp = generate_lookup_params(m.clone());
|
||||
|
||||
let verifier_pp = VerifierPublicParameters {
|
||||
poly_vk: poly_vk.clone(),
|
||||
domain_m_size: lp.domain_m.size(),
|
||||
};
|
||||
|
||||
let pp = PublicParameters {
|
||||
poly_ck: poly_ck,
|
||||
domain_m: lp.domain_m,
|
||||
domain_n: domain_n,
|
||||
lagrange_polynomials_n: lagrange_polynomials_n,
|
||||
lagrange_polynomials_m: lp.lagrange_polynomials_m,
|
||||
id_poly: lp.id_poly,
|
||||
domain_N: domain_N,
|
||||
poly_vk: poly_vk,
|
||||
verifier_pp: verifier_pp,
|
||||
N: N.clone(),
|
||||
n: n.clone(),
|
||||
m: lp.m.clone(),
|
||||
g2_powers: g2_powers.clone(),
|
||||
};
|
||||
if store_to_file {
|
||||
pp.store(&path);
|
||||
}
|
||||
return pp;
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[allow(non_snake_case)]
|
||||
pub fn test_load() {
|
||||
let n: usize = 4;
|
||||
let N: usize = 1 << n;
|
||||
let powers_size: usize = 4 * N; //SRS SIZE
|
||||
let temp_m = n; //dummy
|
||||
let pp = setup_multi_lookup(&powers_size, &N, &temp_m, &n);
|
||||
let path = "powers.log";
|
||||
pp.store(path);
|
||||
let loaded = PublicParameters::load(path);
|
||||
assert_eq!(pp.poly_ck.powers_of_g, loaded.0.powers_of_g);
|
||||
assert_eq!(pp.poly_ck.powers_of_gamma_g, loaded.0.powers_of_gamma_g);
|
||||
assert_eq!(pp.poly_vk.h, loaded.1.h);
|
||||
assert_eq!(pp.poly_vk.beta_h, loaded.1.beta_h);
|
||||
assert_eq!(pp.g2_powers, loaded.2);
|
||||
std::fs::remove_file(&path).expect("File can not be deleted");
|
||||
}
|
||||
caulk_multi_lookup/src/multiopen.rs
@@ -1,394 +0,0 @@
|
||||
/*
|
||||
This file includes an algorithm for calculating n openings of a KZG vector commitment of size n in n log(n) time.
|
||||
The algorithm is by Feist and Khovratovich.
|
||||
It is useful for preprocessing.
|
||||
The full algorithm is described here https://github.com/khovratovich/Kate/blob/master/Kate_amortized.pdf
|
||||
*/
|
||||
|
||||
use std::str::FromStr;
|
||||
//use std::time::{Instant};
|
||||
use std::vec::Vec;
|
||||
|
||||
use ark_bls12_381::{Fr, FrParameters, G2Affine, G2Projective};
|
||||
use ark_ec::{AffineCurve, ProjectiveCurve};
|
||||
use ark_ff::{Field, Fp256, PrimeField};
|
||||
use ark_poly::{
|
||||
univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain, UVPolynomial,
|
||||
};
|
||||
|
||||
pub fn compute_h_opt_g2(
|
||||
c_poly: &DensePolynomial<Fp256<FrParameters>>, //c(X) degree up to d<2^p , i.e. c_poly has at most d+1 coeffs non-zero
|
||||
g2powers: &Vec<G2Affine>, //SRS
|
||||
p: usize,
|
||||
) -> Vec<G2Projective> {
|
||||
let mut coeffs = c_poly.coeffs().to_vec();
|
||||
let dom_size = 1 << p;
|
||||
let fpzero = Fp256::from_str("0").unwrap();
|
||||
coeffs.resize(dom_size, fpzero);
|
||||
|
||||
//let now = Instant::now();
|
||||
//1. x_ext = [[x^{d-1}], [x^{d-2}], ..., [x], [1], d+2 [0]'s]
|
||||
let mut x_ext = vec![];
|
||||
for i in 0..=dom_size - 2 {
|
||||
x_ext.push(g2powers[dom_size - 2 - i].into_projective());
|
||||
}
|
||||
let g2inf = g2powers[0].mul(fpzero); // point at infinity in G2
|
||||
x_ext.resize(2 * dom_size, g2inf); //filling 2d+2 neutral elements
|
||||
|
||||
let y = dft_g2_opt(&x_ext, p + 1);
|
||||
//println!("Step 1 computed in {:?}", now.elapsed());
|
||||
|
||||
//2. c_ext = [c_d, d zeroes, c_d,c_{0},c_1,...,c_{d-2},c_{d-1}]
|
||||
//let now = Instant::now();
|
||||
let mut c_ext = vec![];
|
||||
c_ext.push(coeffs[coeffs.len() - 1]);
|
||||
c_ext.resize(dom_size, fpzero);
|
||||
c_ext.push(coeffs[coeffs.len() - 1]);
|
||||
for i in 0..coeffs.len() - 1 {
|
||||
c_ext.push(coeffs[i]);
|
||||
}
|
||||
assert_eq!(c_ext.len(), 2 * dom_size);
|
||||
let v = dft_opt(&c_ext, p + 1);
|
||||
//println!("Step 2 computed in {:?}", now.elapsed());
|
||||
|
||||
//3. u = y o v
|
||||
|
||||
//let now = Instant::now();
|
||||
let u = y
|
||||
.into_iter()
|
||||
.zip(v.into_iter())
|
||||
.map(|(a, b)| a.mul(b.into_repr()))
|
||||
.collect();
|
||||
// println!("Step 3 computed in {:?}", now.elapsed());
|
||||
|
||||
//4. h_ext = idft_{2d+2}(u)
|
||||
//let now = Instant::now();
|
||||
let h_ext = idft_g2_opt(&u, p + 1);
|
||||
//println!("Step 4 computed in {:?}", now.elapsed());
|
||||
|
||||
return h_ext[0..dom_size].to_vec();
|
||||
}
|
||||
|
||||
//compute dft of size @dom_size over vector of G1 elements
|
||||
//q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for 0<= i< dom_size=2^p
|
||||
pub fn dft_g2_opt(h: &Vec<G2Projective>, p: usize) -> Vec<G2Projective> {
|
||||
let dom_size = 1 << p;
|
||||
assert_eq!(h.len(), dom_size); //we do not support inputs of size not power of 2
|
||||
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
|
||||
let mut l = dom_size / 2;
|
||||
let mut m: usize = 1;
|
||||
//Stockham FFT
|
||||
let mut xprev = h.to_vec();
|
||||
for _ in 1..=p {
|
||||
let mut xnext = vec![];
|
||||
xnext.resize(xprev.len(), h[0]);
|
||||
for j in 0..l {
|
||||
for k in 0..m {
|
||||
let c0 = xprev[k + j * m].clone();
|
||||
let c1 = &xprev[k + j * m + l * m];
|
||||
xnext[k + 2 * j * m] = c0 + c1;
|
||||
let wj_2l = input_domain.element((j * dom_size / (2 * l)) % dom_size);
|
||||
xnext[k + 2 * j * m + m] = (c0 - c1).mul(wj_2l.into_repr());
|
||||
}
|
||||
}
|
||||
l = l / 2;
|
||||
m = m * 2;
|
||||
xprev = xnext;
|
||||
}
|
||||
return xprev;
|
||||
}
|
||||
|
||||
//compute dft of size @dom_size over vector of Fr elements
|
||||
//q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for 0<= i< dom_size=2^p
|
||||
pub fn dft_opt(h: &Vec<Fr>, p: usize) -> Vec<Fr> {
|
||||
let dom_size = 1 << p;
|
||||
assert_eq!(h.len(), dom_size); //we do not support inputs of size not power of 2
|
||||
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
|
||||
let mut l = dom_size / 2;
|
||||
let mut m: usize = 1;
|
||||
//Stockham FFT
|
||||
let mut xprev = h.to_vec();
|
||||
for _ in 1..=p {
|
||||
let mut xnext = vec![];
|
||||
xnext.resize(xprev.len(), h[0]);
|
||||
for j in 0..l {
|
||||
for k in 0..m {
|
||||
let c0 = xprev[k + j * m].clone();
|
||||
let c1 = &xprev[k + j * m + l * m];
|
||||
xnext[k + 2 * j * m] = c0 + c1;
|
||||
let wj_2l = input_domain.element((j * dom_size / (2 * l)) % dom_size);
|
||||
xnext[k + 2 * j * m + m] = (c0 - c1) * (wj_2l);
|
||||
}
|
||||
}
|
||||
l = l / 2;
|
||||
m = m * 2;
|
||||
xprev = xnext;
|
||||
}
|
||||
return xprev;
|
||||
}
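
// Sanity sketch (an illustrative addition, not part of the original code): the Stockham DFT above
// should match the naive evaluation q_i = h_0 + h_1 w^i + ... + h_{dom_size-1} w^{(dom_size-1)i}
// stated in the comment, checked here on a small power-of-two input. p = 3 is an example size.
#[cfg(test)]
mod dft_opt_sanity_sketch {
    use super::*;
    use ark_std::{test_rng, UniformRand, Zero};

    #[test]
    fn dft_opt_matches_naive_evaluation() {
        let p: usize = 3;
        let dom_size: usize = 1 << p;
        let rng = &mut test_rng();
        let h: Vec<Fr> = (0..dom_size).map(|_| Fr::rand(rng)).collect();

        let q = dft_opt(&h, p);

        let domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
        for i in 0..dom_size {
            let mut naive = Fr::zero();
            for k in 0..dom_size {
                naive += h[k] * domain.element((i * k) % dom_size);
            }
            assert_eq!(q[i], naive);
        }
    }
}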
|
||||
|
||||
//compute idft of size @dom_size over vector of G1 elements
|
||||
//q_i = (h_0 + h_1w^-i + h_2w^{-2i}+\cdots + h_{dom_size-1}w^{-(dom_size-1)i})/dom_size for 0<= i< dom_size=2^p
|
||||
pub fn idft_g2_opt(h: &Vec<G2Projective>, p: usize) -> Vec<G2Projective> {
|
||||
let dom_size = 1 << p;
|
||||
assert_eq!(h.len(), dom_size); //we do not support inputs of size not power of 2
|
||||
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
|
||||
let mut l = dom_size / 2;
|
||||
let mut m: usize = 1;
|
||||
let mut dom_fr = Fr::from_str("1").unwrap();
|
||||
//Stockham FFT
|
||||
let mut xprev = h.to_vec();
|
||||
for _ in 1..=p {
|
||||
let mut xnext = vec![];
|
||||
xnext.resize(xprev.len(), h[0]);
|
||||
for j in 0..l {
|
||||
for k in 0..m {
|
||||
let c0 = xprev[k + j * m].clone();
|
||||
let c1 = &xprev[k + j * m + l * m];
|
||||
xnext[k + 2 * j * m] = c0 + c1;
|
||||
let wj_2l = input_domain
|
||||
.element((dom_size - (j * dom_size / (2 * l)) % dom_size) % dom_size);
|
||||
xnext[k + 2 * j * m + m] = (c0 - c1).mul(wj_2l.into_repr()); //Difference #1 to forward dft
|
||||
}
|
||||
}
|
||||
l = l / 2;
|
||||
m = m * 2;
|
||||
dom_fr = dom_fr + dom_fr;
|
||||
xprev = xnext;
|
||||
}
|
||||
let res = xprev
|
||||
.iter()
|
||||
.map(|x| x.mul(dom_fr.inverse().unwrap().into_repr()))
|
||||
.collect();
|
||||
return res;
|
||||
}
|
||||
|
||||
//compute all openings to c_poly using a smart formula
|
||||
pub fn multiple_open_g2(
|
||||
g2powers: &Vec<G2Affine>, //SRS
|
||||
c_poly: &DensePolynomial<Fp256<FrParameters>>, //c(X)
|
||||
p: usize,
|
||||
) -> Vec<G2Affine> {
|
||||
let degree = c_poly.coeffs.len() - 1;
|
||||
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(degree).unwrap();
|
||||
|
||||
//let now = Instant::now();
|
||||
let h2 = compute_h_opt_g2(c_poly, g2powers, p);
|
||||
//println!("H2 computed in {:?}", now.elapsed());
|
||||
//assert_eq!(h,h2);
|
||||
|
||||
let dom_size = input_domain.size();
|
||||
assert_eq!(1 << p, dom_size);
|
||||
assert_eq!(degree + 1, dom_size);
|
||||
/*let now = Instant::now();
|
||||
let q = dftG1(&h,p);
|
||||
println!("Q computed in {:?}", now.elapsed());*/
|
||||
|
||||
//let now = Instant::now();
|
||||
let q2 = dft_g2_opt(&h2, p);
|
||||
//println!("Q2 computed in {:?}", now.elapsed());
|
||||
//assert_eq!(q,q2);
|
||||
|
||||
let mut res: Vec<G2Affine> = vec![];
|
||||
for i in 0..dom_size {
|
||||
res.push(q2[i].into_affine());
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
use std::time::Instant;
|
||||
|
||||
use ark_bls12_381::{Bls12_381, G1Affine, G1Projective};
|
||||
use ark_ec::{AffineCurve, ProjectiveCurve};
|
||||
use ark_ff::Fp256;
|
||||
use ark_poly::univariate::DensePolynomial;
|
||||
use ark_poly::EvaluationDomain;
|
||||
use ark_poly_commit::kzg10::*;
|
||||
use ark_poly_commit::UVPolynomial;
|
||||
use ark_std::One;
|
||||
|
||||
use crate::caulk_multi_setup::{setup_multi_lookup, PublicParameters};
|
||||
use crate::multiopen::*;
|
||||
use crate::tools::{kzg_commit_g2, kzg_open_g1, KzgBls12_381, UniPoly381};
|
||||
|
||||
pub fn commit_direct(
|
||||
c_poly: &DensePolynomial<Fp256<FrParameters>>, //c(X)
|
||||
poly_ck: &Powers<Bls12_381>, //SRS
|
||||
) -> G1Affine {
|
||||
assert!(c_poly.coeffs.len() <= poly_ck.powers_of_g.len());
|
||||
let mut com = poly_ck.powers_of_g[0].mul(c_poly.coeffs[0]);
|
||||
for i in 1..c_poly.coeffs.len() {
|
||||
com = com + poly_ck.powers_of_g[i].mul(c_poly.coeffs[i]);
|
||||
}
|
||||
return com.into_affine();
|
||||
}
|
||||
|
||||
//compute dft of size @dom_size over vector of G1 elements
|
||||
//q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for 0<= i< dom_size=2^p
|
||||
#[allow(dead_code)]
|
||||
pub fn dft_g1_opt(h: &Vec<G1Projective>, p: usize) -> Vec<G1Projective> {
|
||||
let dom_size = 1 << p;
|
||||
assert_eq!(h.len(), dom_size); //we do not support inputs of size not power of 2
|
||||
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
|
||||
let mut l = dom_size / 2;
|
||||
let mut m: usize = 1;
|
||||
//Stockham FFT
|
||||
let mut xprev = h.to_vec();
|
||||
for _ in 1..=p {
|
||||
let mut xnext = vec![];
|
||||
xnext.resize(xprev.len(), h[0]);
|
||||
for j in 0..l {
|
||||
for k in 0..m {
|
||||
let c0 = xprev[k + j * m].clone();
|
||||
let c1 = &xprev[k + j * m + l * m];
|
||||
xnext[k + 2 * j * m] = c0 + c1;
|
||||
let wj_2l = input_domain.element((j * dom_size / (2 * l)) % dom_size);
|
||||
xnext[k + 2 * j * m + m] = (c0 - c1).mul(wj_2l.into_repr());
|
||||
}
|
||||
}
|
||||
l = l / 2;
|
||||
m = m * 2;
|
||||
xprev = xnext;
|
||||
}
|
||||
return xprev;
|
||||
}
|
||||
|
||||
//compute idft of size @dom_size over vector of G1 elements
|
||||
//q_i = (h_0 + h_1w^-i + h_2w^{-2i}+\cdots + h_{dom_size-1}w^{-(dom_size-1)i})/dom_size for 0<= i< dom_size=2^p
|
||||
#[allow(dead_code)]
|
||||
pub fn idft_g1_opt(h: &Vec<G1Projective>, p: usize) -> Vec<G1Projective> {
|
||||
let dom_size = 1 << p;
|
||||
assert_eq!(h.len(), dom_size); //we do not support inputs of size not power of 2
|
||||
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(dom_size).unwrap();
|
||||
let mut l = dom_size / 2;
|
||||
let mut m: usize = 1;
|
||||
let mut dom_fr = Fr::from_str("1").unwrap();
|
||||
//Stockham FFT
|
||||
let mut xprev = h.to_vec();
|
||||
for _ in 1..=p {
|
||||
let mut xnext = vec![];
|
||||
xnext.resize(xprev.len(), h[0]);
|
||||
for j in 0..l {
|
||||
for k in 0..m {
|
||||
let c0 = xprev[k + j * m].clone();
|
||||
let c1 = &xprev[k + j * m + l * m];
|
||||
xnext[k + 2 * j * m] = c0 + c1;
|
||||
let wj_2l = input_domain
|
||||
.element((dom_size - (j * dom_size / (2 * l)) % dom_size) % dom_size);
|
||||
xnext[k + 2 * j * m + m] = (c0 - c1).mul(wj_2l.into_repr());
|
||||
//Difference #1 to forward dft
|
||||
}
|
||||
}
|
||||
l = l / 2;
|
||||
m = m * 2;
|
||||
dom_fr = dom_fr + dom_fr;
|
||||
xprev = xnext;
|
||||
}
|
||||
let res = xprev
|
||||
.iter()
|
||||
.map(|x| x.mul(dom_fr.inverse().unwrap().into_repr()))
|
||||
.collect();
|
||||
return res;
|
||||
}
|
||||
|
||||
//compute all openings to c_poly by mere calling `open` N times
|
||||
#[allow(dead_code)]
|
||||
pub fn multiple_open_naive(
|
||||
c_poly: &DensePolynomial<Fp256<FrParameters>>,
|
||||
c_com_open: &Randomness<Fp256<FrParameters>, DensePolynomial<Fp256<FrParameters>>>,
|
||||
poly_ck: &Powers<Bls12_381>,
|
||||
degree: usize,
|
||||
) -> Vec<G1Affine> {
|
||||
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(degree).unwrap();
|
||||
let mut res: Vec<G1Affine> = vec![];
|
||||
for i in 0..input_domain.size() {
|
||||
let omega_i = input_domain.element(i);
|
||||
res.push(kzg_open_g1(&c_poly, &omega_i, &c_com_open, &poly_ck).w);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
//compute all openings to c_poly by mere calling `open` N times
|
||||
pub fn multiple_open_naive_g2(
|
||||
c_poly: &DensePolynomial<Fp256<FrParameters>>,
|
||||
srs: &PublicParameters,
|
||||
degree: usize,
|
||||
) -> Vec<G2Affine> {
|
||||
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(degree).unwrap();
|
||||
let mut res: Vec<G2Affine> = vec![];
|
||||
for i in 0..input_domain.size() {
|
||||
let omega_i = input_domain.element(i);
|
||||
res.push(kzg_open_g2(&c_poly, &omega_i, srs));
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn kzg_open_g2(
|
||||
p: &DensePolynomial<Fp256<FrParameters>>,
|
||||
x: &Fp256<FrParameters>, //point
|
||||
srs: &PublicParameters,
|
||||
) -> G2Affine {
|
||||
let tmp = DensePolynomial::from_coefficients_slice(&[Fr::one()]);
|
||||
let (_tmp_com, tmp_com_open) =
|
||||
KzgBls12_381::commit(&srs.poly_ck, &tmp, None, None).unwrap();
|
||||
let (witness_polynomial, _random_witness_polynomial) =
|
||||
KzgBls12_381::compute_witness_polynomial(p, x.clone(), &tmp_com_open).unwrap();
|
||||
|
||||
return kzg_commit_g2(&witness_polynomial, srs);
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[test]
|
||||
pub fn test_commit() {
|
||||
// current kzg setup should be changed with output from a setup ceremony
|
||||
let p: usize = 8; //bitlength of poly degree
|
||||
let max_degree: usize = (1 << p) + 2;
|
||||
let actual_degree: usize = (1 << p) - 1;
|
||||
let m: usize = 1 << (p / 2);
|
||||
let N: usize = 1 << p;
|
||||
let pp = setup_multi_lookup(&max_degree, &N, &m, &p);
|
||||
|
||||
// Setting up test instance to run evaluate on.
|
||||
// test randomness for c_poly is the same every time.
|
||||
// g_c = g^(c(x))
|
||||
let rng = &mut ark_std::test_rng();
|
||||
let c_poly = UniPoly381::rand(actual_degree, rng);
|
||||
let (c_com, _) = KzgBls12_381::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
|
||||
let g_c1 = c_com.0;
|
||||
|
||||
let g_c2 = commit_direct(&c_poly, &pp.poly_ck);
|
||||
assert_eq!(g_c1, g_c2);
|
||||
println!("commit test passed")
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[test]
|
||||
pub fn test_multi_g2() {
|
||||
// current kzg setup should be changed with output from a setup ceremony
|
||||
let p: usize = 6; //bitlength of poly degree
|
||||
let max_degree: usize = (1 << p) + 2;
|
||||
let actual_degree: usize = (1 << p) - 1;
|
||||
let m: usize = 1 << (p / 2);
|
||||
let N: usize = 1 << p;
|
||||
let pp = setup_multi_lookup(&max_degree, &N, &m, &p);
|
||||
|
||||
// Setting up test instance to run evaluate on.
|
||||
// test randomness for c_poly is the same every time.
// test index equals 5 every time
|
||||
// g_c = g^(c(x))
|
||||
let rng = &mut ark_std::test_rng();
|
||||
let c_poly = UniPoly381::rand(actual_degree, rng);
|
||||
|
||||
let now = Instant::now();
|
||||
let q = multiple_open_naive_g2(&c_poly, &pp, actual_degree);
|
||||
println!("Multi naive computed in {:?}", now.elapsed());
|
||||
|
||||
let now = Instant::now();
|
||||
let q2 = multiple_open_g2(&pp.g2_powers, &c_poly, p);
|
||||
println!("Multi advanced computed in {:?}", now.elapsed());
|
||||
assert_eq!(q, q2);
|
||||
}
|
||||
}
|
||||
caulk_multi_lookup/src/tools.rs
@@ -1,634 +0,0 @@
|
||||
/*
|
||||
This file includes backend tools:
|
||||
(1) read_line() is for taking inputs from the user
|
||||
(2) bipoly_commit commits to bivariate polynomials
|
||||
(3) hash_caulk_single is for hashing group and field elements into a field element
|
||||
(4) random_field is for generating random field elements
|
||||
(5) convert_to_bigints is for formatting inputs into multiscalar operations
|
||||
(6) kzg_open_g1_native is for opening KZG commitments
|
||||
(7) kzg_partial_open_g1_native is for partially opening bivariate commitments to univariate commitments
|
||||
(8) kzg_verify_g1_native is for verifying KZG commitments
|
||||
(9) kzg_partial_verify_g1_native is for partially verifying bivariate commitments to univariate commitments
|
||||
(10) generate_lagrange_polynomials_subset is for generating Lagrange polynomials over a subset of the domain (not the full set of roots of unity).
|
||||
(11) aggregate_kzg_proofs_g2 is for aggregating KZG proofs
|
||||
*/
|
||||
|
||||
use ark_bls12_381::{Bls12_381, Fr, FrParameters, G1Affine, G1Projective, G2Affine, G2Projective};
|
||||
use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine, ProjectiveCurve};
|
||||
use ark_ff::{Field, Fp256, PrimeField};
|
||||
use ark_poly::{
|
||||
univariate::DensePolynomial as DensePoly, EvaluationDomain, GeneralEvaluationDomain,
|
||||
Polynomial, UVPolynomial,
|
||||
};
|
||||
use ark_poly_commit::kzg10::*;
|
||||
use ark_serialize::CanonicalSerialize;
|
||||
use ark_std::One;
|
||||
use ark_std::Zero;
|
||||
|
||||
use blake2s_simd::Params;
|
||||
use rand::{thread_rng, Rng, SeedableRng};
|
||||
use rand_chacha::ChaChaRng;
|
||||
use std::{error::Error, io, str::FromStr};
|
||||
|
||||
use crate::caulk_multi_setup::PublicParameters;
|
||||
|
||||
pub type UniPoly381 = DensePoly<<Bls12_381 as PairingEngine>::Fr>;
|
||||
pub type KzgBls12_381 = KZG10<Bls12_381, UniPoly381>;
|
||||
|
||||
// Function for reading inputs from the command line.
|
||||
pub fn read_line<T: FromStr>() -> T
|
||||
where
|
||||
<T as FromStr>::Err: Error + 'static,
|
||||
{
|
||||
let mut input = String::new();
|
||||
io::stdin()
|
||||
.read_line(&mut input)
|
||||
.expect("Failed to get console input.");
|
||||
let output: T = input.trim().parse().expect("Console input is invalid.");
|
||||
output
|
||||
}
|
||||
|
||||
/*
|
||||
Function to commit to f(X,Y)
|
||||
here f = [ [a0, a1, a2], [b0, b1, b2] ] represents (a0 + a1 Y + a2 Y^2 ) + X (b0 + b1 Y + b2 Y^2)
|
||||
|
||||
First we unwrap to get a vector of form [a0, a1, a2, b0, b1, b2]
|
||||
Then we commit to f as a commitment to f'(X) = a0 + a1 X + a2 X^2 + b0 X^3 + b1 X^4 + b2 X^5
|
||||
|
||||
We also need to know the maximum degree of (a0 + a1 Y + a2 Y^2 ) to prevent overflow errors.
|
||||
|
||||
This is described in Section 4.6.2
|
||||
*/
|
||||
pub fn bipoly_commit(
|
||||
pp: &PublicParameters,
|
||||
poly: &Vec<DensePoly<Fp256<FrParameters>>>,
|
||||
deg_x: usize,
|
||||
) -> G1Affine {
|
||||
let mut poly_formatted = Vec::new();
|
||||
|
||||
for i in 0..poly.len() {
|
||||
let temp = convert_to_bigints(&poly[i].coeffs);
|
||||
for j in 0..poly[i].len() {
|
||||
poly_formatted.push(temp[j]);
|
||||
}
|
||||
let temp = convert_to_bigints(&[Fr::zero()].to_vec())[0];
|
||||
for _ in poly[i].len()..deg_x {
|
||||
poly_formatted.push(temp);
|
||||
}
|
||||
}
|
||||
|
||||
assert!(pp.poly_ck.powers_of_g.len() >= poly_formatted.len());
|
||||
let g1_poly =
|
||||
VariableBaseMSM::multi_scalar_mul(&pp.poly_ck.powers_of_g, poly_formatted.as_slice())
|
||||
.into_affine();
|
||||
|
||||
return g1_poly;
|
||||
}
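
// Illustrative sketch (not part of the original code): with deg_x = 3, the bivariate polynomial
// f(X,Y) = (a0 + a1*Y) + X*(b0 + b1*Y + b2*Y^2) is committed as the flattened univariate
// f'(Z) = a0 + a1*Z + 0*Z^2 + b0*Z^3 + b1*Z^4 + b2*Z^5, exactly as the comment above describes.
// The setup parameters below are small example values chosen for the sketch.
#[cfg(test)]
mod bipoly_commit_sketch {
    use super::*;
    use crate::caulk_multi_setup::setup_multi_lookup;

    #[test]
    fn bipoly_commit_matches_flattened_commitment() {
        let p: usize = 4;
        let max_degree: usize = (1 << p) + 2;
        let n_size: usize = 1 << p;
        let m: usize = 4;
        let pp = setup_multi_lookup(&max_degree, &n_size, &m, &p);

        // f(X,Y) = (1 + 2Y) + X * (3 + 4Y + 5Y^2)
        let a = DensePoly::from_coefficients_slice(&[Fr::from(1u64), Fr::from(2u64)]);
        let b = DensePoly::from_coefficients_slice(&[Fr::from(3u64), Fr::from(4u64), Fr::from(5u64)]);
        let com = bipoly_commit(&pp, &vec![a, b], 3);

        // flattened coefficient vector [1, 2, 0, 3, 4, 5]
        let flattened = DensePoly::from_coefficients_slice(&[
            Fr::from(1u64), Fr::from(2u64), Fr::zero(),
            Fr::from(3u64), Fr::from(4u64), Fr::from(5u64),
        ]);
        let (flat_com, _) = KzgBls12_381::commit(&pp.poly_ck, &flattened, None, None).unwrap();
        assert_eq!(com, flat_com.0);
    }
}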
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
// Hashing
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
// hashing to field copied from
|
||||
// https://github.com/kobigurk/aggregatable-dkg/blob/main/src/signature/utils/hash.rs
|
||||
fn rng_from_message(personalization: &[u8], message: &[u8]) -> ChaChaRng {
|
||||
let hash = Params::new()
|
||||
.hash_length(32)
|
||||
.personal(personalization)
|
||||
.to_state()
|
||||
.update(message)
|
||||
.finalize();
|
||||
let mut seed = [0u8; 32];
|
||||
seed.copy_from_slice(hash.as_bytes());
|
||||
let rng = ChaChaRng::from_seed(seed);
|
||||
rng
|
||||
}
|
||||
|
||||
pub fn hash_to_field<F: PrimeField>(personalization: &[u8], message: &[u8]) -> F {
|
||||
let mut rng = rng_from_message(personalization, message);
|
||||
loop {
|
||||
let bytes: Vec<u8> = (0..F::zero().serialized_size())
|
||||
.map(|_| rng.gen())
|
||||
.collect();
|
||||
if let Some(p) = F::from_random_bytes(&bytes) {
|
||||
return p;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* hash function that takes as input:
|
||||
(1) some state (either equal to the last hash output or zero)
|
||||
(2) a vector of g1 elements
|
||||
(3) a vector of g2 elements
|
||||
(4) a vector of field elements
|
||||
|
||||
It returns a field element.
|
||||
*/
|
||||
pub fn hash_caulk_multi<F: PrimeField>(
|
||||
state: Fr,
|
||||
g1_elements: Option<&Vec<&G1Affine>>,
|
||||
g2_elements: Option<&Vec<&G2Affine>>,
|
||||
field_elements: Option<&Vec<&Fr>>,
|
||||
) -> Fr {
|
||||
// PERSONALIZATION distinguishes this hash from other hashes that may be in the system
|
||||
const PERSONALIZATION: &[u8] = b"CAULK2";
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// Handling the cases where no g1_elements, no g2_elements, or no field_elements are input
|
||||
///////////////////////////////////////////////////////////
|
||||
let g1_elements_len: usize;
|
||||
let g2_elements_len: usize;
|
||||
let field_elements_len: usize;
|
||||
|
||||
if g1_elements == None {
|
||||
g1_elements_len = 0;
|
||||
} else {
|
||||
g1_elements_len = g1_elements.unwrap().len();
|
||||
}
|
||||
|
||||
if g2_elements == None {
|
||||
g2_elements_len = 0;
|
||||
} else {
|
||||
g2_elements_len = g2_elements.unwrap().len();
|
||||
}
|
||||
|
||||
if field_elements == None {
|
||||
field_elements_len = 0;
|
||||
} else {
|
||||
field_elements_len = field_elements.unwrap().len();
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// Transform inputs into bytes
|
||||
///////////////////////////////////////////////////////////
|
||||
let mut state_bytes = vec![];
|
||||
state.serialize(&mut state_bytes).ok();
|
||||
|
||||
let mut g1_elements_bytes = Vec::new();
|
||||
for i in 0..g1_elements_len {
|
||||
let mut temp = vec![];
|
||||
g1_elements.unwrap()[i].clone().serialize(&mut temp).ok();
|
||||
g1_elements_bytes.append(&mut temp.clone());
|
||||
}
|
||||
|
||||
let mut g2_elements_bytes = Vec::new();
|
||||
for i in 0..g2_elements_len {
|
||||
let mut temp = vec![];
|
||||
g2_elements.unwrap()[i].clone().serialize(&mut temp).ok();
|
||||
g2_elements_bytes.append(&mut temp.clone());
|
||||
}
|
||||
|
||||
let mut field_elements_bytes = Vec::new();
|
||||
for i in 0..field_elements_len {
|
||||
let mut temp = vec![];
|
||||
field_elements.unwrap()[i].clone().serialize(&mut temp).ok();
|
||||
field_elements_bytes.append(&mut temp.clone());
|
||||
}
|
||||
|
||||
// Transform bytes into vector of bytes of the form expected by hash_to_field
|
||||
let mut hash_input: Vec<u8> = state_bytes.clone();
|
||||
for i in 0..g1_elements_bytes.len() {
|
||||
hash_input = [&hash_input as &[_], &[g1_elements_bytes[i]]].concat();
|
||||
}
|
||||
|
||||
for i in 0..g2_elements_bytes.len() {
|
||||
hash_input = [&hash_input as &[_], &[g2_elements_bytes[i]]].concat();
|
||||
}
|
||||
|
||||
for i in 0..field_elements_bytes.len() {
|
||||
hash_input = [&hash_input as &[_], &[field_elements_bytes[i]]].concat();
|
||||
}
|
||||
|
||||
// hash_to_field
|
||||
return hash_to_field::<Fr>(PERSONALIZATION, &hash_input);
|
||||
}
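
// Usage sketch (an illustrative addition, not part of the original code): the transcript hash is
// deterministic in its inputs, so prover and verifier derive the same challenge, while changing
// any input changes the challenge (with overwhelming probability). Inputs here are placeholders.
#[cfg(test)]
mod hash_caulk_multi_sketch {
    use super::*;

    #[test]
    fn challenge_is_deterministic() {
        let v1 = Fr::one();
        let v2 = v1 + v1;
        let c1 = hash_caulk_multi::<Fr>(Fr::zero(), None, None, Some(&vec![&v1]));
        let c2 = hash_caulk_multi::<Fr>(Fr::zero(), None, None, Some(&vec![&v1]));
        let c3 = hash_caulk_multi::<Fr>(Fr::zero(), None, None, Some(&vec![&v2]));
        assert_eq!(c1, c2);
        assert_ne!(c1, c3);
    }
}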
|
||||
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
pub fn random_field<F: PrimeField>() -> F {
|
||||
let mut rng = thread_rng();
|
||||
loop {
|
||||
let bytes: Vec<u8> = (0..F::zero().serialized_size())
|
||||
.map(|_| rng.gen())
|
||||
.collect();
|
||||
if let Some(p) = F::from_random_bytes(&bytes) {
|
||||
return p;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//copied from arkworks
|
||||
pub fn convert_to_bigints<F: PrimeField>(p: &Vec<F>) -> Vec<F::BigInt> {
|
||||
let coeffs = ark_std::cfg_iter!(p)
|
||||
.map(|s| s.into_repr())
|
||||
.collect::<Vec<_>>();
|
||||
coeffs
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////
|
||||
//
|
||||
/*
|
||||
KZG.Open( srs_KZG, f(X), deg, (alpha1, alpha2, ..., alphan) )
|
||||
returns ([f(alpha1), ..., f(alphan)], pi)
|
||||
Algorithm described in Section 4.6.1, Multiple Openings
|
||||
*/
|
||||
pub fn kzg_open_g1_native(
|
||||
poly_ck: &Powers<Bls12_381>,
|
||||
poly: &DensePoly<Fr>,
|
||||
max_deg: Option<&usize>,
|
||||
points: Vec<&Fr>,
|
||||
) -> (Vec<Fr>, G1Affine) {
|
||||
let mut evals = Vec::new();
|
||||
let mut proofs = Vec::new();
|
||||
for i in 0..points.len() {
|
||||
let (eval, pi) = kzg_open_g1_native_single(poly_ck, poly, max_deg, points[i]);
|
||||
evals.push(eval);
|
||||
proofs.push(pi);
|
||||
}
|
||||
|
||||
let mut res: G1Projective = G1Projective::zero(); //default value
|
||||
|
||||
for j in 0..points.len() {
|
||||
let w_j = points[j].clone();
|
||||
//1. Computing coefficient [1/prod]
|
||||
let mut prod = Fr::one();
|
||||
for k in 0..points.len() {
|
||||
let w_k = points[k];
|
||||
if k != j {
|
||||
prod = prod * (w_j - w_k);
|
||||
}
|
||||
}
|
||||
//2. Summation
|
||||
let q_add = proofs[j].mul(prod.inverse().unwrap()); //[1/prod]Q_{j}
|
||||
res = res + q_add;
|
||||
}
|
||||
|
||||
return (evals, res.into_affine());
|
||||
}
|
||||
|
||||
fn kzg_open_g1_native_single(
|
||||
poly_ck: &Powers<Bls12_381>,
|
||||
poly: &DensePoly<Fr>,
|
||||
max_deg: Option<&usize>,
|
||||
point: &Fr,
|
||||
) -> (Fr, G1Affine) {
|
||||
let eval = poly.evaluate(&point);
|
||||
|
||||
let global_max_deg = poly_ck.powers_of_g.len();
|
||||
|
||||
let mut d: usize = 0;
|
||||
if max_deg == None {
|
||||
d += global_max_deg;
|
||||
} else {
|
||||
d += max_deg.unwrap();
|
||||
}
|
||||
let divisor = DensePoly::from_coefficients_vec(vec![-point.clone(), Fr::one()]);
|
||||
let witness_polynomial = poly / &divisor;
|
||||
|
||||
assert!(poly_ck.powers_of_g[(global_max_deg - d)..].len() >= witness_polynomial.len());
|
||||
let proof = VariableBaseMSM::multi_scalar_mul(
|
||||
&poly_ck.powers_of_g[(global_max_deg - d)..],
|
||||
&convert_to_bigints(&witness_polynomial.coeffs).as_slice(),
|
||||
)
|
||||
.into_affine();
|
||||
return (eval, proof);
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////
|
||||
//
|
||||
/*
|
||||
KZG.Open( srs_KZG, f(X, Y), deg, alpha )
|
||||
returns ([f(alpha, x)]_1, pi)
|
||||
Algorithm described in Section 4.6.2, KZG for Bivariate Polynomials
|
||||
*/
|
||||
pub fn kzg_partial_open_g1_native(
|
||||
pp: &PublicParameters,
|
||||
poly: &Vec<DensePoly<Fr>>,
|
||||
deg_x: usize,
|
||||
point: &Fr,
|
||||
) -> (G1Affine, G1Affine, DensePoly<Fr>) {
|
||||
let mut poly_partial_eval = DensePoly::from_coefficients_vec(vec![Fr::zero()]);
|
||||
let mut alpha = Fr::one();
|
||||
for i in 0..poly.len() {
|
||||
let pow_alpha = DensePoly::from_coefficients_vec(vec![alpha.clone()]);
|
||||
poly_partial_eval = poly_partial_eval + &pow_alpha * &poly[i];
|
||||
alpha = alpha * point;
|
||||
}
|
||||
|
||||
let eval = VariableBaseMSM::multi_scalar_mul(
|
||||
&pp.poly_ck.powers_of_g,
|
||||
convert_to_bigints(&poly_partial_eval.coeffs).as_slice(),
|
||||
)
|
||||
.into_affine();
|
||||
|
||||
let mut witness_bipolynomial = Vec::new();
|
||||
let poly_reverse: Vec<_> = poly.into_iter().rev().collect();
|
||||
witness_bipolynomial.push(poly_reverse[0].clone());
|
||||
|
||||
let alpha = DensePoly::from_coefficients_vec(vec![point.clone()]);
|
||||
for i in 1..(poly_reverse.len() - 1) {
|
||||
witness_bipolynomial.push(poly_reverse[i].clone() + &alpha * &witness_bipolynomial[i - 1]);
|
||||
}
|
||||
|
||||
witness_bipolynomial.reverse();
|
||||
|
||||
let proof = bipoly_commit(pp, &witness_bipolynomial, deg_x);
|
||||
|
||||
return (eval, proof, poly_partial_eval);
|
||||
}
|
||||
|
||||
/*
|
||||
// KZG.Verify( srs_KZG, F, deg, (alpha1, alpha2, ..., alphan), (v1, ..., vn), pi )
|
||||
Algorithm described in Section 4.6.1, Multiple Openings
|
||||
*/
|
||||
pub fn kzg_verify_g1_native(
|
||||
//Verify that @c_com is a commitment to C(X) such that C(x)=z
|
||||
srs: &PublicParameters,
|
||||
c_com: G1Affine, //commitment
|
||||
max_deg: Option<&usize>, // max degree
|
||||
points: Vec<Fr>, // x such that eval = C(x)
|
||||
evals: Vec<Fr>, //evaluation
|
||||
pi: G1Affine, //proof
|
||||
) -> bool {
|
||||
// Interpolation set
|
||||
// tau_i(X) = lagrange_tau[i] = polynomial equal to 0 at point[j] for j!= i and 1 at points[i]
|
||||
|
||||
let mut lagrange_tau = DensePoly::from_coefficients_slice(&[Fr::zero()]);
|
||||
for i in 0..points.len() {
|
||||
let mut temp: UniPoly381 = DensePoly::from_coefficients_slice(&[Fr::one()]);
|
||||
for j in 0..points.len() {
|
||||
if i != j {
|
||||
temp = &temp * (&DensePoly::from_coefficients_slice(&[-points[j], Fr::one()]));
|
||||
}
|
||||
}
|
||||
let lagrange_scalar = temp.evaluate(&points[i]).inverse().unwrap() * &evals[i];
|
||||
lagrange_tau =
|
||||
lagrange_tau + &temp * (&DensePoly::from_coefficients_slice(&[lagrange_scalar]));
|
||||
}
|
||||
|
||||
// commit to sum evals[i] tau_i(X)
|
||||
|
||||
// println!( "lagrange_tau = {:?}", lagrange_tau.evaluate(&points[0]) == evals[0] );
|
||||
assert!(
|
||||
srs.poly_ck.powers_of_g.len() >= lagrange_tau.len(),
|
||||
"not enough powers of g in kzg_verify_g1_native"
|
||||
);
|
||||
let g1_tau = VariableBaseMSM::multi_scalar_mul(
|
||||
&srs.poly_ck.powers_of_g[..lagrange_tau.len()],
|
||||
convert_to_bigints(&lagrange_tau.coeffs).as_slice(),
|
||||
);
|
||||
|
||||
// vanishing polynomial
|
||||
// z_tau[i] = polynomial equal to 0 at point[j]
|
||||
|
||||
let mut z_tau = DensePoly::from_coefficients_slice(&[Fr::one()]);
|
||||
for i in 0..points.len() {
|
||||
z_tau = &z_tau * (&DensePoly::from_coefficients_slice(&[-points[i], Fr::one()]));
|
||||
}
|
||||
|
||||
// commit to z_tau(X) in g2
|
||||
assert!(srs.g2_powers.len() >= z_tau.len());
|
||||
let g2_z_tau = VariableBaseMSM::multi_scalar_mul(
|
||||
&srs.g2_powers[..z_tau.len()],
|
||||
convert_to_bigints(&z_tau.coeffs).as_slice(),
|
||||
);
|
||||
|
||||
let global_max_deg = srs.poly_ck.powers_of_g.len();
|
||||
|
||||
let mut d: usize = 0;
|
||||
if max_deg == None {
|
||||
d += global_max_deg;
|
||||
} else {
|
||||
d += max_deg.unwrap();
|
||||
}
|
||||
|
||||
let pairing1 = Bls12_381::pairing(
|
||||
c_com.into_projective() - g1_tau,
|
||||
srs.g2_powers[global_max_deg - d],
|
||||
);
|
||||
|
||||
let pairing2 = Bls12_381::pairing(pi, g2_z_tau);
|
||||
|
||||
return pairing1 == pairing2;
|
||||
}
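
// Usage sketch (an illustrative addition, not part of the original code): open a random polynomial
// at a few domain points with kzg_open_g1_native and check the aggregated proof with
// kzg_verify_g1_native, as in Section 4.6.1. The parameters (p = 6, degree 30, 4 points) are
// example values chosen for the sketch.
#[cfg(test)]
mod kzg_multi_open_sketch {
    use super::*;
    use crate::caulk_multi_setup::setup_multi_lookup;

    #[test]
    fn open_then_verify() {
        let p: usize = 6;
        let max_degree: usize = (1 << p) + 2;
        let n_size: usize = 1 << p;
        let m: usize = 1 << (p / 2);
        let pp = setup_multi_lookup(&max_degree, &n_size, &m, &p);

        let rng = &mut ark_std::test_rng();
        let f = UniPoly381::rand(30, rng);
        let (f_com, _) = KzgBls12_381::commit(&pp.poly_ck, &f, None, None).unwrap();

        let points: Vec<Fr> = (0..4).map(|i| pp.domain_N.element(i)).collect();
        let point_refs: Vec<&Fr> = points.iter().collect();
        let (evals, pi) = kzg_open_g1_native(&pp.poly_ck, &f, None, point_refs);

        assert!(kzg_verify_g1_native(&pp, f_com.0, None, points, evals, pi));
    }
}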
|
||||
|
||||
/*
|
||||
KZG.Verify( srs_KZG, F, deg, alpha, F_alpha, pi )
|
||||
Algorithm described in Section 4.6.2, KZG for Bivariate Polynomials
|
||||
Be very careful here. Verification is only valid if it is paired with a degree check.
|
||||
*/
|
||||
pub fn kzg_partial_verify_g1_native(
|
||||
srs: &PublicParameters,
|
||||
c_com: G1Affine, //commitment
|
||||
deg_x: usize,
|
||||
point: Fr,
|
||||
partial_eval: G1Affine,
|
||||
pi: G1Affine, //proof
|
||||
) -> bool {
|
||||
let pairing1 = Bls12_381::pairing(
|
||||
c_com.into_projective() - partial_eval.into_projective(),
|
||||
srs.g2_powers[0],
|
||||
);
|
||||
let pairing2 = Bls12_381::pairing(
|
||||
pi,
|
||||
srs.g2_powers[deg_x].into_projective() - srs.g2_powers[0].mul(point),
|
||||
);
|
||||
|
||||
return pairing1 == pairing2;
|
||||
}
|
||||
|
||||
pub fn kzg_commit_g2(poly: &DensePoly<Fp256<FrParameters>>, srs: &PublicParameters) -> G2Affine {
|
||||
let mut res = srs.g2_powers[0].mul(poly[0]);
|
||||
for i in 1..poly.len() {
|
||||
res = res + srs.g2_powers[i].mul(poly[i])
|
||||
}
|
||||
return res.into_affine();
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
|
||||
pub fn generate_lagrange_polynomials_subset(
|
||||
positions: &Vec<usize>,
|
||||
srs: &PublicParameters,
|
||||
) -> Vec<DensePoly<Fp256<FrParameters>>> {
|
||||
let mut tau_polys = vec![];
|
||||
let m = positions.len();
|
||||
for j in 0..m {
|
||||
let mut tau_j = DensePoly::from_coefficients_slice(&[Fr::one()]); //start from tau_j =1
|
||||
for k in 0..m {
|
||||
if k != j {
|
||||
//tau_j = prod_{k\neq j} (X-w^(i_k))/(w^(i_j)-w^(i_k))
|
||||
let denum = srs.domain_N.element(positions[j]) - srs.domain_N.element(positions[k]);
|
||||
tau_j = &tau_j
|
||||
* &DensePoly::from_coefficients_slice(&[
|
||||
-srs.domain_N.element(positions[k]) / denum, //-w^(i_k)/(w^(i_j)-w^(i_k))
|
||||
Fr::one() / denum, //1/(w^(i_j)-w^(i_k))
|
||||
]);
|
||||
}
|
||||
}
|
||||
tau_polys.push(tau_j.clone());
|
||||
}
|
||||
tau_polys
|
||||
}
|
||||
|
||||
/*
|
||||
Algorithm for aggregating KZG proofs into a single proof
|
||||
Described in Section 4.6.3 Subset openings
|
||||
compute Q =\sum_{j=1}^m \frac{Q_{i_j}}}{\prod_{1\leq k\leq m,\; k\neq j}(\omega^{i_j}-\omega^{i_k})}
|
||||
*/
|
||||
pub fn aggregate_kzg_proofs_g2(
|
||||
openings: &Vec<G2Affine>, //Q_i
|
||||
positions: &Vec<usize>, //i_j
|
||||
input_domain: &GeneralEvaluationDomain<Fr>,
|
||||
) -> G2Affine {
|
||||
let m = positions.len();
|
||||
let mut res: G2Projective = openings[0].into_projective(); //default value
|
||||
|
||||
for j in 0..m {
|
||||
let i_j = positions[j];
|
||||
let w_ij = input_domain.element(i_j);
|
||||
//1. Computing coefficient [1/prod]
|
||||
let mut prod = Fr::one();
|
||||
for k in 0..m {
|
||||
let i_k = positions[k];
|
||||
let w_ik = input_domain.element(i_k);
|
||||
if k != j {
|
||||
prod = prod * (w_ij - w_ik);
|
||||
}
|
||||
}
|
||||
//2. Summation
|
||||
let q_add = openings[i_j].mul(prod.inverse().unwrap()); //[1/prod]Q_{j}
|
||||
if j == 0 {
|
||||
res = q_add;
|
||||
} else {
|
||||
res = res + q_add;
|
||||
}
|
||||
}
|
||||
return res.into_affine();
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
use crate::caulk_multi_setup::setup_multi_lookup;
|
||||
|
||||
use crate::multiopen::multiple_open_g2;
|
||||
use crate::tools::{
|
||||
aggregate_kzg_proofs_g2, generate_lagrange_polynomials_subset, KzgBls12_381, UniPoly381,
|
||||
};
|
||||
|
||||
use ark_poly::{
|
||||
univariate::DensePolynomial as DensePoly, EvaluationDomain, Polynomial, UVPolynomial,
|
||||
};
|
||||
|
||||
use ark_bls12_381::{Bls12_381, Fr, G2Affine};
|
||||
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
|
||||
use ark_std::{One, Zero};
|
||||
use std::time::Instant;
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[test]
|
||||
pub fn test_lagrange() {
|
||||
let p: usize = 8; //bitlength of poly degree
|
||||
let max_degree: usize = (1 << p) + 2;
|
||||
let m: usize = 8;
|
||||
let N: usize = 1 << p;
|
||||
let pp = setup_multi_lookup(&max_degree, &N, &m, &p);
|
||||
let now = Instant::now();
|
||||
println!("time to setup {:?}", now.elapsed());
|
||||
|
||||
let mut positions: Vec<usize> = vec![];
|
||||
for i in 0..m {
|
||||
//generate positions evenly distributed in the set
|
||||
let i_j: usize = i * (max_degree / m);
|
||||
positions.push(i_j);
|
||||
}
|
||||
|
||||
let tau_polys = generate_lagrange_polynomials_subset(&positions, &pp);
|
||||
for j in 0..m {
|
||||
for k in 0..m {
|
||||
if k == j {
|
||||
assert_eq!(
|
||||
tau_polys[j].evaluate(&pp.domain_N.element(positions[k])),
|
||||
Fr::one()
|
||||
)
|
||||
} else {
|
||||
assert_eq!(
|
||||
tau_polys[j].evaluate(&pp.domain_N.element(positions[k])),
|
||||
Fr::zero()
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[test]
|
||||
pub fn test_Q_g2() {
|
||||
// current kzg setup should be changed with output from a setup ceremony
|
||||
let p: usize = 6; //bitlength of poly degree
|
||||
let max_degree: usize = (1 << p) + 2;
|
||||
let actual_degree: usize = (1 << p) - 1;
|
||||
let m: usize = 1 << (p / 2);
|
||||
let N: usize = 1 << p;
|
||||
let pp = setup_multi_lookup(&max_degree, &N, &m, &p);
|
||||
|
||||
// Setting up test instance to run evaluate on.
|
||||
// test randomness for c_poly is the same every time.
// test index equals 5 every time
|
||||
// g_c = g^(c(x))
|
||||
let rng = &mut ark_std::test_rng();
|
||||
let c_poly = UniPoly381::rand(actual_degree, rng);
|
||||
let (c_com, _) = KzgBls12_381::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
|
||||
|
||||
let now = Instant::now();
|
||||
let openings = multiple_open_g2(&pp.g2_powers, &c_poly, p);
|
||||
println!("Multi advanced computed in {:?}", now.elapsed());
|
||||
|
||||
let mut positions: Vec<usize> = vec![];
|
||||
for i in 0..m {
|
||||
let i_j: usize = i * (max_degree / m);
|
||||
positions.push(i_j);
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
|
||||
//Compute proof
|
||||
let Q: G2Affine = aggregate_kzg_proofs_g2(&openings, &positions, &pp.domain_N);
|
||||
println!(
|
||||
"Full proof for {:?} positions computed in {:?}",
|
||||
m,
|
||||
now.elapsed()
|
||||
);
|
||||
|
||||
//Compute commitment to C_I
|
||||
let mut C_I = DensePoly::from_coefficients_slice(&[Fr::zero()]); //C_I = sum_j c_j*tau_j
|
||||
let tau_polys = generate_lagrange_polynomials_subset(&positions, &pp);
|
||||
for j in 0..m {
|
||||
C_I = &C_I + &(&tau_polys[j] * c_poly.evaluate(&pp.domain_N.element(positions[j])));
|
||||
//sum_j c_j*tau_j
|
||||
}
|
||||
let (c_I_com, _c_I_com_open) = KzgBls12_381::commit(&pp.poly_ck, &C_I, None, None).unwrap();
|
||||
|
||||
//Compute commitment to z_I
|
||||
let mut z_I = DensePoly::from_coefficients_slice(&[Fr::one()]);
|
||||
for j in 0..m {
|
||||
z_I = &z_I
|
||||
* &DensePoly::from_coefficients_slice(&[
|
||||
-pp.domain_N.element(positions[j]),
|
||||
Fr::one(),
|
||||
]);
|
||||
}
|
||||
let (z_I_com, _) = KzgBls12_381::commit(&pp.poly_ck, &z_I, None, None).unwrap();
|
||||
|
||||
//pairing check
|
||||
let pairing1 = Bls12_381::pairing(
|
||||
(c_com.0.into_projective() - c_I_com.0.into_projective()).into_affine(),
|
||||
pp.g2_powers[0],
|
||||
);
|
||||
let pairing2 = Bls12_381::pairing(z_I_com.0, Q);
|
||||
assert_eq!(pairing1, pairing2);
|
||||
}
|
||||
}
|
||||
caulk_single_opening/Cargo.toml
@@ -1,41 +0,0 @@
[package]
name = "caulk_single_opening"
version = "0.1.0"
authors = ["mmaller <mary.maller@ethereum.org>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
ark-ff = { version = "^0.3.0", default-features = false }
ark-ec = { version = "^0.3.0", default-features = false }
ark-serialize = { version = "^0.3.0", default-features = false, features = [ "derive" ] }
ark-poly = { version = "^0.3.0", default-features = false }
ark-std = { version = "^0.3.0", default-features = false }
ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true }
ark-bls12-381 = { version = "^0.3.0", features = [ "std" ] }
ark-bls12-377 = { version = "^0.3.0", features = [ "std" ] }
ark-poly-commit = { version = "^0.3.0", default-features = false }

tracing = { version = "0.1", default-features = false, features = [ "attributes" ], optional = true }
derivative = { version = "2.0", features = ["use_core"], optional = true}
rand = "0.8.5"
rand_chacha = { version = "0.3.1" }
thiserror = "1.0.19"
blake2s_simd = "1.0.0"

rayon = { version = "1.5.2", default-features = false, optional = true }
merlin = { version = "3.0.0" }

[features]
asm = [ "ark-ff/asm" ]
parallel = [
"rayon",
"ark-std/parallel",
"ark-ff/parallel",
"ark-poly/parallel"
]
print-trace = [
"ark-std/print-trace"
]
@@ -1,371 +0,0 @@
|
||||
/*
|
||||
This file includes an algorithm for calculating n openings of a KZG vector commitment of size n in n log(n) time.
|
||||
The algorithm is by Feist and Khovratovich.
|
||||
It is useful for preprocessing.
|
||||
The full algorithm is described here https://github.com/khovratovich/Kate/blob/master/Kate_amortized.pdf
|
||||
*/
|
||||
|
||||
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
|
||||
use ark_ff::{Field, PrimeField};
|
||||
use ark_poly::univariate::DensePolynomial;
|
||||
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain, UVPolynomial};
|
||||
use ark_poly_commit::kzg10::*;
|
||||
use ark_std::One;
|
||||
use ark_std::Zero;
|
||||
use std::vec::Vec;
|
||||
|
||||
//compute all pre-proofs using DFT
|
||||
// h_i= c_d[x^{d-i-1}]+c_{d-1}[x^{d-i-2}]+c_{d-2}[x^{d-i-3}]+\cdots + c_{i+2}[x]+c_{i+1}[1]
|
||||
pub fn compute_h<E: PairingEngine>(
|
||||
c_poly: &DensePolynomial<E::Fr>, //c(X) degree up to d<2^p , i.e. c_poly has at most d+1 coeffs non-zero
|
||||
poly_ck: &Powers<E>, //SRS
|
||||
p: usize,
|
||||
) -> Vec<E::G1Projective> {
|
||||
let mut coeffs = c_poly.coeffs().to_vec();
|
||||
let dom_size = 1 << p;
|
||||
let fpzero = E::Fr::zero();
|
||||
coeffs.resize(dom_size, fpzero);
|
||||
|
||||
//let now = Instant::now();
|
||||
//1. x_ext = [ [x^{d-1}], [x^{d-2}], ..., [x], [1], d+2 [0]'s ]
|
||||
let mut x_ext: Vec<E::G1Projective> = poly_ck
|
||||
.powers_of_g
|
||||
.iter()
|
||||
.take(dom_size - 1)
|
||||
.rev()
|
||||
.map(|x| x.into_projective())
|
||||
.collect();
|
||||
x_ext.resize(2 * dom_size, E::G1Projective::zero()); //filling 2d+2 neutral elements
|
||||
|
||||
let y = dft_g1::<E>(&x_ext, p + 1);
|
||||
//println!("Step 1 computed in {:?}", now.elapsed());
|
||||
|
||||
//2. c_ext = [c_d, d zeroes, c_d,c_{0},c_1,...,c_{d-2},c_{d-1}]
|
||||
//let now = Instant::now();
|
||||
let mut c_ext = vec![coeffs[coeffs.len() - 1]];
|
||||
c_ext.resize(dom_size, fpzero);
|
||||
c_ext.push(coeffs[coeffs.len() - 1]);
|
||||
for &e in coeffs.iter().take(coeffs.len() - 1) {
|
||||
c_ext.push(e);
|
||||
}
|
||||
assert_eq!(c_ext.len(), 2 * dom_size);
|
||||
let v = dft_opt::<E>(&c_ext, p + 1);
|
||||
//println!("Step 2 computed in {:?}", now.elapsed());
|
||||
|
||||
//3. u = y o v
|
||||
|
||||
//let now = Instant::now();
|
||||
let u: Vec<_> = y
|
||||
.into_iter()
|
||||
.zip(v.into_iter())
|
||||
.map(|(a, b)| a.mul(b.into_repr()))
|
||||
.collect();
|
||||
// println!("Step 3 computed in {:?}", now.elapsed());
|
||||
|
||||
//4. h_ext = idft_{2d+2}(u)
|
||||
//let now = Instant::now();
|
||||
let h_ext = idft_g1::<E>(&u, p + 1);
|
||||
//println!("Step 4 computed in {:?}", now.elapsed());
|
||||
|
||||
h_ext[0..dom_size].to_vec()
|
||||
}
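// Sketch of why steps 1-4 above work (as read from the code, following the Feist-Khovratovich
// write-up linked at the top of this file): h is a Toeplitz matrix-vector product in the group,
//   h_i = \sum_{k=i+1}^{d} c_k [x^{k-i-1}]_1 ,  i.e.  h = T(c) * s  with  s = ([x^{d-1}]_1, ..., [x]_1, [1]_1),
// and embedding T(c) into a circulant of size 2*dom_size turns the product into
//   h = first dom_size entries of idft_{2n}( dft_{2n}(x_ext) o dft_{2n}(c_ext) ),
// which is why the whole preprocessing costs O(n log n) group operations instead of O(n^2).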
|
||||
//compute DFT of size @dom_size over vector of G1 elements
|
||||
//q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for 0<= i< dom_size=2^p
|
||||
pub fn dft_g1<E: PairingEngine>(h: &[E::G1Projective], p: usize) -> Vec<E::G1Projective> {
|
||||
let dom_size = 1 << p;
|
||||
assert_eq!(h.len(), dom_size); //we do not support inputs of size not power of 2
|
||||
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(dom_size).unwrap();
|
||||
let mut l = dom_size / 2;
|
||||
let mut m: usize = 1;
|
||||
//Stockham FFT
|
||||
let mut xvec = h.to_vec();
|
||||
for _ in 0..p {
|
||||
let mut xt = xvec.clone();
|
||||
for j in 0..l {
|
||||
for k in 0..m {
|
||||
let c0 = xvec[k + j * m];
|
||||
let c1 = xvec[k + j * m + l * m];
|
||||
xt[k + 2 * j * m] = c0 + c1;
|
||||
let wj_2l = input_domain.element((j * dom_size / (2 * l)) % dom_size);
|
||||
xt[k + 2 * j * m + m] = (c0 - c1).mul(wj_2l.into_repr());
|
||||
}
|
||||
}
|
||||
l /= 2;
|
||||
m *= 2;
|
||||
xvec = xt;
|
||||
}
|
||||
xvec
|
||||
}
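// In formulas, each pass of the Stockham loop above applies the radix-2 butterfly
//   x'_{k + 2jm}     = x_{k + jm} + x_{k + jm + lm}
//   x'_{k + 2jm + m} = w^{j*dom_size/(2*l)} * (x_{k + jm} - x_{k + jm + lm})
// for 0 <= j < l and 0 <= k < m, starting from l = dom_size/2, m = 1 and updating l /= 2, m *= 2,
// so after p passes xvec holds the DFT defined in the comment above.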
|
||||
//compute DFT of size @dom_size over vector of Fr elements
|
||||
//q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for 0<= i< dom_size=2^p
|
||||
pub fn dft_opt<E: PairingEngine>(h: &[E::Fr], p: usize) -> Vec<E::Fr> {
|
||||
let dom_size = 1 << p;
|
||||
assert_eq!(h.len(), dom_size); //we do not support inputs of size not power of 2
|
||||
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(dom_size).unwrap();
|
||||
let mut l = dom_size / 2;
|
||||
let mut m: usize = 1;
|
||||
//Stockham FFT
|
||||
let mut xvec = h.to_vec();
|
||||
for _ in 0..p {
|
||||
let mut xt = xvec.clone();
|
||||
for j in 0..l {
|
||||
for k in 0..m {
|
||||
let c0 = xvec[k + j * m];
|
||||
let c1 = xvec[k + j * m + l * m];
|
||||
xt[k + 2 * j * m] = c0 + c1;
|
||||
let wj_2l = input_domain.element((j * dom_size / (2 * l)) % dom_size);
|
||||
xt[k + 2 * j * m + m] = (c0 - c1) * (wj_2l);
|
||||
}
|
||||
}
|
||||
l /= 2;
|
||||
m *= 2;
|
||||
xvec = xt;
|
||||
}
|
||||
xvec
|
||||
}
|
||||
|
||||
//compute idft of size @dom_size over vector of G1 elements
|
||||
//q_i = (h_0 + h_1w^-i + h_2w^{-2i}+\cdots + h_{dom_size-1}w^{-(dom_size-1)i})/dom_size for 0<= i< dom_size=2^p
|
||||
pub fn idft_g1<E: PairingEngine>(h: &[E::G1Projective], p: usize) -> Vec<E::G1Projective> {
|
||||
let dom_size = 1 << p;
|
||||
assert_eq!(h.len(), dom_size); //we do not support inputs of size not power of 2
|
||||
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(dom_size).unwrap();
|
||||
let mut l = dom_size / 2;
|
||||
let mut m: usize = 1;
|
||||
let mut dom_fr = E::Fr::one();
|
||||
//Stockham FFT
|
||||
let mut xvec = h.to_vec();
|
||||
for _ in 0..p {
|
||||
let mut xt = xvec.clone();
|
||||
for j in 0..l {
|
||||
for k in 0..m {
|
||||
let c0 = xvec[k + j * m];
|
||||
let c1 = xvec[k + j * m + l * m];
|
||||
xt[k + 2 * j * m] = c0 + c1;
|
||||
let wj_2l = input_domain
|
||||
.element((dom_size - (j * dom_size / (2 * l)) % dom_size) % dom_size);
|
||||
xt[k + 2 * j * m + m] = (c0 - c1).mul(wj_2l.into_repr()); //Difference #1 to forward DFT
|
||||
}
|
||||
}
|
||||
l /= 2;
|
||||
m *= 2;
|
||||
dom_fr = dom_fr + dom_fr;
|
||||
xvec = xt;
|
||||
}
|
||||
|
||||
let domain_inverse = dom_fr.inverse().unwrap().into_repr();
|
||||
|
||||
xvec.iter().map(|x| x.mul(domain_inverse)).collect()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
use crate::caulk_single_setup::caulk_single_setup;
|
||||
use crate::KZGCommit;
|
||||
use ark_bls12_377::Bls12_377;
|
||||
use ark_bls12_381::Bls12_381;
|
||||
use ark_ec::PairingEngine;
|
||||
use ark_poly::univariate::DensePolynomial;
|
||||
use ark_std::test_rng;
|
||||
use ark_std::time::Instant;
|
||||
use ark_std::UniformRand;
|
||||
|
||||
/// Various functions that are used for testing
|
||||
|
||||
fn commit_direct<E: PairingEngine>(
|
||||
c_poly: &DensePolynomial<E::Fr>, //c(X)
|
||||
poly_ck: &Powers<E>, //SRS
|
||||
) -> E::G1Affine {
|
||||
assert!(c_poly.coeffs.len() <= poly_ck.powers_of_g.len());
|
||||
let mut com = poly_ck.powers_of_g[0].mul(c_poly.coeffs[0]);
|
||||
for i in 1..c_poly.coeffs.len() {
|
||||
com = com + poly_ck.powers_of_g[i].mul(c_poly.coeffs[i]);
|
||||
}
|
||||
com.into_affine()
|
||||
}
|
||||
|
||||
//compute all openings to c_poly by mere calling `open` N times
|
||||
fn multiple_open_naive<E: PairingEngine>(
|
||||
c_poly: &DensePolynomial<E::Fr>,
|
||||
c_com_open: &Randomness<E::Fr, DensePolynomial<E::Fr>>,
|
||||
poly_ck: &Powers<E>,
|
||||
degree: usize,
|
||||
) -> Vec<E::G1Affine> {
|
||||
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(degree).unwrap();
|
||||
let mut res: Vec<E::G1Affine> = vec![];
|
||||
for i in 0..input_domain.size() {
|
||||
let omega_i = input_domain.element(i);
|
||||
res.push(kzg_open_g1_test::<E>(&c_poly, &omega_i, &c_com_open, &poly_ck).w);
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////
|
||||
fn kzg_open_g1_test<E: PairingEngine>(
|
||||
p: &DensePolynomial<E::Fr>,
|
||||
omega_5: &E::Fr,
|
||||
polycom_open: &Randomness<E::Fr, DensePolynomial<E::Fr>>,
|
||||
poly_ck: &Powers<E>,
|
||||
) -> Proof<E> {
|
||||
let rng = &mut ark_std::test_rng();
|
||||
|
||||
let (witness_polynomial, _random_witness_polynomial) =
|
||||
KZG10::<E, _>::compute_witness_polynomial(p, omega_5.clone(), polycom_open).unwrap();
|
||||
|
||||
let (temp0, _temp1) = KZG10::commit(poly_ck, &witness_polynomial, None, Some(rng)).unwrap();
|
||||
Proof {
|
||||
w: temp0.0,
|
||||
random_v: None,
|
||||
}
|
||||
}
|
||||
|
||||
//compute KZG proof Q = g1_q = g^( (c(x) - c(w^i)) / (x - w^i) ) where x is secret, w^i is the point where we open, and c(X) is the committed polynomial
|
||||
fn single_open_default<E: PairingEngine>(
|
||||
c_poly: &DensePolynomial<E::Fr>, //c(X)
|
||||
c_com_open: &Randomness<E::Fr, DensePolynomial<E::Fr>>, //
|
||||
poly_ck: &Powers<E>,
|
||||
i: usize, //
|
||||
degree: usize,
|
||||
) -> E::G1Affine {
|
||||
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(degree).unwrap();
|
||||
let omega_i = input_domain.element(i);
|
||||
let c_poly_open = kzg_open_g1_test(&c_poly, &omega_i, &c_com_open, &poly_ck);
|
||||
c_poly_open.w
|
||||
}
|
||||
|
||||
//KZG proof/opening at point y for c(X) = sum_i c_i X^i
|
||||
//(1)T_y(X) = sum_i t_i X^i
|
||||
//(2) t_{deg-1} = c_deg
|
||||
//(3) t_j = c_{j+1} + y*t_{j+1}
|
||||
fn single_open_fast<E: PairingEngine>(
|
||||
c_poly: &DensePolynomial<E::Fr>, //c(X)
|
||||
poly_ck: &Powers<E>, //SRS
|
||||
i: usize, //y=w^i
|
||||
degree: usize, //degree of c(X)
|
||||
) -> E::G1Affine {
|
||||
//computing opening point
|
||||
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(degree).unwrap();
|
||||
let y = input_domain.element(i);
|
||||
|
||||
//compute quotient
|
||||
let mut t_poly = c_poly.clone();
|
||||
t_poly.coeffs.remove(0); //shifting indices
|
||||
for j in (0..t_poly.len() - 1).rev() {
|
||||
t_poly.coeffs[j] = c_poly.coeffs[j + 1] + y * t_poly.coeffs[j + 1]
|
||||
}
|
||||
|
||||
//commit
|
||||
let (t_com, _) = KZG10::commit(&poly_ck, &t_poly, None, None).unwrap();
|
||||
t_com.0
|
||||
}
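// A small worked example of the recurrence above (hypothetical values): with c(X) = 3 + 2X + X^2
// and y = 2, we get t_1 = c_2 = 1 and t_0 = c_1 + y*t_1 = 2 + 2 = 4, so T_y(X) = 4 + X.
// Indeed (c(X) - c(2)) / (X - 2) = (X^2 + 2X - 8) / (X - 2) = X + 4, matching the quotient that
// single_open_default commits to.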
|
||||
#[test]
|
||||
fn test_single() {
|
||||
test_single_helper::<Bls12_381>();
|
||||
test_single_helper::<Bls12_377>();
|
||||
}
|
||||
|
||||
fn test_single_helper<E: PairingEngine>() {
|
||||
let mut rng = test_rng();
|
||||
|
||||
// setting public parameters
|
||||
// current kzg setup should be changed with output from a setup ceremony
|
||||
let max_degree: usize = 100;
|
||||
let actual_degree: usize = 63;
|
||||
let pp = caulk_single_setup(max_degree, actual_degree, &mut rng);
|
||||
|
||||
// Setting up test instance to run evaluate on.
|
||||
// test randomness for c_poly is same everytime.
|
||||
// test index equals 5 everytime
|
||||
// g_c = g^(c(x))
|
||||
let rng = &mut test_rng();
|
||||
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, rng);
|
||||
let (_c_com, c_com_open) = KZG10::<E, _>::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
|
||||
|
||||
let i: usize = 6;
|
||||
let q = single_open_default(&c_poly, &c_com_open, &pp.poly_ck, i, actual_degree);
|
||||
let q2 = single_open_fast(&c_poly, &pp.poly_ck, i, actual_degree);
|
||||
assert_eq!(q, q2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dft() {
|
||||
test_dft_helper::<Bls12_381>();
|
||||
test_dft_helper::<Bls12_377>();
|
||||
}
|
||||
|
||||
fn test_dft_helper<E: PairingEngine>() {
|
||||
let mut rng = test_rng();
|
||||
for i in 2..6 {
|
||||
let size = 1 << i;
|
||||
let h: Vec<E::G1Projective> =
|
||||
(0..size).map(|_| E::G1Projective::rand(&mut rng)).collect();
|
||||
|
||||
let c_dft = dft_g1::<E>(&h, i);
|
||||
let c_back = idft_g1::<E>(&c_dft, i);
|
||||
assert_eq!(h, c_back);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_commit() {
|
||||
test_commit_helper::<Bls12_381>();
|
||||
test_commit_helper::<Bls12_377>();
|
||||
}
|
||||
|
||||
pub fn test_commit_helper<E: PairingEngine>() {
|
||||
let mut rng = test_rng();
|
||||
|
||||
// current kzg setup should be changed with output from a setup ceremony
|
||||
let max_degree: usize = 100;
|
||||
let actual_degree: usize = 63;
|
||||
let pp = caulk_single_setup(max_degree, actual_degree, &mut rng);
|
||||
|
||||
// Setting up test instance to run evaluate on.
|
||||
// test randomness for c_poly is same everytime.
|
||||
// g_c = g^(c(x))
|
||||
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, &mut rng);
|
||||
let (c_com, _c_com_open) = KZG10::<E, _>::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
|
||||
let g_c1 = c_com.0;
|
||||
|
||||
let g_c2 = commit_direct(&c_poly, &pp.poly_ck);
|
||||
assert_eq!(g_c1, g_c2);
|
||||
println!("commit test passed")
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_multi() {
|
||||
test_multi_helper::<Bls12_381>();
|
||||
test_multi_helper::<Bls12_377>();
|
||||
}
|
||||
|
||||
pub fn test_multi_helper<E: PairingEngine>() {
|
||||
let mut rng = test_rng();
|
||||
|
||||
// current kzg setup should be changed with output from a setup ceremony
|
||||
let p: usize = 9;
|
||||
let max_degree: usize = 1 << (p + 1);
|
||||
let actual_degree: usize = (1 << p) - 1;
|
||||
let pp = caulk_single_setup(max_degree, actual_degree, &mut rng);
|
||||
|
||||
// Setting up test instance to run evaluate on.
|
||||
// test randomness for c_poly is same everytime.
|
||||
// test index equals 5 everytime
|
||||
// g_c = g^(c(x))
|
||||
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, &mut rng);
|
||||
let (c_com, c_com_open) = KZG10::<E, _>::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
|
||||
let _g_c = c_com.0;
|
||||
|
||||
let now = Instant::now();
|
||||
let q = multiple_open_naive(&c_poly, &c_com_open, &pp.poly_ck, actual_degree);
|
||||
println!("Multi naive computed in {:?}", now.elapsed());
|
||||
|
||||
let now = Instant::now();
|
||||
let q2 = KZGCommit::multiple_open(&c_poly, &pp.poly_ck, p);
|
||||
println!("Multi advanced computed in {:?}", now.elapsed());
|
||||
assert_eq!(q, q2);
|
||||
}
|
||||
}
|
||||
@@ -1,224 +0,0 @@
|
||||
/*
|
||||
This file includes backend tools:
|
||||
(1) read_line() is for taking inputs from the user
|
||||
(2) kzg_open_g1 is for opening KZG commitments
|
||||
(3) kzg_verify_g1 is for verifying KZG commitments
|
||||
(4) hash_caulk_single is for hashing group and field elements into a field element
|
||||
(5) random_field is for generating random field elements
|
||||
*/
|
||||
|
||||
use crate::{compute_h, dft_g1};
|
||||
use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine, ProjectiveCurve};
|
||||
use ark_ff::{Field, PrimeField};
|
||||
use ark_poly::{univariate::DensePolynomial, Polynomial, UVPolynomial};
|
||||
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain};
|
||||
use ark_poly_commit::kzg10::*;
|
||||
use ark_std::{end_timer, start_timer};
|
||||
use ark_std::{One, Zero};
|
||||
#[cfg(feature = "parallel")]
|
||||
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
|
||||
use std::marker::PhantomData;
|
||||
|
||||
////////////////////////////////////////////////
|
||||
//
|
||||
|
||||
//copied from arkworks
|
||||
fn convert_to_bigints<F: PrimeField>(p: &[F]) -> Vec<F::BigInt> {
|
||||
ark_std::cfg_iter!(p)
|
||||
.map(|s| s.into_repr())
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
// KZG opening and verifying
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
pub struct KZGCommit<E: PairingEngine> {
|
||||
phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<E: PairingEngine> KZGCommit<E> {
|
||||
pub fn commit(powers: &Powers<E>, polynomial: &DensePolynomial<E::Fr>) -> E::G1Affine {
|
||||
let (com, _randomness) = KZG10::<E, _>::commit(powers, polynomial, None, None).unwrap();
|
||||
com.0
|
||||
}
|
||||
|
||||
// compute all openings to c_poly using a smart formula
|
||||
// This Code implements an algorithm for calculating n openings of a KZG vector commitment of size n in n log(n) time.
|
||||
// The algorithm is by Feist and Khovratovich.
|
||||
// It is useful for preprocessing.
|
||||
// The full algorithm is described here https://github.com/khovratovich/Kate/blob/master/Kate_amortized.pdf
|
||||
pub fn multiple_open(
|
||||
c_poly: &DensePolynomial<E::Fr>, //c(X)
|
||||
poly_ck: &Powers<E>, //SRS
|
||||
p: usize,
|
||||
) -> Vec<E::G1Affine> {
|
||||
let timer = start_timer!(|| "multiple open");
|
||||
|
||||
let degree = c_poly.coeffs.len() - 1;
|
||||
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(degree).unwrap();
|
||||
|
||||
let h_timer = start_timer!(|| "compute h");
|
||||
let h2 = compute_h(c_poly, poly_ck, p);
|
||||
end_timer!(h_timer);
|
||||
|
||||
let dom_size = input_domain.size();
|
||||
assert_eq!(1 << p, dom_size);
|
||||
assert_eq!(degree + 1, dom_size);
|
||||
|
||||
let dft_timer = start_timer!(|| "G1 dft");
|
||||
let q2 = dft_g1::<E>(&h2, p);
|
||||
end_timer!(dft_timer);
|
||||
|
||||
let res = E::G1Projective::batch_normalization_into_affine(q2.as_ref());
|
||||
|
||||
end_timer!(timer);
|
||||
res
|
||||
}
|
||||
|
||||
/*
|
||||
KZG.Open( srs_KZG, f(X), deg, (alpha1, alpha2, ..., alphan) )
|
||||
returns ([f(alpha1), ..., f(alphan)], pi)
|
||||
Algorithm described in Section 4.6.1, Multiple Openings
|
||||
*/
|
||||
pub fn open_g1_batch(
|
||||
poly_ck: &Powers<E>,
|
||||
poly: &DensePolynomial<E::Fr>,
|
||||
max_deg: Option<&usize>,
|
||||
points: &[E::Fr],
|
||||
) -> (Vec<E::Fr>, E::G1Affine) {
|
||||
let mut evals = Vec::new();
|
||||
let mut proofs = Vec::new();
|
||||
for p in points.iter() {
|
||||
let (eval, pi) = Self::open_g1_single(poly_ck, poly, max_deg, p);
|
||||
evals.push(eval);
|
||||
proofs.push(pi);
|
||||
}
|
||||
|
||||
let mut res = E::G1Projective::zero(); //default value
|
||||
|
||||
for j in 0..points.len() {
|
||||
let w_j = points[j];
|
||||
//1. Computing coefficient [1/prod]
|
||||
let mut prod = E::Fr::one();
|
||||
for (k, p) in points.iter().enumerate() {
|
||||
if k != j {
|
||||
prod *= w_j - p;
|
||||
}
|
||||
}
|
||||
//2. Summation
|
||||
let q_add = proofs[j].mul(prod.inverse().unwrap()); //[1/prod]Q_{j}
|
||||
res += q_add;
|
||||
}
|
||||
|
||||
(evals, res.into_affine())
|
||||
}
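// In formulas, writing Q_j = [(f(x) - f(alpha_j)) / (x - alpha_j)]_1 for the single-point proof
// at alpha_j, the loop above outputs the aggregated proof of Section 4.6.1,
//   pi = \sum_j ( \prod_{k != j} (alpha_j - alpha_k) )^{-1} * Q_j ,
// which verify_g1 below checks against the Lagrange interpolation of the returned evaluations
// and the vanishing polynomial of the opening points.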
|
||||
//KZG.Open( srs_KZG, f(X), deg, alpha ) returns (f(alpha), pi)
|
||||
fn open_g1_single(
|
||||
poly_ck: &Powers<E>,
|
||||
poly: &DensePolynomial<E::Fr>,
|
||||
max_deg: Option<&usize>,
|
||||
point: &E::Fr,
|
||||
) -> (E::Fr, E::G1Affine) {
|
||||
let eval = poly.evaluate(point);
|
||||
|
||||
let global_max_deg = poly_ck.powers_of_g.len();
|
||||
|
||||
let mut d: usize = 0;
|
||||
if max_deg == None {
|
||||
d += global_max_deg;
|
||||
} else {
|
||||
d += max_deg.unwrap();
|
||||
}
|
||||
let divisor = DensePolynomial::from_coefficients_vec(vec![-*point, E::Fr::one()]);
|
||||
let witness_polynomial = poly / &divisor;
|
||||
|
||||
assert!(poly_ck.powers_of_g[(global_max_deg - d)..].len() >= witness_polynomial.len());
|
||||
let proof = VariableBaseMSM::multi_scalar_mul(
|
||||
&poly_ck.powers_of_g[(global_max_deg - d)..],
|
||||
convert_to_bigints(&witness_polynomial.coeffs).as_slice(),
|
||||
)
|
||||
.into_affine();
|
||||
(eval, proof)
|
||||
}
|
||||
|
||||
/*
|
||||
// KZG.Verify( srs_KZG, F, deg, (alpha1, alpha2, ..., alphan), (v1, ..., vn), pi )
|
||||
Algorithm described in Section 4.6.1, Multiple Openings
|
||||
*/
|
||||
pub fn verify_g1(
|
||||
// Verify that @c_com is a commitment to C(X) such that C(x)=z
|
||||
powers_of_g1: &[E::G1Affine], // generator of G1
|
||||
powers_of_g2: &[E::G2Affine], // [1]_2, [x]_2, [x^2]_2, ...
|
||||
c_com: &E::G1Affine, //commitment
|
||||
max_deg: Option<&usize>, // max degree
|
||||
points: &[E::Fr], // x such that eval = C(x)
|
||||
evals: &[E::Fr], //evaluation
|
||||
pi: &E::G1Affine, //proof
|
||||
) -> bool {
|
||||
// Interpolation set
|
||||
// tau_i(X) = lagrange_tau[i] = polynomial equal to 0 at point[j] for j!= i and 1 at points[i]
|
||||
|
||||
let mut lagrange_tau = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
|
||||
let mut prod = DensePolynomial::from_coefficients_slice(&[E::Fr::one()]);
|
||||
let mut components = vec![];
|
||||
for &p in points.iter() {
|
||||
let poly = DensePolynomial::from_coefficients_slice(&[-p, E::Fr::one()]);
|
||||
prod = &prod * (&poly);
|
||||
components.push(poly);
|
||||
}
|
||||
|
||||
for i in 0..points.len() {
|
||||
let mut temp = &prod / &components[i];
|
||||
let lagrange_scalar = temp.evaluate(&points[i]).inverse().unwrap() * evals[i];
|
||||
temp.coeffs.iter_mut().for_each(|x| *x *= lagrange_scalar);
|
||||
lagrange_tau = lagrange_tau + temp;
|
||||
}
|
||||
|
||||
// commit to sum evals[i] tau_i(X)
|
||||
assert!(
|
||||
powers_of_g1.len() >= lagrange_tau.len(),
|
||||
"KZG verifier doesn't have enough g1 powers"
|
||||
);
|
||||
let g1_tau = VariableBaseMSM::multi_scalar_mul(
|
||||
&powers_of_g1[..lagrange_tau.len()],
|
||||
convert_to_bigints(&lagrange_tau.coeffs).as_slice(),
|
||||
);
|
||||
|
||||
// vanishing polynomial
|
||||
let z_tau = prod;
|
||||
|
||||
// commit to z_tau(X) in g2
|
||||
assert!(
|
||||
powers_of_g2.len() >= z_tau.len(),
|
||||
"KZG verifier doesn't have enough g2 powers"
|
||||
);
|
||||
let g2_z_tau = VariableBaseMSM::multi_scalar_mul(
|
||||
&powers_of_g2[..z_tau.len()],
|
||||
convert_to_bigints(&z_tau.coeffs).as_slice(),
|
||||
);
|
||||
|
||||
let global_max_deg = powers_of_g1.len();
|
||||
|
||||
let mut d: usize = 0;
|
||||
if max_deg == None {
|
||||
d += global_max_deg;
|
||||
} else {
|
||||
d += max_deg.unwrap();
|
||||
}
|
||||
|
||||
let pairing_inputs = vec![
|
||||
(
|
||||
E::G1Prepared::from((g1_tau - c_com.into_projective()).into_affine()),
|
||||
E::G2Prepared::from(powers_of_g2[global_max_deg - d]),
|
||||
),
|
||||
(
|
||||
E::G1Prepared::from(*pi),
|
||||
E::G2Prepared::from(g2_z_tau.into_affine()),
|
||||
),
|
||||
];
|
||||
|
||||
E::product_of_pairings(pairing_inputs.iter()).is_one()
|
||||
}
|
||||
}
|
||||
@@ -1,28 +1,35 @@
|
||||
mod caulk_multi_lookup;
|
||||
mod caulk_multi_setup;
|
||||
mod caulk_multi_unity;
|
||||
mod multiopen;
|
||||
mod tools;
|
||||
|
||||
use crate::caulk_multi_lookup::{
|
||||
compute_lookup_proof, get_poly_and_g2_openings, verify_lookup_proof, LookupInstance,
|
||||
LookupProverInput,
|
||||
};
|
||||
use crate::caulk_multi_setup::setup_multi_lookup;
|
||||
use crate::tools::{random_field, read_line, KzgBls12_381};
|
||||
|
||||
use ark_bls12_381::{Fr, FrParameters};
|
||||
use ark_ff::Fp256;
|
||||
use ark_bls12_381::{Bls12_381, Fr};
|
||||
use ark_poly::{univariate::DensePolynomial, EvaluationDomain};
|
||||
use ark_poly_commit::{Polynomial, UVPolynomial};
|
||||
use ark_std::time::Instant;
|
||||
|
||||
use ark_std::{test_rng, time::Instant, UniformRand};
|
||||
use caulk::{
|
||||
multi::{
|
||||
compute_lookup_proof, get_poly_and_g2_openings, verify_lookup_proof, LookupInstance,
|
||||
LookupProverInput,
|
||||
},
|
||||
KZGCommit, PublicParameters,
|
||||
};
|
||||
use rand::Rng;
|
||||
use std::cmp::max;
|
||||
use std::{cmp::max, error::Error, io, str::FromStr};
|
||||
|
||||
// Function for reading inputs from the command line.
|
||||
fn read_line<T: FromStr>() -> T
|
||||
where
|
||||
<T as FromStr>::Err: Error + 'static,
|
||||
{
|
||||
let mut input = String::new();
|
||||
io::stdin()
|
||||
.read_line(&mut input)
|
||||
.expect("Failed to get console input.");
|
||||
let output: T = input.trim().parse().expect("Console input is invalid.");
|
||||
output
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
fn main() {
|
||||
//1. Setup
|
||||
let mut rng = test_rng();
|
||||
|
||||
// 1. Setup
|
||||
// setting public parameters
|
||||
// current kzg setup should be changed with output from a setup ceremony
|
||||
println!("What is the bitsize of the degree of the polynomial inside the commitment? ");
|
||||
@@ -33,39 +40,39 @@ fn main() {
|
||||
let N: usize = 1 << n;
|
||||
let powers_size: usize = max(N + 2, 1024);
|
||||
let actual_degree = N - 1;
|
||||
let temp_m = n; //dummy
|
||||
let temp_m = n; // dummy
|
||||
|
||||
let now = Instant::now();
|
||||
let mut pp = setup_multi_lookup(&powers_size, &N, &temp_m, &n);
|
||||
let mut pp = PublicParameters::<Bls12_381>::setup(&powers_size, &N, &temp_m, &n);
|
||||
println!(
|
||||
"Time to setup multi openings of table size {:?} = {:?}",
|
||||
actual_degree + 1,
|
||||
now.elapsed()
|
||||
);
|
||||
|
||||
//2. Poly and openings
|
||||
// 2. Poly and openings
|
||||
let now = Instant::now();
|
||||
let table = get_poly_and_g2_openings(&pp, actual_degree);
|
||||
println!("Time to generate commitment table = {:?}", now.elapsed());
|
||||
|
||||
//3. Setup
|
||||
// 3. Setup
|
||||
|
||||
pp.regenerate_lookup_params(m);
|
||||
|
||||
//4. Positions
|
||||
let mut rng = rand::thread_rng();
|
||||
// 4. Positions
|
||||
// let mut rng = rand::thread_rng();
|
||||
let mut positions: Vec<usize> = vec![];
|
||||
for _ in 0..m {
|
||||
//generate positions randomly in the set
|
||||
//let i_j: usize = j*(actual_degree/m);
|
||||
let i_j: usize = rng.gen_range(0, actual_degree);
|
||||
// generate positions randomly in the set
|
||||
// let i_j: usize = j*(actual_degree/m);
|
||||
let i_j: usize = rng.gen_range(0..actual_degree);
|
||||
positions.push(i_j);
|
||||
}
|
||||
|
||||
println!("positions = {:?}", positions);
|
||||
|
||||
//5. generating phi
|
||||
let blinder: Fp256<FrParameters> = random_field::<Fr>();
|
||||
// 5. generating phi
|
||||
let blinder = Fr::rand(&mut rng);
|
||||
let a_m = DensePolynomial::from_coefficients_slice(&[blinder]);
|
||||
let mut phi_poly = a_m.mul_by_vanishing_poly(pp.domain_m);
|
||||
let c_poly_local = table.c_poly.clone();
|
||||
@@ -73,39 +80,36 @@ fn main() {
|
||||
for j in 0..m {
|
||||
phi_poly = &phi_poly
|
||||
+ &(&pp.lagrange_polynomials_m[j]
|
||||
* c_poly_local.evaluate(&pp.domain_N.element(positions[j]))); //adding c(w^{i_j})*mu_j(X)
|
||||
* c_poly_local.evaluate(&pp.domain_N.element(positions[j]))); // adding c(w^{i_j})*mu_j(X)
|
||||
}
|
||||
|
||||
for j in m..pp.domain_m.size() {
|
||||
phi_poly = &phi_poly
|
||||
+ &(&pp.lagrange_polynomials_m[j] * c_poly_local.evaluate(&pp.domain_N.element(0)));
|
||||
//adding c(w^{i_j})*mu_j(X)
|
||||
// adding c(w^{i_j})*mu_j(X)
|
||||
}
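// In formulas (with r the blinder, mu_j = pp.lagrange_polynomials_m[j] and z_m the vanishing
// polynomial of pp.domain_m), the two loops above build
//   phi(X) = r * z_m(X) + \sum_{j < m} c(w_N^{i_j}) * mu_j(X) + \sum_{m <= j < |domain_m|} c(w_N^0) * mu_j(X),
// so on the first m points of domain_m phi evaluates to the looked-up table entries, and the
// remaining points are padded with c(w_N^0).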
|
||||
//6. Running proofs
|
||||
// 6. Running proofs
|
||||
let now = Instant::now();
|
||||
let (c_com, _) = KzgBls12_381::commit(&pp.poly_ck, &table.c_poly, None, None).unwrap();
|
||||
let (phi_com, _) = KzgBls12_381::commit(&pp.poly_ck, &phi_poly, None, None).unwrap();
|
||||
let c_com = KZGCommit::<Bls12_381>::commit_g1(&pp.poly_ck, &table.c_poly);
|
||||
let phi_com = KZGCommit::<Bls12_381>::commit_g1(&pp.poly_ck, &phi_poly);
|
||||
println!("Time to generate inputs = {:?}", now.elapsed());
|
||||
|
||||
let lookup_instance = LookupInstance {
|
||||
c_com: c_com.0.clone(),
|
||||
phi_com: phi_com.0.clone(),
|
||||
};
|
||||
let lookup_instance = LookupInstance { c_com, phi_com };
|
||||
|
||||
let prover_input = LookupProverInput {
|
||||
c_poly: table.c_poly.clone(),
|
||||
phi_poly: phi_poly,
|
||||
positions: positions,
|
||||
phi_poly,
|
||||
positions,
|
||||
openings: table.openings.clone(),
|
||||
};
|
||||
|
||||
println!("We are now ready to run the prover. How many times should we run it?");
|
||||
let number_of_openings: usize = read_line();
|
||||
let now = Instant::now();
|
||||
let (proof, unity_proof) = compute_lookup_proof(&lookup_instance, &prover_input, &pp);
|
||||
let (proof, unity_proof) = compute_lookup_proof(&lookup_instance, &prover_input, &pp, &mut rng);
|
||||
for _ in 1..number_of_openings {
|
||||
_ = compute_lookup_proof(&lookup_instance, &prover_input, &pp);
|
||||
_ = compute_lookup_proof(&lookup_instance, &prover_input, &pp, &mut rng);
|
||||
}
|
||||
println!(
|
||||
"Time to evaluate {} times {} multi-openings of table size {:?} = {:?} ",
|
||||
@@ -117,7 +121,7 @@ fn main() {
|
||||
|
||||
let now = Instant::now();
|
||||
for _ in 0..number_of_openings {
|
||||
verify_lookup_proof(table.c_com, phi_com.0, &proof, &unity_proof, &pp);
|
||||
verify_lookup_proof(&table.c_com, &phi_com, &proof, &unity_proof, &pp);
|
||||
}
|
||||
println!(
|
||||
"Time to verify {} times {} multi-openings of table size {:?} = {:?} ",
|
||||
@@ -128,7 +132,7 @@ fn main() {
|
||||
);
|
||||
|
||||
assert!(
|
||||
verify_lookup_proof(table.c_com, phi_com.0, &proof, &unity_proof, &pp),
|
||||
verify_lookup_proof(&table.c_com, &phi_com, &proof, &unity_proof, &pp),
|
||||
"Result does not verify"
|
||||
);
|
||||
}
|
||||
@@ -1,16 +1,15 @@
|
||||
use ark_bls12_381::{Bls12_381, Fr, G1Affine};
|
||||
use ark_ec::{AffineCurve, ProjectiveCurve};
|
||||
use ark_poly::univariate::DensePolynomial;
|
||||
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain, Polynomial, UVPolynomial};
|
||||
use ark_poly::{
|
||||
univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain, Polynomial,
|
||||
UVPolynomial,
|
||||
};
|
||||
use ark_poly_commit::kzg10::KZG10;
|
||||
use ark_std::test_rng;
|
||||
use ark_std::UniformRand;
|
||||
use caulk_single_opening::caulk_single_setup;
|
||||
use caulk_single_opening::CaulkTranscript;
|
||||
use caulk_single_opening::KZGCommit;
|
||||
use caulk_single_opening::{caulk_single_prove, caulk_single_verify};
|
||||
use std::time::Instant;
|
||||
use std::{error::Error, io, str::FromStr};
|
||||
use ark_std::{test_rng, UniformRand};
|
||||
use caulk::{
|
||||
caulk_single_prove, caulk_single_setup, caulk_single_verify, CaulkTranscript, KZGCommit,
|
||||
};
|
||||
use std::{error::Error, io, str::FromStr, time::Instant};
|
||||
|
||||
type UniPoly381 = DensePolynomial<Fr>;
|
||||
type KzgBls12_381 = KZG10<Bls12_381, UniPoly381>;
|
||||
@@ -48,7 +47,7 @@ fn main() {
|
||||
now.elapsed()
|
||||
);
|
||||
|
||||
//polynomial and commitment
|
||||
// polynomial and commitment
|
||||
let now = Instant::now();
|
||||
// deterministic randomness. Should never be used in practice.
|
||||
let c_poly = UniPoly381::rand(actual_degree, &mut rng);
|
||||
@@ -60,7 +59,7 @@ fn main() {
|
||||
now.elapsed()
|
||||
);
|
||||
|
||||
//point at which we will open c_com
|
||||
// point at which we will open c_com
|
||||
let input_domain: GeneralEvaluationDomain<Fr> = EvaluationDomain::new(actual_degree).unwrap();
|
||||
println!("Which position in the vector should we open at? ");
|
||||
let position: usize = read_line();
|
||||
@@ -71,7 +70,7 @@ fn main() {
|
||||
);
|
||||
let omega_i = input_domain.element(position);
|
||||
|
||||
//Deciding whether to open all positions or just the one position.
|
||||
// Deciding whether to open all positions or just the one position.
|
||||
println!("Should we open all possible positions? Opening all possible positions is slow. Please input either YES or NO" );
|
||||
let open_all: String = read_line();
|
||||
|
||||
@@ -92,9 +91,10 @@ fn main() {
|
||||
"Console input is invalid"
|
||||
);
|
||||
|
||||
//compute all openings
|
||||
// compute all openings
|
||||
let now = Instant::now();
|
||||
let g1_qs = KZGCommit::multiple_open(&c_poly, &pp.poly_ck, p);
|
||||
let g1_qs =
|
||||
KZGCommit::<Bls12_381>::multiple_open::<G1Affine>(&c_poly, &pp.poly_ck.powers_of_g, p);
|
||||
g1_q = g1_qs[position];
|
||||
println!("Time to compute all KZG openings {:?}", now.elapsed());
|
||||
}
|
||||
9
rustfmt.toml
Normal file
@@ -0,0 +1,9 @@
|
||||
reorder_imports = true
|
||||
wrap_comments = true
|
||||
normalize_comments = true
|
||||
use_try_shorthand = true
|
||||
match_block_trailing_comma = true
|
||||
use_field_init_shorthand = true
|
||||
edition = "2018"
|
||||
condense_wildcard_suffixes = true
|
||||
imports_granularity = "Crate"
|
||||
246
src/dft.rs
Normal file
@@ -0,0 +1,246 @@
|
||||
// This file includes an algorithm for calculating n openings of a KZG vector
|
||||
// commitment of size n in n log(n) time. The algorithm is by Feist and
|
||||
// Khovratovich. It is useful for preprocessing.
|
||||
// The full algorithm is described here https://github.com/khovratovich/Kate/blob/master/Kate_amortized.pdf
|
||||
|
||||
use ark_ec::ProjectiveCurve;
|
||||
use ark_ff::PrimeField;
|
||||
use ark_poly::{
|
||||
univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain, UVPolynomial,
|
||||
};
|
||||
use std::vec::Vec;
|
||||
|
||||
// compute all pre-proofs using DFT
|
||||
// h_i= c_d[x^{d-i-1}]+c_{d-1}[x^{d-i-2}]+c_{d-2}[x^{d-i-3}]+\cdots +
|
||||
// c_{i+2}[x]+c_{i+1}[1]
|
||||
pub fn compute_h<F, G>(
|
||||
c_poly: &DensePolynomial<F>, /* c(X) degree up to d<2^p , i.e. c_poly has at most d+1 coeffs
|
||||
* non-zero */
|
||||
powers: &[G], // SRS
|
||||
p: usize,
|
||||
) -> Vec<G>
|
||||
where
|
||||
F: PrimeField,
|
||||
G: ProjectiveCurve,
|
||||
{
|
||||
let mut coeffs = c_poly.coeffs().to_vec();
|
||||
let dom_size = 1 << p;
|
||||
let fpzero = F::zero();
|
||||
coeffs.resize(dom_size, fpzero);
|
||||
|
||||
// let now = Instant::now();
|
||||
// 1. x_ext = [ [x^{d-1}], [x^{d-2}], ..., [x], [1], d+2 [0]'s ]
|
||||
let mut x_ext: Vec<G> = powers.iter().take(dom_size - 1).rev().map(|&x| x).collect();
|
||||
x_ext.resize(2 * dom_size, G::zero()); // filling 2d+2 neutral elements
|
||||
|
||||
let y = group_dft::<F, G>(&x_ext, p + 1);
|
||||
// println!("Step 1 computed in {:?}", now.elapsed());
|
||||
|
||||
// 2. c_ext = [c_d, d zeroes, c_d,c_{0},c_1,...,c_{d-2},c_{d-1}]
|
||||
// let now = Instant::now();
|
||||
let mut c_ext = vec![coeffs[coeffs.len() - 1]];
|
||||
c_ext.resize(dom_size, fpzero);
|
||||
c_ext.push(coeffs[coeffs.len() - 1]);
|
||||
for &e in coeffs.iter().take(coeffs.len() - 1) {
|
||||
c_ext.push(e);
|
||||
}
|
||||
assert_eq!(c_ext.len(), 2 * dom_size);
|
||||
let v = field_dft::<F>(&c_ext, p + 1);
|
||||
// println!("Step 2 computed in {:?}", now.elapsed());
|
||||
|
||||
// 3. u = y o v
|
||||
|
||||
// let now = Instant::now();
|
||||
let u: Vec<_> = y
|
||||
.into_iter()
|
||||
.zip(v.into_iter())
|
||||
.map(|(a, b)| a.mul(b.into_repr()))
|
||||
.collect();
|
||||
// println!("Step 3 computed in {:?}", now.elapsed());
|
||||
|
||||
// 4. h_ext = idft_{2d+2}(u)
|
||||
// let now = Instant::now();
|
||||
let h_ext = group_inv_dft::<F, G>(&u, p + 1);
|
||||
// println!("Step 4 computed in {:?}", now.elapsed());
|
||||
|
||||
h_ext[0..dom_size].to_vec()
|
||||
}
|
||||
|
||||
// compute DFT of size @dom_size over a vector of group elements
|
||||
// q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for
|
||||
// 0<= i< dom_size=2^p
|
||||
pub fn group_dft<F, G>(h: &[G], p: usize) -> Vec<G>
|
||||
where
|
||||
F: PrimeField,
|
||||
G: ProjectiveCurve,
|
||||
{
|
||||
let dom_size = 1 << p;
|
||||
assert_eq!(h.len(), dom_size); // we do not support inputs of size not power of 2
|
||||
let input_domain: GeneralEvaluationDomain<F> = EvaluationDomain::new(dom_size).unwrap();
|
||||
let mut l = dom_size / 2;
|
||||
let mut m: usize = 1;
|
||||
// Stockham FFT
|
||||
let mut xvec = h.to_vec();
|
||||
for _ in 0..p {
|
||||
let mut xt = xvec.clone();
|
||||
for j in 0..l {
|
||||
for k in 0..m {
|
||||
let c0 = xvec[k + j * m];
|
||||
let c1 = xvec[k + j * m + l * m];
|
||||
xt[k + 2 * j * m] = c0 + c1;
|
||||
let wj_2l = input_domain.element((j * dom_size / (2 * l)) % dom_size);
|
||||
xt[k + 2 * j * m + m] = (c0 - c1).mul(wj_2l.into_repr());
|
||||
}
|
||||
}
|
||||
l /= 2;
|
||||
m *= 2;
|
||||
xvec = xt;
|
||||
}
|
||||
xvec
|
||||
}
|
||||
|
||||
// compute DFT of size @dom_size over vector of Fr elements
|
||||
// q_i = h_0 + h_1w^i + h_2w^{2i}+\cdots + h_{dom_size-1}w^{(dom_size-1)i} for
|
||||
// 0<= i< dom_size=2^p
|
||||
pub fn field_dft<F: PrimeField>(h: &[F], p: usize) -> Vec<F> {
|
||||
let dom_size = 1 << p;
|
||||
assert_eq!(h.len(), dom_size); // we do not support inputs of size not power of 2
|
||||
let input_domain: GeneralEvaluationDomain<F> = EvaluationDomain::new(dom_size).unwrap();
|
||||
let mut l = dom_size / 2;
|
||||
let mut m: usize = 1;
|
||||
// Stockham FFT
|
||||
let mut xvec = h.to_vec();
|
||||
for _ in 0..p {
|
||||
let mut xt = xvec.clone();
|
||||
for j in 0..l {
|
||||
for k in 0..m {
|
||||
let c0 = xvec[k + j * m];
|
||||
let c1 = xvec[k + j * m + l * m];
|
||||
xt[k + 2 * j * m] = c0 + c1;
|
||||
let wj_2l = input_domain.element((j * dom_size / (2 * l)) % dom_size);
|
||||
xt[k + 2 * j * m + m] = (c0 - c1) * (wj_2l);
|
||||
}
|
||||
}
|
||||
l /= 2;
|
||||
m *= 2;
|
||||
xvec = xt;
|
||||
}
|
||||
xvec
|
||||
}
|
||||
|
||||
// compute idft of size @dom_size over a vector of group elements
|
||||
// q_i = (h_0 + h_1w^-i + h_2w^{-2i}+\cdots +
|
||||
// h_{dom_size-1}w^{-(dom_size-1)i})/dom_size for 0<= i< dom_size=2^p
|
||||
pub fn group_inv_dft<F, G>(h: &[G], p: usize) -> Vec<G>
|
||||
where
|
||||
F: PrimeField,
|
||||
G: ProjectiveCurve,
|
||||
{
|
||||
let dom_size = 1 << p;
|
||||
assert_eq!(h.len(), dom_size); // we do not support inputs of size not power of 2
|
||||
let input_domain: GeneralEvaluationDomain<F> = EvaluationDomain::new(dom_size).unwrap();
|
||||
let mut l = dom_size / 2;
|
||||
let mut m: usize = 1;
|
||||
let mut dom_fr = F::one();
|
||||
// Stockham FFT
|
||||
let mut xvec = h.to_vec();
|
||||
for _ in 0..p {
|
||||
let mut xt = xvec.clone();
|
||||
for j in 0..l {
|
||||
for k in 0..m {
|
||||
let c0 = xvec[k + j * m];
|
||||
let c1 = xvec[k + j * m + l * m];
|
||||
xt[k + 2 * j * m] = c0 + c1;
|
||||
let wj_2l = input_domain
|
||||
.element((dom_size - (j * dom_size / (2 * l)) % dom_size) % dom_size);
|
||||
xt[k + 2 * j * m + m] = (c0 - c1).mul(wj_2l.into_repr()); // Difference #1 to forward DFT
|
||||
}
|
||||
}
|
||||
l /= 2;
|
||||
m *= 2;
|
||||
dom_fr = dom_fr + dom_fr;
|
||||
xvec = xt;
|
||||
}
|
||||
|
||||
let domain_inverse = dom_fr.inverse().unwrap().into_repr();
|
||||
|
||||
xvec.iter().map(|x| x.mul(domain_inverse)).collect()
|
||||
}
|
||||
|
||||
// compute idft of size @dom_size over a vector of field elements
|
||||
// q_i = (h_0 + h_1w^-i + h_2w^{-2i}+\cdots +
|
||||
// h_{dom_size-1}w^{-(dom_size-1)i})/dom_size for 0<= i< dom_size=2^p
|
||||
pub fn field_inv_dft<F: PrimeField>(h: &[F], p: usize) -> Vec<F> {
|
||||
let dom_size = 1 << p;
|
||||
assert_eq!(h.len(), dom_size); // we do not support inputs of size not power of 2
|
||||
let input_domain: GeneralEvaluationDomain<F> = EvaluationDomain::new(dom_size).unwrap();
|
||||
let mut l = dom_size / 2;
|
||||
let mut m: usize = 1;
|
||||
let mut dom_fr = F::one();
|
||||
// Stockham FFT
|
||||
let mut xvec = h.to_vec();
|
||||
for _ in 0..p {
|
||||
let mut xt = xvec.clone();
|
||||
for j in 0..l {
|
||||
for k in 0..m {
|
||||
let c0 = xvec[k + j * m];
|
||||
let c1 = xvec[k + j * m + l * m];
|
||||
xt[k + 2 * j * m] = c0 + c1;
|
||||
let wj_2l = input_domain
|
||||
.element((dom_size - (j * dom_size / (2 * l)) % dom_size) % dom_size);
|
||||
xt[k + 2 * j * m + m] = (c0 - c1) * wj_2l; // Difference #1 to
|
||||
// forward DFT
|
||||
}
|
||||
}
|
||||
l /= 2;
|
||||
m *= 2;
|
||||
dom_fr = dom_fr + dom_fr;
|
||||
xvec = xt;
|
||||
}
|
||||
|
||||
let domain_inverse = dom_fr.inverse().unwrap();
|
||||
|
||||
xvec.iter().map(|&x| x * domain_inverse).collect()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
use ark_bls12_377::Bls12_377;
|
||||
use ark_bls12_381::Bls12_381;
|
||||
use ark_ec::PairingEngine;
|
||||
use ark_std::{test_rng, UniformRand};
|
||||
|
||||
#[test]
|
||||
fn test_dft() {
|
||||
test_dft_helper::<Bls12_381>();
|
||||
test_dft_helper::<Bls12_377>();
|
||||
}
|
||||
|
||||
fn test_dft_helper<E: PairingEngine>() {
|
||||
let mut rng = test_rng();
|
||||
for i in 2..6 {
|
||||
let size = 1 << i;
|
||||
|
||||
let h: Vec<E::Fr> = (0..size).map(|_| E::Fr::rand(&mut rng)).collect();
|
||||
|
||||
let c_dft = field_dft::<E::Fr>(&h, i);
|
||||
let c_back = field_inv_dft::<E::Fr>(&c_dft, i);
|
||||
assert_eq!(h, c_back);
|
||||
|
||||
let h: Vec<E::G1Projective> =
|
||||
(0..size).map(|_| E::G1Projective::rand(&mut rng)).collect();
|
||||
|
||||
let c_dft = group_dft::<E::Fr, E::G1Projective>(&h, i);
|
||||
let c_back = group_inv_dft::<E::Fr, E::G1Projective>(&c_dft, i);
|
||||
assert_eq!(h, c_back);
|
||||
|
||||
let h: Vec<E::G2Projective> =
|
||||
(0..size).map(|_| E::G2Projective::rand(&mut rng)).collect();
|
||||
|
||||
let c_dft = group_dft::<E::Fr, E::G2Projective>(&h, i);
|
||||
let c_back = group_inv_dft::<E::Fr, E::G2Projective>(&c_dft, i);
|
||||
assert_eq!(h, c_back);
|
||||
}
|
||||
}
|
||||
}
|
||||
698
src/kzg.rs
Normal file
@@ -0,0 +1,698 @@
|
||||
// This file includes backend tools:
|
||||
// (1) read_line() is for taking inputs from the user
|
||||
// (2) kzg_open_g1 is for opening KZG commitments
|
||||
// (3) kzg_verify_g1 is for verifying KZG commitments
|
||||
// (4) hash_caulk_single is for hashing group and field elements into a field
|
||||
// element (5) random_field is for generating random field elements
|
||||
|
||||
use crate::{compute_h, group_dft, util::convert_to_bigints};
|
||||
use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine, ProjectiveCurve};
|
||||
use ark_ff::Field;
|
||||
use ark_poly::{
|
||||
univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain, Polynomial,
|
||||
UVPolynomial,
|
||||
};
|
||||
use ark_poly_commit::kzg10::*;
|
||||
use ark_std::{end_timer, start_timer, One, Zero};
|
||||
#[cfg(feature = "parallel")]
|
||||
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
|
||||
use std::marker::PhantomData;
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
// KZG opening and verifying
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
pub struct KZGCommit<E: PairingEngine> {
|
||||
phantom: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<E: PairingEngine> KZGCommit<E> {
|
||||
pub fn commit_g1(powers: &Powers<E>, polynomial: &DensePolynomial<E::Fr>) -> E::G1Affine {
|
||||
let (com, _randomness) = KZG10::<E, _>::commit(powers, polynomial, None, None).unwrap();
|
||||
com.0
|
||||
}
|
||||
|
||||
pub fn commit_g2(g2_powers: &[E::G2Affine], poly: &DensePolynomial<E::Fr>) -> E::G2Affine {
|
||||
// todo:MSM?
|
||||
let mut res = g2_powers[0].mul(poly[0]);
|
||||
for i in 1..poly.len() {
|
||||
res = res + g2_powers[i].mul(poly[i])
|
||||
}
|
||||
res.into_affine()
|
||||
}
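// A possible MSM variant for the `todo:MSM?` above (a sketch; `commit_g2_msm` is a
// hypothetical name, not part of the crate, and it reuses VariableBaseMSM and
// convert_to_bigints already imported at the top of this file):
pub fn commit_g2_msm(g2_powers: &[E::G2Affine], poly: &DensePolynomial<E::Fr>) -> E::G2Affine {
    // one multi-scalar multiplication instead of a scalar-by-scalar loop
    assert!(g2_powers.len() >= poly.len());
    VariableBaseMSM::multi_scalar_mul(
        &g2_powers[..poly.len()],
        convert_to_bigints(&poly.coeffs).as_slice(),
    )
    .into_affine()
}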
|
||||
// Function to commit to f(X,Y)
|
||||
// here f = [ [a0, a1, a2], [b1, b2, b3] ] represents (a0 + a1 Y + a2 Y^2 ) + X
|
||||
// (b1 + b2 Y + b3 Y^2)
|
||||
//
|
||||
// First we unwrap to get a vector of form [a0, a1, a2, b0, b1, b2]
|
||||
// Then we commit to f as a commitment to f'(X) = a0 + a1 X + a2 X^2 + b0 X^3 +
|
||||
// b1 X^4 + b2 X^5
|
||||
//
|
||||
// We also need to know the maximum degree of (a0 + a1 Y + a2 Y^2 ) to prevent
|
||||
// overflow errors.
|
||||
//
|
||||
// This is described in Section 4.6.2
|
||||
pub fn bipoly_commit(
|
||||
pp: &crate::multi::PublicParameters<E>,
|
||||
poly: &Vec<DensePolynomial<E::Fr>>,
|
||||
deg_x: usize,
|
||||
) -> E::G1Affine {
|
||||
let mut poly_formatted = Vec::new();
|
||||
|
||||
for i in 0..poly.len() {
|
||||
let temp = convert_to_bigints(&poly[i].coeffs);
|
||||
for j in 0..poly[i].len() {
|
||||
poly_formatted.push(temp[j]);
|
||||
}
|
||||
let temp = convert_to_bigints(&[E::Fr::zero()].to_vec())[0];
|
||||
for _ in poly[i].len()..deg_x {
|
||||
poly_formatted.push(temp);
|
||||
}
|
||||
}
|
||||
|
||||
assert!(pp.poly_ck.powers_of_g.len() >= poly_formatted.len());
|
||||
let g1_poly =
|
||||
VariableBaseMSM::multi_scalar_mul(&pp.poly_ck.powers_of_g, poly_formatted.as_slice())
|
||||
.into_affine();
|
||||
|
||||
g1_poly
|
||||
}
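// A small worked example of the flattening above (hypothetical values): with deg_x = 3 and
// poly = [[a0, a1, a2], [b0, b1]], i.e. f(X,Y) = (a0 + a1*Y + a2*Y^2) + X*(b0 + b1*Y), the loop
// produces the coefficient vector [a0, a1, a2, b0, b1, 0], so the returned commitment is
//   [a0 + a1*x + a2*x^2 + b0*x^3 + b1*x^4]_1 ,
// i.e. Y is evaluated at the SRS secret x and X at x^{deg_x}, which is what partial_open_g1 and
// partial_verify_g1 below rely on.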
|
||||
// compute all openings to c_poly using a smart formula
|
||||
// This Code implements an algorithm for calculating n openings of a KZG vector
|
||||
// commitment of size n in n log(n) time. The algorithm is by Feist and
|
||||
// Khovratovich. It is useful for preprocessing.
|
||||
// The full algorithm is described here https://github.com/khovratovich/Kate/blob/master/Kate_amortized.pdf
|
||||
pub fn multiple_open<G>(
|
||||
c_poly: &DensePolynomial<E::Fr>, // c(X)
|
||||
powers: &[G], // SRS
|
||||
p: usize,
|
||||
) -> Vec<G>
|
||||
where
|
||||
G: AffineCurve<ScalarField = E::Fr> + Sized,
|
||||
{
|
||||
let timer = start_timer!(|| "multiple open");
|
||||
|
||||
let degree = c_poly.coeffs.len() - 1;
|
||||
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(degree).unwrap();
|
||||
|
||||
let h_timer = start_timer!(|| "compute h");
|
||||
let powers: Vec<G::Projective> = powers.iter().map(|x| x.into_projective()).collect();
|
||||
let h2 = compute_h(c_poly, &powers, p);
|
||||
end_timer!(h_timer);
|
||||
|
||||
let dom_size = input_domain.size();
|
||||
assert_eq!(1 << p, dom_size);
|
||||
assert_eq!(degree + 1, dom_size);
|
||||
|
||||
let dft_timer = start_timer!(|| "G1 dft");
|
||||
let q2 = group_dft::<E::Fr, G::Projective>(&h2, p);
|
||||
end_timer!(dft_timer);
|
||||
|
||||
let res = G::Projective::batch_normalization_into_affine(q2.as_ref());
|
||||
|
||||
end_timer!(timer);
|
||||
res
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////
|
||||
// KZG.Open( srs_KZG, f(X, Y), deg, alpha )
|
||||
// returns ([f(alpha, x)]_1, pi)
|
||||
// Algorithm described in Section 4.6.2, KZG for Bivariate Polynomials
|
||||
pub fn partial_open_g1(
|
||||
pp: &crate::multi::PublicParameters<E>,
|
||||
poly: &[DensePolynomial<E::Fr>],
|
||||
deg_x: usize,
|
||||
point: &E::Fr,
|
||||
) -> (E::G1Affine, E::G1Affine, DensePolynomial<E::Fr>) {
|
||||
let mut poly_partial_eval = DensePolynomial::from_coefficients_vec(vec![E::Fr::zero()]);
|
||||
let mut alpha = E::Fr::one();
|
||||
for i in 0..poly.len() {
|
||||
let pow_alpha = DensePolynomial::from_coefficients_vec(vec![alpha.clone()]);
|
||||
poly_partial_eval = poly_partial_eval + &pow_alpha * &poly[i];
|
||||
alpha = alpha * point;
|
||||
}
|
||||
|
||||
let eval = VariableBaseMSM::multi_scalar_mul(
|
||||
&pp.poly_ck.powers_of_g,
|
||||
convert_to_bigints(&poly_partial_eval.coeffs).as_slice(),
|
||||
)
|
||||
.into_affine();
|
||||
|
||||
let mut witness_bipolynomial = Vec::new();
|
||||
let poly_reverse: Vec<_> = poly.into_iter().rev().collect();
|
||||
witness_bipolynomial.push(poly_reverse[0].clone());
|
||||
|
||||
let alpha = DensePolynomial::from_coefficients_vec(vec![point.clone()]);
|
||||
for i in 1..(poly_reverse.len() - 1) {
|
||||
witness_bipolynomial
|
||||
.push(poly_reverse[i].clone() + &alpha * &witness_bipolynomial[i - 1]);
|
||||
}
|
||||
|
||||
witness_bipolynomial.reverse();
|
||||
|
||||
let proof = Self::bipoly_commit(pp, &witness_bipolynomial, deg_x);
|
||||
|
||||
(eval, proof, poly_partial_eval)
|
||||
}
|
||||
|
||||
// KZG.Open( srs_KZG, f(X), deg, (alpha1, alpha2, ..., alphan) )
|
||||
// returns ([f(alpha1), ..., f(alphan)], pi)
|
||||
// Algorithm described in Section 4.6.1, Multiple Openings
|
||||
pub fn open_g1_batch(
|
||||
poly_ck: &Powers<E>,
|
||||
poly: &DensePolynomial<E::Fr>,
|
||||
max_deg: Option<&usize>,
|
||||
points: &[E::Fr],
|
||||
) -> (Vec<E::Fr>, E::G1Affine) {
|
||||
let mut evals = Vec::new();
|
||||
let mut proofs = Vec::new();
|
||||
for p in points.iter() {
|
||||
let (eval, pi) = Self::open_g1_single(poly_ck, poly, max_deg, p);
|
||||
evals.push(eval);
|
||||
proofs.push(pi);
|
||||
}
|
||||
|
||||
let mut res = E::G1Projective::zero(); // default value
|
||||
|
||||
for j in 0..points.len() {
|
||||
let w_j = points[j];
|
||||
// 1. Computing coefficient [1/prod]
|
||||
let mut prod = E::Fr::one();
|
||||
for (k, p) in points.iter().enumerate() {
|
||||
if k != j {
|
||||
prod *= w_j - p;
|
||||
}
|
||||
}
|
||||
// 2. Summation
|
||||
let q_add = proofs[j].mul(prod.inverse().unwrap()); //[1/prod]Q_{j}
|
||||
res += q_add;
|
||||
}
|
||||
|
||||
(evals, res.into_affine())
|
||||
}
|
||||
|
||||
// KZG.Open( srs_KZG, f(X), deg, alpha ) returns (f(alpha), pi)
|
||||
fn open_g1_single(
|
||||
poly_ck: &Powers<E>,
|
||||
poly: &DensePolynomial<E::Fr>,
|
||||
max_deg: Option<&usize>,
|
||||
point: &E::Fr,
|
||||
) -> (E::Fr, E::G1Affine) {
|
||||
let eval = poly.evaluate(point);
|
||||
|
||||
let global_max_deg = poly_ck.powers_of_g.len();
|
||||
|
||||
let mut d: usize = 0;
|
||||
if max_deg == None {
|
||||
d += global_max_deg;
|
||||
} else {
|
||||
d += max_deg.unwrap();
|
||||
}
|
||||
let divisor = DensePolynomial::from_coefficients_vec(vec![-*point, E::Fr::one()]);
|
||||
let witness_polynomial = poly / &divisor;
|
||||
|
||||
assert!(poly_ck.powers_of_g[(global_max_deg - d)..].len() >= witness_polynomial.len());
|
||||
let proof = VariableBaseMSM::multi_scalar_mul(
|
||||
&poly_ck.powers_of_g[(global_max_deg - d)..],
|
||||
convert_to_bigints(&witness_polynomial.coeffs).as_slice(),
|
||||
)
|
||||
.into_affine();
|
||||
(eval, proof)
|
||||
}
|
||||
|
||||
// KZG.Verify( srs_KZG, F, deg, (alpha1, alpha2, ..., alphan), (v1, ..., vn), pi
|
||||
// ) Algorithm described in Section 4.6.1, Multiple Openings
|
||||
pub fn verify_g1(
|
||||
// TODO: parameters struct
|
||||
// Verify that @c_com is a commitment to C(X) such that C(x)=z
|
||||
powers_of_g1: &[E::G1Affine], // generator of G1
|
||||
powers_of_g2: &[E::G2Affine], // [1]_2, [x]_2, [x^2]_2, ...
|
||||
c_com: &E::G1Affine, // commitment
|
||||
max_deg: Option<&usize>, // max degree
|
||||
points: &[E::Fr], // x such that eval = C(x)
|
||||
evals: &[E::Fr], // evaluation
|
||||
pi: &E::G1Affine, // proof
|
||||
) -> bool {
|
||||
// Interpolation set
|
||||
// tau_i(X) = lagrange_tau[i] = polynomial equal to 0 at point[j] for j!= i and
|
||||
// 1 at points[i]
|
||||
|
||||
let mut lagrange_tau = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
|
||||
let mut prod = DensePolynomial::from_coefficients_slice(&[E::Fr::one()]);
|
||||
let mut components = vec![];
|
||||
for &p in points.iter() {
|
||||
let poly = DensePolynomial::from_coefficients_slice(&[-p, E::Fr::one()]);
|
||||
prod = &prod * (&poly);
|
||||
components.push(poly);
|
||||
}
|
||||
|
||||
for i in 0..points.len() {
|
||||
let mut temp = &prod / &components[i];
|
||||
let lagrange_scalar = temp.evaluate(&points[i]).inverse().unwrap() * evals[i];
|
||||
temp.coeffs.iter_mut().for_each(|x| *x *= lagrange_scalar);
|
||||
lagrange_tau = lagrange_tau + temp;
|
||||
}
|
||||
|
||||
// commit to sum evals[i] tau_i(X)
|
||||
assert!(
|
||||
powers_of_g1.len() >= lagrange_tau.len(),
|
||||
"KZG verifier doesn't have enough g1 powers"
|
||||
);
|
||||
let g1_tau = VariableBaseMSM::multi_scalar_mul(
|
||||
&powers_of_g1[..lagrange_tau.len()],
|
||||
convert_to_bigints(&lagrange_tau.coeffs).as_slice(),
|
||||
);
|
||||
|
||||
// vanishing polynomial
|
||||
let z_tau = prod;
|
||||
|
||||
// commit to z_tau(X) in g2
|
||||
assert!(
|
||||
powers_of_g2.len() >= z_tau.len(),
|
||||
"KZG verifier doesn't have enough g2 powers"
|
||||
);
|
||||
let g2_z_tau = VariableBaseMSM::multi_scalar_mul(
|
||||
&powers_of_g2[..z_tau.len()],
|
||||
convert_to_bigints(&z_tau.coeffs).as_slice(),
|
||||
);
|
||||
|
||||
let global_max_deg = powers_of_g1.len();
|
||||
|
||||
let mut d: usize = 0;
|
||||
if max_deg == None {
|
||||
d += global_max_deg;
|
||||
} else {
|
||||
d += max_deg.unwrap();
|
||||
}
|
||||
|
||||
let pairing_inputs = vec![
|
||||
(
|
||||
E::G1Prepared::from((g1_tau - c_com.into_projective()).into_affine()),
|
||||
E::G2Prepared::from(powers_of_g2[global_max_deg - d]),
|
||||
),
|
||||
(
|
||||
E::G1Prepared::from(*pi),
|
||||
E::G2Prepared::from(g2_z_tau.into_affine()),
|
||||
),
|
||||
];
|
||||
|
||||
E::product_of_pairings(pairing_inputs.iter()).is_one()
|
||||
}
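// In formulas (a sketch of what the pairings above check): with tau_i the Lagrange polynomial
// of the opening points, z(X) their vanishing polynomial, D = powers_of_g1.len() and d the
// degree bound (d = D when max_deg is None), the product of pairings is
//   e([\sum_i v_i * tau_i(x)]_1 - C, [x^{D-d}]_2) * e(pi, [z(x)]_2) = 1 ,
// which an honest pi = [x^{D-d} * (f(x) - \sum_i v_i * tau_i(x)) / z(x)]_1 from open_g1_batch
// satisfies, since open_g1_single commits the witness against the powers shifted by D - d.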
|
||||
// KZG.Verify( srs_KZG, F, deg, alpha, F_alpha, pi )
|
||||
// Algorithm described in Section 4.6.2, KZG for Bivariate Polynomials
|
||||
// Be very careful here. Verification is only valid if it is paired with a
|
||||
// degree check.
|
||||
pub fn partial_verify_g1(
|
||||
srs: &crate::multi::PublicParameters<E>,
|
||||
c_com: &E::G1Affine, // commitment
|
||||
deg_x: usize,
|
||||
point: &E::Fr,
|
||||
partial_eval: &E::G1Affine,
|
||||
pi: &E::G1Affine, // proof
|
||||
) -> bool {
|
||||
// todo: pairing product
|
||||
let pairing1 = E::pairing(
|
||||
c_com.into_projective() - partial_eval.into_projective(),
|
||||
srs.g2_powers[0],
|
||||
);
|
||||
let pairing2 = E::pairing(
|
||||
*pi,
|
||||
srs.g2_powers[deg_x].into_projective() - srs.g2_powers[0].mul(*point),
|
||||
);
|
||||
|
||||
pairing1 == pairing2
|
||||
}
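// In formulas, in terms of the flattening used by bipoly_commit (X committed at x^{deg_x}),
// the check above is
//   e(C - [f(alpha, x)]_1, [1]_2) = e(pi, [x^{deg_x} - alpha]_2) ,
// i.e. a KZG opening of the flattened polynomial at X = alpha; as the comment above warns, it
// is only meaningful when paired with a separate degree check.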
|
||||
// Algorithm for aggregating KZG proofs into a single proof
|
||||
// Described in Section 4.6.3 Subset openings
|
||||
// compute Q =\sum_{j=1}^m \frac{Q_{i_j}}}{\prod_{1\leq k\leq m,\; k\neq
|
||||
// j}(\omega^{i_j}-\omega^{i_k})}
|
||||
pub fn aggregate_proof_g2(
|
||||
openings: &[E::G2Affine], // Q_i
|
||||
positions: &[usize], // i_j
|
||||
input_domain: &GeneralEvaluationDomain<E::Fr>,
|
||||
) -> E::G2Affine {
|
||||
let m = positions.len();
|
||||
let mut res = openings[0].into_projective(); // default value
|
||||
|
||||
for j in 0..m {
|
||||
let i_j = positions[j];
|
||||
let w_ij = input_domain.element(i_j);
|
||||
// 1. Computing coefficient [1/prod]
|
||||
let mut prod = E::Fr::one();
|
||||
for k in 0..m {
|
||||
let i_k = positions[k];
|
||||
let w_ik = input_domain.element(i_k);
|
||||
if k != j {
|
||||
prod = prod * (w_ij - w_ik);
|
||||
}
|
||||
}
|
||||
// 2. Summation
|
||||
let q_add = openings[i_j].mul(prod.inverse().unwrap()); //[1/prod]Q_{j}
|
||||
if j == 0 {
|
||||
res = q_add;
|
||||
} else {
|
||||
res = res + q_add;
|
||||
}
|
||||
}
|
||||
res.into_affine()
|
||||
}
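// The weights above come from the partial-fraction identity for distinct points w^{i_j},
//   (c(X) - C_I(X)) / z_I(X) = \sum_j (c(X) - c(w^{i_j})) / ( (X - w^{i_j}) * \prod_{k != j} (w^{i_j} - w^{i_k}) ) ,
// where C_I interpolates c on the subset and z_I is its vanishing polynomial, so Q is a G2
// commitment to the subset quotient; this is exactly what test_Q_g2 below verifies with a
// pairing check.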
}
|
||||
|
||||
pub fn generate_lagrange_polynomials_subset<E: PairingEngine>(
|
||||
positions: &[usize],
|
||||
srs: &crate::multi::PublicParameters<E>,
|
||||
) -> Vec<DensePolynomial<E::Fr>> {
|
||||
let mut tau_polys = vec![];
|
||||
let m = positions.len();
|
||||
for j in 0..m {
|
||||
let mut tau_j = DensePolynomial::from_coefficients_slice(&[E::Fr::one()]); // start from tau_j =1
|
||||
for k in 0..m {
|
||||
if k != j {
|
||||
// tau_j = prod_{k\neq j} (X-w^(i_k))/(w^(i_j)-w^(i_k))
|
||||
let denum = srs.domain_N.element(positions[j]) - srs.domain_N.element(positions[k]);
|
||||
tau_j = &tau_j
|
||||
* &DensePolynomial::from_coefficients_slice(&[
|
||||
-srs.domain_N.element(positions[k]) / denum, //-w^(i_k))/(w^(i_j)-w^(i_k)
|
||||
E::Fr::one() / denum, // 1//(w^(i_j)-w^(i_k))
|
||||
]);
|
||||
}
|
||||
}
|
||||
tau_polys.push(tau_j.clone());
|
||||
}
|
||||
tau_polys
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
use super::{generate_lagrange_polynomials_subset, KZGCommit, *};
|
||||
use crate::caulk_single_setup;
|
||||
use ark_bls12_377::Bls12_377;
|
||||
use ark_bls12_381::Bls12_381;
|
||||
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
|
||||
use ark_poly::{univariate::DensePolynomial, EvaluationDomain, Polynomial, UVPolynomial};
|
||||
use ark_poly_commit::kzg10::KZG10;
|
||||
use ark_std::{test_rng, One, Zero};
|
||||
use std::time::Instant;
|
||||
|
||||
#[test]
|
||||
fn test_lagrange() {
|
||||
test_lagrange_helper::<Bls12_377>();
|
||||
test_lagrange_helper::<Bls12_381>();
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
fn test_lagrange_helper<E: PairingEngine>() {
|
||||
let p: usize = 8; // bitlength of poly degree
|
||||
let max_degree: usize = (1 << p) + 2;
|
||||
let m: usize = 8;
|
||||
let N: usize = 1 << p;
|
||||
|
||||
let now = Instant::now();
|
||||
let pp = crate::multi::PublicParameters::<E>::setup(&max_degree, &N, &m, &p);
|
||||
println!("time to setup {:?}", now.elapsed());
|
||||
|
||||
let mut positions: Vec<usize> = vec![];
|
||||
for i in 0..m {
|
||||
// generate positions evenly distributed in the set
|
||||
let i_j: usize = i * (max_degree / m);
|
||||
positions.push(i_j);
|
||||
}
|
||||
|
||||
let tau_polys = generate_lagrange_polynomials_subset(&positions, &pp);
|
||||
for j in 0..m {
|
||||
for k in 0..m {
|
||||
if k == j {
|
||||
assert_eq!(
|
||||
tau_polys[j].evaluate(&pp.domain_N.element(positions[k])),
|
||||
E::Fr::one()
|
||||
)
|
||||
} else {
|
||||
assert_eq!(
|
||||
tau_polys[j].evaluate(&pp.domain_N.element(positions[k])),
|
||||
E::Fr::zero()
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[allow(non_snake_case)]
|
||||
pub fn test_Q_g2() {
|
||||
test_Q_g2_helper::<Bls12_381>();
|
||||
test_Q_g2_helper::<Bls12_377>();
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
pub fn test_Q_g2_helper<E: PairingEngine>() {
|
||||
let rng = &mut ark_std::test_rng();
|
||||
|
||||
// current kzg setup should be changed with output from a setup ceremony
|
||||
let p: usize = 6; // bitlength of poly degree
|
||||
let max_degree: usize = (1 << p) + 2;
|
||||
let actual_degree: usize = (1 << p) - 1;
|
||||
let m: usize = 1 << (p / 2);
|
||||
let N: usize = 1 << p;
|
||||
let pp = crate::multi::PublicParameters::setup(&max_degree, &N, &m, &p);
|
||||
|
||||
// Setting up test instance to run evaluate on.
|
||||
// test randomness for c_poly is same everytime.
|
||||
// test index equals 5 everytime
|
||||
// g_c = g^(c(x))
|
||||
|
||||
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, rng);
|
||||
let c_com = KZGCommit::<E>::commit_g1(&pp.poly_ck, &c_poly);
|
||||
|
||||
let now = Instant::now();
|
||||
let openings = KZGCommit::<E>::multiple_open::<E::G2Affine>(&c_poly, &pp.g2_powers, p);
|
||||
println!("Multi advanced computed in {:?}", now.elapsed());
|
||||
|
||||
let mut positions: Vec<usize> = vec![];
|
||||
for i in 0..m {
|
||||
let i_j: usize = i * (max_degree / m);
|
||||
positions.push(i_j);
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
|
||||
// Compute proof
|
||||
let Q: E::G2Affine =
|
||||
KZGCommit::<E>::aggregate_proof_g2(&openings, &positions, &pp.domain_N);
|
||||
println!(
|
||||
"Full proof for {:?} positions computed in {:?}",
|
||||
m,
|
||||
now.elapsed()
|
||||
);
|
||||
|
||||
// Compute commitment to C_I
|
||||
let mut C_I = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]); // C_I = sum_j c_j*tau_j
|
||||
let tau_polys = generate_lagrange_polynomials_subset(&positions, &pp);
|
||||
for j in 0..m {
|
||||
C_I = &C_I + &(&tau_polys[j] * c_poly.evaluate(&pp.domain_N.element(positions[j])));
|
||||
// sum_j c_j*tau_j
|
||||
}
|
||||
let c_I_com = KZGCommit::<E>::commit_g1(&pp.poly_ck, &C_I);
|
||||
|
||||
// Compute commitment to z_I
|
||||
let mut z_I = DensePolynomial::from_coefficients_slice(&[E::Fr::one()]);
|
||||
for j in 0..m {
|
||||
z_I = &z_I
|
||||
* &DensePolynomial::from_coefficients_slice(&[
|
||||
-pp.domain_N.element(positions[j]),
|
||||
E::Fr::one(),
|
||||
]);
|
||||
}
|
||||
let z_I_com = KZGCommit::<E>::commit_g1(&pp.poly_ck, &z_I);
|
||||
|
||||
// pairing check
|
||||
let pairing1 = E::pairing(
|
||||
(c_com.into_projective() - c_I_com.into_projective()).into_affine(),
|
||||
pp.g2_powers[0],
|
||||
);
|
||||
let pairing2 = E::pairing(z_I_com, Q);
|
||||
assert_eq!(pairing1, pairing2);
|
||||
}
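The final assertion checks the aggregated G2 opening against the subset commitments; written out, it is the pairing identity

\[
e\big(C - C_I,\ [1]_2\big) \;=\; e\big([z_I]_1,\ Q\big),
\]

i.e. \(c(X) - C_I(X) = z_I(X)\,q(X)\) holds at the secret point, where \(C_I\) interpolates \(c\) on \(\{\omega^{i_j}\}\) and \(z_I(X) = \prod_j (X - \omega^{i_j})\).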
|
||||
|
||||
#[test]
|
||||
fn test_single() {
|
||||
test_single_helper::<Bls12_381>();
|
||||
test_single_helper::<Bls12_377>();
|
||||
}
|
||||
|
||||
fn test_single_helper<E: PairingEngine>() {
|
||||
let mut rng = test_rng();
|
||||
|
||||
// setting public parameters
|
||||
// current kzg setup should be changed with output from a setup ceremony
|
||||
let max_degree: usize = 100;
|
||||
let actual_degree: usize = 63;
|
||||
let pp = caulk_single_setup(max_degree, actual_degree, &mut rng);
|
||||
|
||||
// Setting up test instance to run evaluate on.
|
||||
// test randomness for c_poly is same everytime.
|
||||
// test index equals 5 everytime
|
||||
// g_c = g^(c(x))
|
||||
let rng = &mut test_rng();
|
||||
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, rng);
|
||||
let (_c_com, c_com_open) = KZG10::<E, _>::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
|
||||
|
||||
let i: usize = 6;
|
||||
let q = single_open_default(&c_poly, &c_com_open, &pp.poly_ck, i, actual_degree);
|
||||
let q2 = single_open_fast(&c_poly, &pp.poly_ck, i, actual_degree);
|
||||
assert_eq!(q, q2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_multi() {
|
||||
test_multi_helper::<Bls12_381>();
|
||||
test_multi_helper::<Bls12_377>();
|
||||
}
|
||||
|
||||
pub fn test_multi_helper<E: PairingEngine>() {
|
||||
let mut rng = test_rng();
|
||||
|
||||
// current kzg setup should be changed with output from a setup ceremony
|
||||
let p: usize = 9;
|
||||
let max_degree: usize = 1 << p + 1;
|
||||
let actual_degree: usize = (1 << p) - 1;
|
||||
let pp = caulk_single_setup(max_degree, actual_degree, &mut rng);
|
||||
|
||||
// Setting up test instance to run evaluate on.
|
||||
// test randomness for c_poly is same everytime.
|
||||
// test index equals 5 everytime
|
||||
// g_c = g^(c(x))
|
||||
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, &mut rng);
|
||||
let (c_com, c_com_open) = KZG10::<E, _>::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
|
||||
let _g_c = c_com.0;
|
||||
|
||||
let now = Instant::now();
|
||||
let q = multiple_open_naive(&c_poly, &c_com_open, &pp.poly_ck, actual_degree);
|
||||
println!("Multi naive computed in {:?}", now.elapsed());
|
||||
|
||||
let now = Instant::now();
|
||||
let q2 = KZGCommit::<E>::multiple_open::<E::G1Affine>(&c_poly, &pp.poly_ck.powers_of_g, p);
|
||||
println!("Multi advanced computed in {:?}", now.elapsed());
|
||||
assert_eq!(q, q2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_commit() {
|
||||
test_commit_helper::<Bls12_381>();
|
||||
test_commit_helper::<Bls12_377>();
|
||||
}
|
||||
|
||||
pub fn test_commit_helper<E: PairingEngine>() {
|
||||
let mut rng = test_rng();
|
||||
|
||||
// current kzg setup should be changed with output from a setup ceremony
|
||||
let max_degree: usize = 100;
|
||||
let actual_degree: usize = 63;
|
||||
let pp = caulk_single_setup(max_degree, actual_degree, &mut rng);
|
||||
|
||||
// Setting up test instance to run evaluate on.
|
||||
// test randomness for c_poly is same everytime.
|
||||
// g_c = g^(c(x))
|
||||
let c_poly = DensePolynomial::<E::Fr>::rand(actual_degree, &mut rng);
|
||||
let (c_com, _c_com_open) = KZG10::<E, _>::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
|
||||
let g_c1 = c_com.0;
|
||||
|
||||
let g_c2 = commit_direct(&c_poly, &pp.poly_ck);
|
||||
assert_eq!(g_c1, g_c2);
|
||||
println!("commit test passed")
|
||||
}
|
||||
|
||||
/// Various functions that are used for testing
|
||||
|
||||
fn commit_direct<E: PairingEngine>(
|
||||
c_poly: &DensePolynomial<E::Fr>, // c(X)
|
||||
poly_ck: &Powers<E>, // SRS
|
||||
) -> E::G1Affine {
|
||||
assert!(c_poly.coeffs.len() <= poly_ck.powers_of_g.len());
|
||||
let mut com = poly_ck.powers_of_g[0].mul(c_poly.coeffs[0]);
|
||||
for i in 1..c_poly.coeffs.len() {
|
||||
com = com + poly_ck.powers_of_g[i].mul(c_poly.coeffs[i]);
|
||||
}
|
||||
com.into_affine()
|
||||
}
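`commit_direct` above is the readable reference implementation. As a minimal sketch (a hypothetical helper, not from this crate, assuming the same arkworks 0.3 imports already used in this file), the same commitment can be computed with a single multi-scalar multiplication:

fn commit_msm<E: PairingEngine>(
    c_poly: &DensePolynomial<E::Fr>, // c(X)
    poly_ck: &Powers<E>,             // SRS
) -> E::G1Affine {
    use ark_ec::{msm::VariableBaseMSM, ProjectiveCurve};
    use ark_ff::PrimeField;

    assert!(c_poly.coeffs.len() <= poly_ck.powers_of_g.len());
    // same sum_i c_i * [x^i]_1 as the loop above, computed as one MSM
    let scalars: Vec<_> = c_poly.coeffs.iter().map(|c| c.into_repr()).collect();
    VariableBaseMSM::multi_scalar_mul(&poly_ck.powers_of_g[..scalars.len()], &scalars)
        .into_affine()
}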
|
||||
|
||||
// compute all openings to c_poly by mere calling `open` N times
|
||||
fn multiple_open_naive<E: PairingEngine>(
|
||||
c_poly: &DensePolynomial<E::Fr>,
|
||||
c_com_open: &Randomness<E::Fr, DensePolynomial<E::Fr>>,
|
||||
poly_ck: &Powers<E>,
|
||||
degree: usize,
|
||||
) -> Vec<E::G1Affine> {
|
||||
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(degree).unwrap();
|
||||
let mut res: Vec<E::G1Affine> = vec![];
|
||||
for i in 0..input_domain.size() {
|
||||
let omega_i = input_domain.element(i);
|
||||
res.push(kzg_open_g1_test::<E>(&c_poly, &omega_i, &c_com_open, &poly_ck).w);
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////
|
||||
fn kzg_open_g1_test<E: PairingEngine>(
|
||||
p: &DensePolynomial<E::Fr>,
|
||||
omega_5: &E::Fr,
|
||||
polycom_open: &Randomness<E::Fr, DensePolynomial<E::Fr>>,
|
||||
poly_ck: &Powers<E>,
|
||||
) -> Proof<E> {
|
||||
let rng = &mut ark_std::test_rng();
|
||||
|
||||
let (witness_polynomial, _random_witness_polynomial) =
|
||||
KZG10::<E, _>::compute_witness_polynomial(p, omega_5.clone(), polycom_open).unwrap();
|
||||
|
||||
let (temp0, _temp1) = KZG10::commit(poly_ck, &witness_polynomial, None, Some(rng)).unwrap();
|
||||
Proof {
|
||||
w: temp0.0,
|
||||
random_v: None,
|
||||
}
|
||||
}
|
||||
|
||||
// compute KZG proof Q = g1_q = g^( (c(x) - c(w^i)) / (x - w^i) ) where x is
|
||||
// secret, w^i is the point where we open, and c(X) is the committed polynomial
|
||||
fn single_open_default<E: PairingEngine>(
|
||||
c_poly: &DensePolynomial<E::Fr>, // c(X)
|
||||
c_com_open: &Randomness<E::Fr, DensePolynomial<E::Fr>>, //
|
||||
poly_ck: &Powers<E>,
|
||||
i: usize, //
|
||||
degree: usize,
|
||||
) -> E::G1Affine {
|
||||
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(degree).unwrap();
|
||||
let omega_i = input_domain.element(i);
|
||||
let c_poly_open = kzg_open_g1_test(&c_poly, &omega_i, &c_com_open, &poly_ck);
|
||||
c_poly_open.w
|
||||
}
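For context, the standard KZG verification equation that such an opening is meant to satisfy (not shown in this file) is

\[
e\big(C - [c(\omega^i)]_1,\ [1]_2\big) \;=\; e\big(Q,\ [x - \omega^i]_2\big),
\]

with \(C = [c(x)]_1\) the commitment, \(x\) the secret trapdoor, and \(Q\) the proof returned here.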
|
||||
|
||||
// KZG proof/opening at point y for c(X) = sum_i c_i X^i
|
||||
//(1)T_y(X) = sum_i t_i X^i
|
||||
//(2) t_{deg-1} = c_deg
|
||||
//(3) t_j = c_{j+1} + y*t_{j+1}
|
||||
fn single_open_fast<E: PairingEngine>(
|
||||
c_poly: &DensePolynomial<E::Fr>, // c(X)
|
||||
poly_ck: &Powers<E>, // SRS
|
||||
i: usize, // y=w^i
|
||||
degree: usize, // degree of c(X)
|
||||
) -> E::G1Affine {
|
||||
// computing opening point
|
||||
let input_domain: GeneralEvaluationDomain<E::Fr> = EvaluationDomain::new(degree).unwrap();
|
||||
let y = input_domain.element(i);
|
||||
|
||||
// compute quotient
|
||||
let mut t_poly = c_poly.clone();
|
||||
t_poly.coeffs.remove(0); // shifting indices
|
||||
for j in (0..t_poly.len() - 1).rev() {
|
||||
t_poly.coeffs[j] = c_poly.coeffs[j + 1] + y * t_poly.coeffs[j + 1]
|
||||
}
|
||||
|
||||
// commit
|
||||
let (t_com, _) = KZG10::commit(&poly_ck, &t_poly, None, None).unwrap();
|
||||
t_com.0
|
||||
}
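A tiny worked instance of the recurrence used in `single_open_fast`: for \(c(X) = c_0 + c_1 X + c_2 X^2\) and opening point \(y\),

\[
t_1 = c_2, \qquad t_0 = c_1 + y\,c_2, \qquad (X - y)\,(t_0 + t_1 X) \;=\; c(X) - c(y),
\]

so committing to \(T_y(X)\) with the SRS gives exactly the KZG witness for the opening at \(y\).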
|
||||
}
|
||||
@@ -1,32 +1,32 @@
|
||||
mod caulk_single;
|
||||
mod caulk_single_setup;
|
||||
mod caulk_single_unity;
|
||||
mod dft;
|
||||
mod kzg;
|
||||
pub mod multi;
|
||||
mod pedersen;
|
||||
mod single;
|
||||
mod transcript;
|
||||
pub(crate) mod util;
|
||||
|
||||
pub use caulk_single::{caulk_single_prove, caulk_single_verify};
|
||||
pub use caulk_single_setup::caulk_single_setup;
|
||||
pub use dft::*;
|
||||
pub use kzg::KZGCommit;
|
||||
pub use multi::PublicParameters;
|
||||
pub use pedersen::PedersenParam;
|
||||
pub use single::{caulk_single_prove, caulk_single_verify, setup::caulk_single_setup};
|
||||
pub use transcript::CaulkTranscript;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use crate::caulk_single_setup;
|
||||
use crate::CaulkTranscript;
|
||||
use crate::KZGCommit;
|
||||
use crate::{caulk_single_prove, caulk_single_verify};
|
||||
use ark_bls12_381::{Bls12_381, Fr};
|
||||
use crate::{
|
||||
caulk_single_prove, caulk_single_setup, caulk_single_verify, CaulkTranscript, KZGCommit,
|
||||
};
|
||||
use ark_bls12_381::{Bls12_381, Fr, G1Affine};
|
||||
use ark_ec::{AffineCurve, ProjectiveCurve};
|
||||
use ark_poly::univariate::DensePolynomial;
|
||||
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain, Polynomial, UVPolynomial};
|
||||
use ark_poly::{
|
||||
univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain, Polynomial,
|
||||
UVPolynomial,
|
||||
};
|
||||
use ark_poly_commit::kzg10::KZG10;
|
||||
use ark_std::test_rng;
|
||||
use ark_std::UniformRand;
|
||||
use ark_std::{test_rng, UniformRand};
|
||||
|
||||
type UniPoly381 = DensePolynomial<Fr>;
|
||||
type KzgBls12_381 = KZG10<Bls12_381, UniPoly381>;
|
||||
@@ -50,7 +50,7 @@ mod tests {
|
||||
let (g1_C, _) = KzgBls12_381::commit(&pp.poly_ck, &c_poly, None, None).unwrap();
|
||||
let g1_C = g1_C.0;
|
||||
|
||||
//point at which we will open c_com
|
||||
// point at which we will open c_com
|
||||
let input_domain: GeneralEvaluationDomain<Fr> =
|
||||
EvaluationDomain::new(actual_degree).unwrap();
|
||||
|
||||
@@ -96,7 +96,11 @@ mod tests {
|
||||
}
|
||||
// compute all openings
|
||||
{
|
||||
let g1_qs = KZGCommit::multiple_open(&c_poly, &pp.poly_ck, p);
|
||||
let g1_qs = KZGCommit::<Bls12_381>::multiple_open::<G1Affine>(
|
||||
&c_poly,
|
||||
&pp.poly_ck.powers_of_g,
|
||||
p,
|
||||
);
|
||||
let g1_q = g1_qs[position];
|
||||
|
||||
// run the prover
|
||||
File diff suppressed because it is too large
356 src/multi/setup.rs Normal file
@@ -0,0 +1,356 @@
|
||||
use crate::util::trim;
|
||||
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
|
||||
use ark_ff::{PrimeField, UniformRand};
|
||||
use ark_poly::{
|
||||
univariate::DensePolynomial, EvaluationDomain, Evaluations as EvaluationsOnDomain,
|
||||
GeneralEvaluationDomain,
|
||||
};
|
||||
use ark_poly_commit::kzg10::*;
|
||||
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
|
||||
use std::{convert::TryInto, io::Write};
|
||||
// use crate::tools::{KzgBls12_381, UniPoly381};
|
||||
use ark_std::{cfg_into_iter, One, Zero};
|
||||
use std::{fs::File, io::Read, time::Instant};
|
||||
|
||||
// structure of public parameters
|
||||
#[allow(non_snake_case)]
|
||||
pub struct PublicParameters<E: PairingEngine> {
|
||||
pub poly_ck: Powers<'static, E>,
|
||||
pub domain_m: GeneralEvaluationDomain<E::Fr>,
|
||||
pub domain_n: GeneralEvaluationDomain<E::Fr>,
|
||||
pub domain_N: GeneralEvaluationDomain<E::Fr>,
|
||||
pub verifier_pp: VerifierPublicParameters<E>,
|
||||
pub lagrange_polynomials_n: Vec<DensePolynomial<E::Fr>>,
|
||||
pub lagrange_polynomials_m: Vec<DensePolynomial<E::Fr>>,
|
||||
pub id_poly: DensePolynomial<E::Fr>,
|
||||
pub N: usize,
|
||||
pub m: usize,
|
||||
pub n: usize,
|
||||
pub g2_powers: Vec<E::G2Affine>,
|
||||
}
|
||||
|
||||
pub struct LookupParameters<F: PrimeField> {
|
||||
m: usize,
|
||||
lagrange_polynomials_m: Vec<DensePolynomial<F>>,
|
||||
domain_m: GeneralEvaluationDomain<F>,
|
||||
id_poly: DensePolynomial<F>,
|
||||
}
|
||||
|
||||
impl<F: PrimeField> LookupParameters<F> {
|
||||
fn new(m: usize) -> Self {
|
||||
let domain_m: GeneralEvaluationDomain<F> = GeneralEvaluationDomain::new(m.clone()).unwrap();
|
||||
|
||||
// id_poly(X) = 1 for omega_m in range and 0 for omega_m not in range.
|
||||
let mut id_vec = Vec::new();
|
||||
for _ in 0..m.clone() {
|
||||
id_vec.push(F::one());
|
||||
}
|
||||
for _ in m.clone()..domain_m.size() {
|
||||
id_vec.push(F::zero());
|
||||
}
|
||||
let id_poly = EvaluationsOnDomain::from_vec_and_domain(id_vec, domain_m).interpolate();
|
||||
let mut lagrange_polynomials_m: Vec<DensePolynomial<F>> = Vec::new();
|
||||
|
||||
for i in 0..domain_m.size() {
|
||||
let evals: Vec<F> = cfg_into_iter!(0..domain_m.size())
|
||||
.map(|k| if k == i { F::one() } else { F::zero() })
|
||||
.collect();
|
||||
lagrange_polynomials_m
|
||||
.push(EvaluationsOnDomain::from_vec_and_domain(evals, domain_m).interpolate());
|
||||
}
|
||||
|
||||
Self {
|
||||
m,
|
||||
lagrange_polynomials_m,
|
||||
domain_m,
|
||||
id_poly,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// smaller set of public parameters used by verifier
|
||||
pub struct VerifierPublicParameters<E: PairingEngine> {
|
||||
pub poly_vk: VerifierKey<E>,
|
||||
pub domain_m_size: usize,
|
||||
}
|
||||
|
||||
impl<E: PairingEngine> PublicParameters<E> {
|
||||
pub fn regenerate_lookup_params(&mut self, m: usize) {
|
||||
let lp = LookupParameters::new(m);
|
||||
self.m = lp.m;
|
||||
self.lagrange_polynomials_m = lp.lagrange_polynomials_m;
|
||||
self.domain_m = lp.domain_m;
|
||||
self.id_poly = lp.id_poly;
|
||||
}
|
||||
|
||||
// store powers of g in a file
|
||||
pub fn store(&self, path: &str) {
|
||||
// 1. Powers of g
|
||||
let mut g_bytes = vec![];
|
||||
let mut f = File::create(path).expect("Unable to create file");
|
||||
let deg: u32 = self.poly_ck.powers_of_g.len().try_into().unwrap();
|
||||
let deg_bytes = deg.to_be_bytes();
|
||||
f.write_all(&deg_bytes).expect("Unable to write data");
|
||||
let deg32: usize = deg.try_into().unwrap();
|
||||
for i in 0..deg32 {
|
||||
self.poly_ck.powers_of_g[i]
|
||||
.into_projective()
|
||||
.into_affine()
|
||||
.serialize_uncompressed(&mut g_bytes)
|
||||
.ok();
|
||||
}
|
||||
f.write_all(&g_bytes).expect("Unable to write data");
|
||||
|
||||
// 2. Powers of gammag
|
||||
let deg_gamma: u32 = self.poly_ck.powers_of_gamma_g.len().try_into().unwrap();
|
||||
let mut gg_bytes = vec![];
|
||||
let deg_bytes = deg_gamma.to_be_bytes();
|
||||
f.write_all(&deg_bytes).expect("Unable to write data");
|
||||
let deg32: usize = deg.try_into().unwrap();
|
||||
for i in 0..deg32 {
|
||||
self.poly_ck.powers_of_gamma_g[i]
|
||||
.into_projective()
|
||||
.into_affine()
|
||||
.serialize_uncompressed(&mut gg_bytes)
|
||||
.ok();
|
||||
}
|
||||
f.write_all(&gg_bytes).expect("Unable to write data");
|
||||
|
||||
// 3. Verifier key
|
||||
let mut h_bytes = vec![];
|
||||
self.verifier_pp
|
||||
.poly_vk
|
||||
.h
|
||||
.serialize_uncompressed(&mut h_bytes)
|
||||
.ok();
|
||||
self.verifier_pp
|
||||
.poly_vk
|
||||
.beta_h
|
||||
.serialize_uncompressed(&mut h_bytes)
|
||||
.ok();
|
||||
f.write_all(&h_bytes).expect("Unable to write data");
|
||||
|
||||
// 4. g2 powers
|
||||
let mut g2_bytes = vec![];
|
||||
let deg2: u32 = self.g2_powers.len().try_into().unwrap();
|
||||
let deg2_bytes = deg2.to_be_bytes();
|
||||
f.write_all(&deg2_bytes).expect("Unable to write data");
|
||||
let deg2_32: usize = deg2.try_into().unwrap();
|
||||
for i in 0..deg2_32 {
|
||||
self.g2_powers[i]
|
||||
.into_projective()
|
||||
.into_affine()
|
||||
.serialize_uncompressed(&mut g2_bytes)
|
||||
.ok();
|
||||
}
|
||||
f.write_all(&g2_bytes).expect("Unable to write data");
|
||||
}
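Summarizing the byte layout that `store` writes (and that `load` below parses back, using its hard-coded uncompressed sizes of 96 bytes per G1 point and 192 bytes per G2 point):

// [u32 BE: number of powers_of_g]       then the powers_of_g, uncompressed G1
// [u32 BE: number of powers_of_gamma_g] then the powers_of_gamma_g, uncompressed G1
// [uncompressed G2: h]                  [uncompressed G2: beta_h]
// [u32 BE: number of g2_powers]         then the g2_powers, uncompressed G2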
|
||||
|
||||
// load powers of g from a file
|
||||
pub fn load(path: &str) -> (Powers<'static, E>, VerifierKey<E>, Vec<E::G2Affine>) {
|
||||
const G1_UNCOMPR_SIZE: usize = 96;
|
||||
const G2_UNCOMPR_SIZE: usize = 192;
|
||||
let mut data = Vec::new();
|
||||
let mut f = File::open(path).expect("Unable to open file");
|
||||
f.read_to_end(&mut data).expect("Unable to read data");
|
||||
|
||||
// 1. reading g powers
|
||||
let mut cur_counter: usize = 0;
|
||||
let deg_bytes: [u8; 4] = (&data[0..4]).try_into().unwrap();
|
||||
let deg: u32 = u32::from_be_bytes(deg_bytes);
|
||||
let mut powers_of_g = vec![];
|
||||
let deg32: usize = deg.try_into().unwrap();
|
||||
cur_counter += 4;
|
||||
for i in 0..deg32 {
|
||||
let buf_bytes =
|
||||
&data[cur_counter + i * G1_UNCOMPR_SIZE..cur_counter + (i + 1) * G1_UNCOMPR_SIZE];
|
||||
let tmp = E::G1Affine::deserialize_unchecked(buf_bytes).unwrap();
|
||||
powers_of_g.push(tmp);
|
||||
}
|
||||
cur_counter += deg32 * G1_UNCOMPR_SIZE;
|
||||
|
||||
// 2. reading gamma g powers
|
||||
let deg_bytes: [u8; 4] = (&data[cur_counter..cur_counter + 4]).try_into().unwrap();
|
||||
let deg: u32 = u32::from_be_bytes(deg_bytes);
|
||||
let mut powers_of_gamma_g = vec![];
|
||||
let deg32: usize = deg.try_into().unwrap();
|
||||
cur_counter += 4;
|
||||
for i in 0..deg32 {
|
||||
let buf_bytes =
|
||||
&data[cur_counter + i * G1_UNCOMPR_SIZE..cur_counter + (i + 1) * G1_UNCOMPR_SIZE];
|
||||
let tmp = E::G1Affine::deserialize_unchecked(buf_bytes).unwrap();
|
||||
powers_of_gamma_g.push(tmp);
|
||||
}
|
||||
cur_counter += deg32 * G1_UNCOMPR_SIZE;
|
||||
|
||||
// 3. reading verifier key
|
||||
let buf_bytes = &data[cur_counter..cur_counter + G2_UNCOMPR_SIZE];
|
||||
let h = E::G2Affine::deserialize_unchecked(buf_bytes).unwrap();
|
||||
cur_counter += G2_UNCOMPR_SIZE;
|
||||
let buf_bytes = &data[cur_counter..cur_counter + G2_UNCOMPR_SIZE];
|
||||
let beta_h = E::G2Affine::deserialize_unchecked(buf_bytes).unwrap();
|
||||
cur_counter += G2_UNCOMPR_SIZE;
|
||||
|
||||
// 4. reading G2 powers
|
||||
let deg2_bytes: [u8; 4] = (&data[cur_counter..cur_counter + 4]).try_into().unwrap();
|
||||
let deg2: u32 = u32::from_be_bytes(deg2_bytes);
|
||||
let mut g2_powers = vec![];
|
||||
let deg2_32: usize = deg2.try_into().unwrap();
|
||||
cur_counter += 4;
|
||||
for _ in 0..deg2_32 {
|
||||
let buf_bytes = &data[cur_counter..cur_counter + G2_UNCOMPR_SIZE];
|
||||
let tmp = E::G2Affine::deserialize_unchecked(buf_bytes).unwrap();
|
||||
g2_powers.push(tmp);
|
||||
cur_counter += G2_UNCOMPR_SIZE;
|
||||
}
|
||||
|
||||
let vk = VerifierKey {
|
||||
g: powers_of_g[0].clone(),
|
||||
gamma_g: powers_of_gamma_g[0].clone(),
|
||||
h,
|
||||
beta_h,
|
||||
prepared_h: h.into(),
|
||||
prepared_beta_h: beta_h.into(),
|
||||
};
|
||||
|
||||
let powers = Powers {
|
||||
powers_of_g: ark_std::borrow::Cow::Owned(powers_of_g),
|
||||
powers_of_gamma_g: ark_std::borrow::Cow::Owned(powers_of_gamma_g),
|
||||
};
|
||||
|
||||
(powers, vk, g2_powers)
|
||||
}
|
||||
|
||||
// setup algorithm for index_hiding_polycommit
|
||||
// also includes a bunch of precomputation.
|
||||
// @max_degree max degree of table polynomial C(X), also the size of the trusted
|
||||
// setup @N domain size on which proofs are constructed. Should not be
|
||||
// smaller than max_degree @m lookup size. Can be changed later
|
||||
// @n suppl domain for the unity proofs. Should be at least 6+log N
|
||||
#[allow(non_snake_case)]
|
||||
pub fn setup(max_degree: &usize, N: &usize, m: &usize, n: &usize) -> PublicParameters<E> {
|
||||
// Setup algorithm. To be replaced by output of a universal setup before being
|
||||
// production ready.
|
||||
|
||||
// let mut srs = KzgBls12_381::setup(4, true, rng).unwrap();
|
||||
let poly_ck: Powers<'static, E>;
|
||||
let poly_vk: VerifierKey<E>;
|
||||
let mut g2_powers: Vec<E::G2Affine> = Vec::new();
|
||||
|
||||
// try opening the file. If it exists load the setup from there, otherwise
|
||||
// generate
|
||||
let path = format!("srs/srs_{}_{}.setup", max_degree, E::Fq::size_in_bits());
|
||||
let res = File::open(path.clone());
|
||||
let store_to_file: bool;
|
||||
match res {
|
||||
Ok(_) => {
|
||||
let now = Instant::now();
|
||||
let (_poly_ck, _poly_vk, _g2_powers) = PublicParameters::load(&path);
|
||||
println!("time to load powers = {:?}", now.elapsed());
|
||||
store_to_file = false;
|
||||
g2_powers = _g2_powers;
|
||||
poly_ck = _poly_ck;
|
||||
poly_vk = _poly_vk;
|
||||
},
|
||||
Err(_) => {
|
||||
let rng = &mut ark_std::test_rng();
|
||||
let now = Instant::now();
|
||||
let srs = KZG10::<E, DensePolynomial<E::Fr>>::setup(max_degree.clone(), true, rng)
|
||||
.unwrap();
|
||||
println!("time to setup powers = {:?}", now.elapsed());
|
||||
|
||||
// trim down to size
|
||||
let (poly_ck2, poly_vk2) =
|
||||
trim::<E, DensePolynomial<E::Fr>>(&srs, max_degree.clone());
|
||||
poly_ck = Powers {
|
||||
powers_of_g: ark_std::borrow::Cow::Owned(poly_ck2.powers_of_g.into()),
|
||||
powers_of_gamma_g: ark_std::borrow::Cow::Owned(
|
||||
poly_ck2.powers_of_gamma_g.into(),
|
||||
),
|
||||
};
|
||||
poly_vk = poly_vk2;
|
||||
|
||||
// need some powers of g2
|
||||
// arkworks setup doesn't give these powers but the setup does use a fixed
|
||||
// randomness to generate them. so we can generate powers of g2
|
||||
// directly.
|
||||
let rng = &mut ark_std::test_rng();
|
||||
let beta = E::Fr::rand(rng);
|
||||
let mut temp = poly_vk.h.clone();
|
||||
|
||||
for _ in 0..poly_ck.powers_of_g.len() {
|
||||
g2_powers.push(temp.clone());
|
||||
temp = temp.mul(beta).into_affine();
|
||||
}
|
||||
|
||||
store_to_file = true;
|
||||
},
|
||||
}
|
||||
|
||||
// domain where openings {w_i}_{i in I} are embedded
|
||||
let domain_n: GeneralEvaluationDomain<E::Fr> =
|
||||
GeneralEvaluationDomain::new(n.clone()).unwrap();
|
||||
let domain_N: GeneralEvaluationDomain<E::Fr> =
|
||||
GeneralEvaluationDomain::new(N.clone()).unwrap();
|
||||
|
||||
// precomputation to speed up prover
|
||||
// lagrange_polynomials[i] = polynomial equal to 0 at w^j for j!= i and 1 at
|
||||
// w^i
|
||||
let mut lagrange_polynomials_n: Vec<DensePolynomial<E::Fr>> = Vec::new();
|
||||
|
||||
for i in 0..domain_n.size() {
|
||||
let evals: Vec<E::Fr> = cfg_into_iter!(0..domain_n.size())
|
||||
.map(|k| if k == i { E::Fr::one() } else { E::Fr::zero() })
|
||||
.collect();
|
||||
lagrange_polynomials_n
|
||||
.push(EvaluationsOnDomain::from_vec_and_domain(evals, domain_n).interpolate());
|
||||
}
|
||||
|
||||
let lp = LookupParameters::new(*m);
|
||||
|
||||
let verifier_pp = VerifierPublicParameters {
|
||||
poly_vk,
|
||||
domain_m_size: lp.domain_m.size(),
|
||||
};
|
||||
|
||||
let pp = PublicParameters {
|
||||
poly_ck,
|
||||
domain_m: lp.domain_m,
|
||||
domain_n,
|
||||
lagrange_polynomials_n,
|
||||
lagrange_polynomials_m: lp.lagrange_polynomials_m,
|
||||
id_poly: lp.id_poly,
|
||||
domain_N,
|
||||
verifier_pp,
|
||||
N: N.clone(),
|
||||
n: n.clone(),
|
||||
m: lp.m.clone(),
|
||||
g2_powers: g2_powers.clone(),
|
||||
};
|
||||
if store_to_file {
|
||||
pp.store(&path);
|
||||
}
|
||||
pp
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[allow(non_snake_case)]
|
||||
pub fn test_load() {
|
||||
use ark_bls12_381::Bls12_381;
|
||||
|
||||
let n: usize = 4;
|
||||
let N: usize = 1 << n;
|
||||
let powers_size: usize = 4 * N; // SRS SIZE
|
||||
let temp_m = n; // dummy
|
||||
let pp = PublicParameters::<Bls12_381>::setup(&powers_size, &N, &temp_m, &n);
|
||||
let path = "powers.log";
|
||||
pp.store(path);
|
||||
let loaded = PublicParameters::<Bls12_381>::load(path);
|
||||
assert_eq!(pp.poly_ck.powers_of_g, loaded.0.powers_of_g);
|
||||
assert_eq!(pp.poly_ck.powers_of_gamma_g, loaded.0.powers_of_gamma_g);
|
||||
assert_eq!(pp.verifier_pp.poly_vk.h, loaded.1.h);
|
||||
assert_eq!(pp.verifier_pp.poly_vk.beta_h, loaded.1.beta_h);
|
||||
assert_eq!(pp.g2_powers, loaded.2);
|
||||
std::fs::remove_file(&path).expect("File can not be deleted");
|
||||
}
|
||||
@@ -1,61 +1,56 @@
|
||||
/*
|
||||
This file includes the Caulk's unity prover and verifier for multi openings.
|
||||
The protocol is described in Figure 4.
|
||||
*/
|
||||
// This file includes the Caulk's unity prover and verifier for multi openings.
|
||||
// The protocol is described in Figure 4.
|
||||
|
||||
use ark_bls12_381::{Fr, FrParameters, G1Affine};
|
||||
use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve};
|
||||
use ark_ff::Fp256;
|
||||
use super::setup::PublicParameters;
|
||||
use crate::{util::convert_to_bigints, CaulkTranscript, KZGCommit};
|
||||
use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine, ProjectiveCurve};
|
||||
use ark_poly::{
|
||||
univariate::DensePolynomial, EvaluationDomain, Evaluations as EvaluationsOnDomain, Polynomial,
|
||||
UVPolynomial,
|
||||
};
|
||||
use ark_std::{One, Zero};
|
||||
|
||||
use crate::caulk_multi_setup::PublicParameters;
|
||||
use crate::tools::{
|
||||
bipoly_commit, convert_to_bigints, hash_caulk_multi, kzg_open_g1_native,
|
||||
kzg_partial_open_g1_native, kzg_partial_verify_g1_native, kzg_verify_g1_native, UniPoly381,
|
||||
};
|
||||
|
||||
// output structure of prove_unity
|
||||
pub struct ProofMultiUnity {
|
||||
pub g1_u_bar: G1Affine,
|
||||
pub g1_h_1: G1Affine,
|
||||
pub g1_h_2: G1Affine,
|
||||
pub g1_u_bar_alpha: G1Affine,
|
||||
pub g1_h_2_alpha: G1Affine,
|
||||
pub v1: Fr,
|
||||
pub v2: Fr,
|
||||
pub v3: Fr,
|
||||
pub pi_1: G1Affine,
|
||||
pub pi_2: G1Affine,
|
||||
pub pi_3: G1Affine,
|
||||
pub pi_4: G1Affine,
|
||||
pub pi_5: G1Affine,
|
||||
pub struct ProofMultiUnity<E: PairingEngine> {
|
||||
pub g1_u_bar: E::G1Affine,
|
||||
pub g1_h_1: E::G1Affine,
|
||||
pub g1_h_2: E::G1Affine,
|
||||
pub g1_u_bar_alpha: E::G1Affine,
|
||||
pub g1_h_2_alpha: E::G1Affine,
|
||||
pub v1: E::Fr,
|
||||
pub v2: E::Fr,
|
||||
pub v3: E::Fr,
|
||||
pub pi_1: E::G1Affine,
|
||||
pub pi_2: E::G1Affine,
|
||||
pub pi_3: E::G1Affine,
|
||||
pub pi_4: E::G1Affine,
|
||||
pub pi_5: E::G1Affine,
|
||||
}
|
||||
|
||||
// Prove knowledge of vec_u_evals such that g1_u = g1^(sum_j u_j mu_j(x)) and u_j^N = 1
|
||||
// Prove knowledge of vec_u_evals such that g1_u = g1^(sum_j u_j mu_j(x)) and
|
||||
// u_j^N = 1
|
||||
#[allow(non_snake_case)]
|
||||
pub fn prove_multiunity(
|
||||
pp: &PublicParameters,
|
||||
hash_input: &mut Fr,
|
||||
g1_u: &G1Affine,
|
||||
mut vec_u_evals: Vec<Fp256<FrParameters>>,
|
||||
u_poly_quotient: UniPoly381,
|
||||
) -> ProofMultiUnity {
|
||||
// The test_rng is deterministic. Should be replaced with actual random generator.
|
||||
pub fn prove_multiunity<E: PairingEngine>(
|
||||
pp: &PublicParameters<E>,
|
||||
transcript: &mut CaulkTranscript<E::Fr>,
|
||||
g1_u: &E::G1Affine,
|
||||
mut vec_u_evals: Vec<E::Fr>,
|
||||
u_poly_quotient: DensePolynomial<E::Fr>,
|
||||
) -> ProofMultiUnity<E> {
|
||||
// The test_rng is deterministic. Should be replaced with actual random
|
||||
// generator.
|
||||
let rng_arkworks = &mut ark_std::test_rng();
|
||||
|
||||
// let rng_arkworks = &mut ark_std::test_rng();
|
||||
let n = pp.n;
|
||||
let deg_blinders = 11 / n;
|
||||
let z_Vm: UniPoly381 = pp.domain_m.vanishing_polynomial().into();
|
||||
let z_Vm: DensePolynomial<E::Fr> = pp.domain_m.vanishing_polynomial().into();
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// 1. Compute polynomials u_s(X) = vec_u_polys[s] such that u_s( nu_i ) = w_i^{2^s}
|
||||
// 1. Compute polynomials u_s(X) = vec_u_polys[s] such that u_s( nu_i ) =
|
||||
// w_i^{2^s}
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
let mut vec_u_polys: Vec<UniPoly381> = Vec::new();
|
||||
let mut vec_u_polys: Vec<DensePolynomial<E::Fr>> = Vec::new();
|
||||
|
||||
vec_u_polys.push(
|
||||
EvaluationsOnDomain::from_vec_and_domain(vec_u_evals.clone(), pp.domain_m).interpolate()
|
||||
@@ -70,7 +65,7 @@ pub fn prove_multiunity(
|
||||
vec_u_polys.push(
|
||||
EvaluationsOnDomain::from_vec_and_domain(vec_u_evals.clone(), pp.domain_m)
|
||||
.interpolate()
|
||||
+ (&z_Vm * &UniPoly381::rand(deg_blinders, rng_arkworks)),
|
||||
+ (&z_Vm * &DensePolynomial::<E::Fr>::rand(deg_blinders, rng_arkworks)),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -78,16 +73,18 @@ pub fn prove_multiunity(
|
||||
// 2. Compute U_bar(X,Y) = sum_{s= 1}^n u_{s-1} rho_s(Y)
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// bivariate polynomials such that bipoly_U_bar[j] = a_j(Y) where U_bar(X,Y) = sum_j X^j a_j(Y)
|
||||
// bivariate polynomials such that bipoly_U_bar[j] = a_j(Y) where U_bar(X,Y) =
|
||||
// sum_j X^j a_j(Y)
|
||||
let mut bipoly_U_bar = Vec::new();
|
||||
|
||||
// vec_u_polys[0] has an extended degree because it is blinded so use vec_u_polys[1] for the length
|
||||
// vec_u_polys[0] has an extended degree because it is blinded so use
|
||||
// vec_u_polys[1] for the length
|
||||
for j in 0..vec_u_polys[1].len() {
|
||||
/*
|
||||
Denoting u_{s-1}(X) = sum_j u_{s-1, j} X^j then
|
||||
temp is a_j(Y) = sum_{s=1}^n u_{s-1, j} * rho_s(Y)
|
||||
*/
|
||||
let mut temp = DensePolynomial::from_coefficients_slice(&[Fr::zero()]);
|
||||
let mut temp = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
|
||||
|
||||
for s in 1..n {
|
||||
let u_s_j = DensePolynomial::from_coefficients_slice(&[vec_u_polys[s][j]]);
|
||||
@@ -106,7 +103,7 @@ pub fn prove_multiunity(
|
||||
let id_poly = pp.id_poly.clone();
|
||||
|
||||
// Hs(X) = (u_{s-1}^2(X) - u_s(X)) / zVm(X). Abort if doesn't divide.
|
||||
let mut vec_H_s_polys: Vec<DensePolynomial<Fr>> = Vec::new();
|
||||
let mut vec_H_s_polys: Vec<DensePolynomial<E::Fr>> = Vec::new();
|
||||
for s in 1..n {
|
||||
let (poly_H_s, remainder) = (&(&vec_u_polys[s - 1] * &vec_u_polys[s - 1])
|
||||
- &vec_u_polys[s])
|
||||
@@ -138,7 +135,7 @@ pub fn prove_multiunity(
|
||||
|
||||
// In case length of H_1(X) and H_2(X) is different pad with zeros.
|
||||
for _ in vec_H_s_polys[0].len()..vec_H_s_polys[1].len() {
|
||||
let h_0_j = DensePolynomial::from_coefficients_slice(&[Fr::zero()]);
|
||||
let h_0_j = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
|
||||
bipoly_h_2.push(h_0_j);
|
||||
}
|
||||
|
||||
@@ -156,31 +153,26 @@ pub fn prove_multiunity(
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// 5. Commit to U_bar(X^n, X) and h_2(X^n, X)
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
let g1_u_bar = bipoly_commit(pp, &bipoly_U_bar, pp.domain_n.size());
|
||||
let g1_h_2 = bipoly_commit(pp, &bipoly_h_2, pp.domain_n.size());
|
||||
let g1_u_bar = KZGCommit::<E>::bipoly_commit(pp, &bipoly_U_bar, pp.domain_n.size());
|
||||
let g1_h_2 = KZGCommit::<E>::bipoly_commit(pp, &bipoly_h_2, pp.domain_n.size());
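// A brief informal note on this step, matching the comments above: a bivariate
// polynomial is handled here as U_bar(X, Y) = sum_j X^j a_j(Y), and committing to
// it means committing to the univariate polynomial obtained by substituting
// X -> X^{|Vn|}, Y -> X, i.e. U_bar(X^{|Vn|}, X) = sum_j X^{j*|Vn|} a_j(X).
// The coefficient blocks stay disjoint as long as deg a_j(Y) < |Vn|, which is
// why pp.domain_n.size() is passed to bipoly_commit.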
|
||||
|
||||
////////////////////////////
|
||||
// 6. alpha = Hash(g1_u, g1_u_bar, g1_h_2)
|
||||
////////////////////////////
|
||||
|
||||
let alpha = hash_caulk_multi::<Fr>(
|
||||
hash_input.clone(),
|
||||
Some(&[&g1_u, &g1_u_bar, &g1_h_2].to_vec()),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
*hash_input = alpha.clone();
|
||||
transcript.append_element(b"u", g1_u);
|
||||
transcript.append_element(b"u_bar", &g1_u_bar);
|
||||
transcript.append_element(b"h2", &g1_h_2);
|
||||
let alpha = transcript.get_and_append_challenge(b"alpha");
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// 7. Compute h_1(Y)
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// poly_U_alpha = sum_{s=1}^n u_{s-1}(alpha) rho_s(Y)
|
||||
let mut poly_U_alpha = DensePolynomial::from_coefficients_slice(&[Fr::zero()]);
|
||||
let mut poly_U_alpha = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
|
||||
|
||||
// poly_Usq_alpha = sum_{s=1}^n u_{s-1}^2(alpha) rho_s(Y)
|
||||
let mut poly_Usq_alpha = DensePolynomial::from_coefficients_slice(&[Fr::zero()]);
|
||||
let mut poly_Usq_alpha = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
|
||||
|
||||
for s in 0..n {
|
||||
let u_s_alpha = vec_u_polys[s].evaluate(&alpha);
|
||||
@@ -191,8 +183,8 @@ pub fn prove_multiunity(
|
||||
poly_Usq_alpha = &poly_Usq_alpha + &(&temp * &pp.lagrange_polynomials_n[s]);
|
||||
}
|
||||
|
||||
// divide h1(Y) = [ U^2(alpha,Y) - sum_{s=1}^n u_{s-1}^2(alpha) rho_s(Y) ) ] / zVn(Y)
|
||||
// return an error if division fails
|
||||
// divide h1(Y) = [ U^2(alpha,Y) - sum_{s=1}^n u_{s-1}^2(alpha) rho_s(Y) ) ] /
|
||||
// zVn(Y) return an error if division fails
|
||||
let (poly_h_1, remainder) = (&(&poly_U_alpha * &poly_U_alpha) - &poly_Usq_alpha)
|
||||
.divide_by_vanishing_poly(pp.domain_n)
|
||||
.unwrap();
|
||||
@@ -209,15 +201,15 @@ pub fn prove_multiunity(
|
||||
.into_affine();
|
||||
|
||||
////////////////////////////
|
||||
//9. beta = Hash( g1_h_1 )
|
||||
// 9. beta = Hash( g1_h_1 )
|
||||
////////////////////////////
|
||||
|
||||
let beta = hash_caulk_multi::<Fr>(hash_input.clone(), Some(&[&g1_h_1].to_vec()), None, None);
|
||||
|
||||
*hash_input = beta.clone();
|
||||
transcript.append_element(b"h1", &g1_h_1);
|
||||
let beta = transcript.get_and_append_challenge(b"beta");
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// 10. Compute p(Y) = (U^2(alpha, beta) - h1(Y) zVn(beta) ) - (u_bar(alpha, beta sigma^(-1)) + id(alpha) rho_n(Y)) - zVm(alpha )h2(alpha,Y)
|
||||
// 10. Compute p(Y) = (U^2(alpha, beta) - h1(Y) zVn(beta) ) - (u_bar(alpha, beta
|
||||
// sigma^(-1)) + id(alpha) rho_n(Y)) - zVm(alpha )h2(alpha,Y)
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// p(Y) = U^2(alpha, beta)
|
||||
@@ -229,7 +221,7 @@ pub fn prove_multiunity(
|
||||
// p(Y) = p(Y) - ( u_bar(alpha, beta sigma) + id(alpha) rho_n(beta))
|
||||
|
||||
// u_bar_alpha_shiftbeta = u_bar(alpha, beta sigma)
|
||||
let mut u_bar_alpha_shiftbeta = Fr::zero();
|
||||
let mut u_bar_alpha_shiftbeta = E::Fr::zero();
|
||||
let beta_shift = beta * &pp.domain_n.element(1);
|
||||
for s in 1..n {
|
||||
let u_s_alpha = vec_u_polys[s].evaluate(&alpha);
|
||||
@@ -246,7 +238,7 @@ pub fn prove_multiunity(
|
||||
|
||||
////////////////////////////
|
||||
// p(Y) = p(Y) - h1(Y) zVn(beta)
|
||||
let z_Vn: UniPoly381 = pp.domain_n.vanishing_polynomial().into();
|
||||
let z_Vn: DensePolynomial<E::Fr> = pp.domain_n.vanishing_polynomial().into();
|
||||
let temp = &DensePolynomial::from_coefficients_slice(&[z_Vn.evaluate(&beta)]) * &poly_h_1;
|
||||
poly_p = &poly_p - &temp;
|
||||
|
||||
@@ -254,7 +246,7 @@ pub fn prove_multiunity(
|
||||
// p(Y) = p(Y) - z_Vm(alpha) h_2(alpha, Y)
|
||||
|
||||
// poly_h_2_alpha = h_2(alpha, Y)
|
||||
let mut poly_h_2_alpha = DensePolynomial::from_coefficients_slice(&[Fr::zero()]);
|
||||
let mut poly_h_2_alpha = DensePolynomial::from_coefficients_slice(&[E::Fr::zero()]);
|
||||
for s in 0..vec_H_s_polys.len() {
|
||||
let h_s_j = DensePolynomial::from_coefficients_slice(&[vec_H_s_polys[s].evaluate(&alpha)]);
|
||||
poly_h_2_alpha = &poly_h_2_alpha + &(&h_s_j * &pp.lagrange_polynomials_n[s]);
|
||||
@@ -265,92 +257,83 @@ pub fn prove_multiunity(
|
||||
poly_p = &poly_p - &temp;
|
||||
|
||||
// check p(beta) = 0
|
||||
assert!(poly_p.evaluate(&beta) == Fr::zero());
|
||||
assert!(poly_p.evaluate(&beta) == E::Fr::zero());
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// 11. Open KZG commitments
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// KZG.Open( srs, u(X), deg = bot, X = alpha )
|
||||
let (evals_1, pi_1) = kzg_open_g1_native(&pp.poly_ck, &vec_u_polys[0], None, [&alpha].to_vec());
|
||||
let (evals_1, pi_1) = KZGCommit::open_g1_batch(&pp.poly_ck, &vec_u_polys[0], None, &[alpha]);
|
||||
|
||||
// KZG.Open( srs, U_bar(X,Y), deg = bot, X = alpha )
|
||||
let (g1_u_bar_alpha, pi_2, poly_u_bar_alpha) =
|
||||
kzg_partial_open_g1_native(&pp, &bipoly_U_bar, pp.domain_n.size(), &alpha);
|
||||
KZGCommit::partial_open_g1(&pp, &bipoly_U_bar, pp.domain_n.size(), &alpha);
|
||||
|
||||
// KZG.Open( srs, h_2(X,Y), deg = bot, X = alpha )
|
||||
let (g1_h_2_alpha, pi_3, _) =
|
||||
kzg_partial_open_g1_native(&pp, &bipoly_h_2, pp.domain_n.size(), &alpha);
|
||||
KZGCommit::partial_open_g1(&pp, &bipoly_h_2, pp.domain_n.size(), &alpha);
|
||||
|
||||
// KZG.Open( srs, U_bar(alpha,Y), deg = bot, Y = [1, beta, beta * sigma] ) should evaluate to (0, v2, v3)
|
||||
let (evals_2, pi_4) = kzg_open_g1_native(
|
||||
// KZG.Open( srs, U_bar(alpha,Y), deg = bot, Y = [1, beta, beta * sigma] )
|
||||
// should evaluate to (0, v2, v3)
|
||||
let (evals_2, pi_4) = KZGCommit::open_g1_batch(
|
||||
&pp.poly_ck,
|
||||
&poly_u_bar_alpha,
|
||||
Some(&(pp.domain_n.size() - 1)),
|
||||
[&Fr::one(), &beta, &(beta * &pp.domain_n.element(1))].to_vec(),
|
||||
&[E::Fr::one(), beta, (beta * &pp.domain_n.element(1))],
|
||||
);
|
||||
assert!(evals_2[0] == Fr::zero());
|
||||
assert!(evals_2[0] == E::Fr::zero());
|
||||
|
||||
// KZG.Open(srs, p(Y), deg = n-1, Y = beta)
|
||||
let (evals_3, pi_5) = kzg_open_g1_native(
|
||||
let (evals_3, pi_5) = KZGCommit::open_g1_batch(
|
||||
&pp.poly_ck,
|
||||
&poly_p,
|
||||
Some(&(pp.domain_n.size() - 1)),
|
||||
[&beta].to_vec(),
|
||||
&[beta],
|
||||
);
|
||||
assert!(evals_3[0] == Fr::zero());
|
||||
assert!(evals_3[0] == E::Fr::zero());
|
||||
|
||||
let proof = ProofMultiUnity {
|
||||
g1_u_bar: g1_u_bar,
|
||||
g1_h_1: g1_h_1,
|
||||
g1_h_2: g1_h_2,
|
||||
g1_u_bar_alpha: g1_u_bar_alpha,
|
||||
g1_h_2_alpha: g1_h_2_alpha,
|
||||
g1_u_bar,
|
||||
g1_h_1,
|
||||
g1_h_2,
|
||||
g1_u_bar_alpha,
|
||||
g1_h_2_alpha,
|
||||
v1: evals_1[0],
|
||||
v2: evals_2[1],
|
||||
v3: evals_2[2],
|
||||
pi_1: pi_1,
|
||||
pi_2: pi_2,
|
||||
pi_3: pi_3,
|
||||
pi_4: pi_4,
|
||||
pi_5: pi_5,
|
||||
pi_1,
|
||||
pi_2,
|
||||
pi_3,
|
||||
pi_4,
|
||||
pi_5,
|
||||
};
|
||||
|
||||
proof
|
||||
}
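Informally, the chain of polynomials built in step 1 and the quotients \(H_s\) encode repeated squaring: on the domain \(V_m\),

\[
H_s(X) \;=\; \frac{u_{s-1}^2(X) - u_s(X)}{z_{V_m}(X)}
\quad\Longrightarrow\quad
u_s(\nu_i) = u_{s-1}(\nu_i)^2 = w_i^{2^s},
\]

so after \(\log N\) squarings the values reach \(w_i^{N}\), and tying the last polynomial to `id_poly` (which equals 1 on the active positions) is what enforces \(w_i^{N} = 1\).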
|
||||
|
||||
// Verify that the prover knows vec_u_evals such that g1_u = g1^(sum_j u_j mu_j(x)) and u_j^N = 1
|
||||
// Verify that the prover knows vec_u_evals such that g1_u = g1^(sum_j u_j
|
||||
// mu_j(x)) and u_j^N = 1
|
||||
#[allow(non_snake_case)]
|
||||
pub fn verify_multiunity(
|
||||
pp: &PublicParameters,
|
||||
hash_input: &mut Fr,
|
||||
g1_u: G1Affine,
|
||||
pi_unity: &ProofMultiUnity,
|
||||
pub fn verify_multiunity<E: PairingEngine>(
|
||||
pp: &PublicParameters<E>,
|
||||
transcript: &mut CaulkTranscript<E::Fr>,
|
||||
g1_u: &E::G1Affine,
|
||||
pi_unity: &ProofMultiUnity<E>,
|
||||
) -> bool {
|
||||
////////////////////////////
|
||||
// alpha = Hash(g1_u, g1_u_bar, g1_h_2)
|
||||
////////////////////////////
|
||||
|
||||
let alpha = hash_caulk_multi::<Fr>(
|
||||
hash_input.clone(),
|
||||
Some(&[&g1_u, &pi_unity.g1_u_bar, &pi_unity.g1_h_2].to_vec()),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
*hash_input = alpha.clone();
|
||||
transcript.append_element(b"u", g1_u);
|
||||
transcript.append_element(b"u_bar", &pi_unity.g1_u_bar);
|
||||
transcript.append_element(b"h2", &pi_unity.g1_h_2);
|
||||
let alpha = transcript.get_and_append_challenge(b"alpha");
|
||||
|
||||
////////////////////////////
|
||||
// beta = Hash( g1_h_1 )
|
||||
////////////////////////////
|
||||
let beta = hash_caulk_multi::<Fr>(
|
||||
hash_input.clone(),
|
||||
Some(&[&pi_unity.g1_h_1].to_vec()),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
|
||||
*hash_input = beta.clone();
|
||||
transcript.append_element(b"h1", &pi_unity.g1_h_1);
|
||||
let beta = transcript.get_and_append_challenge(b"beta");
|
||||
|
||||
/////////////////////////////
|
||||
// Compute [P]_1
|
||||
@@ -378,45 +361,48 @@ pub fn verify_multiunity(
|
||||
// Check the KZG openings
|
||||
////////////////////////////
|
||||
|
||||
let check1 = kzg_verify_g1_native(
|
||||
&pp,
|
||||
g1_u.clone(),
|
||||
let check1 = KZGCommit::<E>::verify_g1(
|
||||
&pp.poly_ck.powers_of_g,
|
||||
&pp.g2_powers,
|
||||
&g1_u,
|
||||
None,
|
||||
[alpha].to_vec(),
|
||||
[pi_unity.v1].to_vec(),
|
||||
pi_unity.pi_1,
|
||||
&[alpha],
|
||||
&[pi_unity.v1],
|
||||
&pi_unity.pi_1,
|
||||
);
|
||||
let check2 = kzg_partial_verify_g1_native(
|
||||
let check2 = KZGCommit::partial_verify_g1(
|
||||
&pp,
|
||||
pi_unity.g1_u_bar,
|
||||
&pi_unity.g1_u_bar,
|
||||
pp.domain_n.size(),
|
||||
alpha,
|
||||
pi_unity.g1_u_bar_alpha,
|
||||
pi_unity.pi_2,
|
||||
&alpha,
|
||||
&pi_unity.g1_u_bar_alpha,
|
||||
&pi_unity.pi_2,
|
||||
);
|
||||
let check3 = kzg_partial_verify_g1_native(
|
||||
let check3 = KZGCommit::partial_verify_g1(
|
||||
&pp,
|
||||
pi_unity.g1_h_2,
|
||||
&pi_unity.g1_h_2,
|
||||
pp.domain_n.size(),
|
||||
alpha,
|
||||
pi_unity.g1_h_2_alpha,
|
||||
pi_unity.pi_3,
|
||||
&alpha,
|
||||
&pi_unity.g1_h_2_alpha,
|
||||
&pi_unity.pi_3,
|
||||
);
|
||||
let check4 = kzg_verify_g1_native(
|
||||
&pp,
|
||||
pi_unity.g1_u_bar_alpha,
|
||||
let check4 = KZGCommit::<E>::verify_g1(
|
||||
&pp.poly_ck.powers_of_g,
|
||||
&pp.g2_powers,
|
||||
&pi_unity.g1_u_bar_alpha,
|
||||
Some(&(pp.domain_n.size() - 1)),
|
||||
[Fr::one(), beta, beta * &pp.domain_n.element(1)].to_vec(),
|
||||
[Fr::zero(), pi_unity.v2, pi_unity.v3].to_vec(),
|
||||
pi_unity.pi_4,
|
||||
&[E::Fr::one(), beta, beta * &pp.domain_n.element(1)],
|
||||
&[E::Fr::zero(), pi_unity.v2, pi_unity.v3],
|
||||
&pi_unity.pi_4,
|
||||
);
|
||||
let check5 = kzg_verify_g1_native(
|
||||
&pp,
|
||||
g1_P.into_affine(),
|
||||
let check5 = KZGCommit::<E>::verify_g1(
|
||||
&pp.poly_ck.powers_of_g,
|
||||
&pp.g2_powers,
|
||||
&g1_P.into_affine(),
|
||||
Some(&(pp.domain_n.size() - 1)),
|
||||
[beta].to_vec(),
|
||||
[Fr::zero()].to_vec(),
|
||||
pi_unity.pi_5,
|
||||
&[beta],
|
||||
&[E::Fr::zero()],
|
||||
&pi_unity.pi_5,
|
||||
);
|
||||
|
||||
return check1 && check2 && check3 && check4 && check5;
|
||||
@@ -424,24 +410,30 @@ pub fn verify_multiunity(
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use crate::caulk_multi_setup::setup_multi_lookup;
|
||||
use crate::caulk_multi_unity::{prove_multiunity, verify_multiunity};
|
||||
use crate::tools::{convert_to_bigints, UniPoly381};
|
||||
use super::{prove_multiunity, verify_multiunity};
|
||||
use crate::{util::convert_to_bigints, CaulkTranscript};
|
||||
use ark_bls12_377::Bls12_377;
|
||||
use ark_bls12_381::Bls12_381;
|
||||
use ark_ec::{msm::VariableBaseMSM, PairingEngine, ProjectiveCurve};
|
||||
use ark_poly::{
|
||||
univariate::DensePolynomial, EvaluationDomain, Evaluations as EvaluationsOnDomain,
|
||||
UVPolynomial,
|
||||
};
|
||||
use ark_std::test_rng;
|
||||
use rand::Rng;
|
||||
use std::time::Instant;
|
||||
|
||||
use ark_bls12_381::FrParameters;
|
||||
use ark_ec::{msm::VariableBaseMSM, ProjectiveCurve};
|
||||
use ark_ff::Fp256;
|
||||
use ark_poly::{EvaluationDomain, Evaluations as EvaluationsOnDomain, UVPolynomial};
|
||||
|
||||
//#[test]
|
||||
#[allow(non_snake_case)]
|
||||
#[test]
|
||||
pub fn test_unity() {
|
||||
let mut rng = rand::thread_rng();
|
||||
fn test_unity() {
|
||||
test_unity_helper::<Bls12_377>();
|
||||
test_unity_helper::<Bls12_381>();
|
||||
}
|
||||
|
||||
let n: usize = 8; //bitlength of poly degree
|
||||
#[allow(non_snake_case)]
|
||||
fn test_unity_helper<E: PairingEngine>() {
|
||||
let mut rng = test_rng();
|
||||
|
||||
let n: usize = 8; // bitlength of poly degree
|
||||
let max_degree: usize = (1 << n) + 2;
|
||||
let N: usize = (1 << n) - 1;
|
||||
|
||||
@@ -450,7 +442,7 @@ pub mod tests {
|
||||
|
||||
// run the setup
|
||||
let now = Instant::now();
|
||||
let pp = setup_multi_lookup(&max_degree, &N, &m, &n);
|
||||
let pp = crate::multi::PublicParameters::<E>::setup(&max_degree, &N, &m, &n);
|
||||
println!(
|
||||
"time to setup single openings of table size {:?} = {:?}",
|
||||
N + 1,
|
||||
@@ -462,20 +454,19 @@ pub mod tests {
|
||||
////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// choose [u1, ..., um] such that uj**N = 1
|
||||
let mut vec_u_evals: Vec<Fp256<FrParameters>> = Vec::new();
|
||||
let mut vec_u_evals: Vec<E::Fr> = Vec::new();
|
||||
for _ in 0..m {
|
||||
let j = rng.gen_range(0, pp.domain_N.size());
|
||||
let j = rng.gen_range(0..pp.domain_N.size());
|
||||
vec_u_evals.push(pp.domain_N.element(j));
|
||||
}
|
||||
|
||||
// choose random quotient polynomial of degree 1.
|
||||
let rng_arkworks = &mut ark_std::test_rng();
|
||||
let u_poly_quotient = UniPoly381::rand(5, rng_arkworks);
|
||||
let u_poly_quotient = DensePolynomial::<E::Fr>::rand(5, &mut rng);
|
||||
|
||||
// X^m - 1
|
||||
let z_Vm: UniPoly381 = pp.domain_m.vanishing_polynomial().into();
|
||||
let z_Vm: DensePolynomial<E::Fr> = pp.domain_m.vanishing_polynomial().into();
|
||||
|
||||
//commit to polynomial u(X) = sum_j uj muj(X) + u_quotient(X) z_Vm(X)
|
||||
// commit to polynomial u(X) = sum_j uj muj(X) + u_quotient(X) z_Vm(X)
|
||||
let u_poly = &EvaluationsOnDomain::from_vec_and_domain(vec_u_evals.clone(), pp.domain_m)
|
||||
.interpolate()
|
||||
+ &(&u_poly_quotient * &z_Vm);
|
||||
@@ -490,14 +481,22 @@ pub mod tests {
|
||||
////////////////////////////////////////////////////////////////////////////////////
|
||||
// run the prover
|
||||
////////////////////////////////////////////////////////////////////////////////////
|
||||
let pi_unity = prove_multiunity(&pp, &g1_u, vec_u_evals.clone(), u_poly_quotient);
|
||||
let mut prover_transcript = CaulkTranscript::new();
|
||||
let pi_unity = prove_multiunity::<E>(
|
||||
&pp,
|
||||
&mut prover_transcript,
|
||||
&g1_u,
|
||||
vec_u_evals.clone(),
|
||||
u_poly_quotient,
|
||||
);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////
|
||||
// run the verifier
|
||||
////////////////////////////////////////////////////////////////////////////////////
|
||||
let mut verifier_transcript = CaulkTranscript::new();
|
||||
println!(
|
||||
"unity proof verifies {:?}",
|
||||
verify_multiunity(&pp, g1_u, pi_unity)
|
||||
verify_multiunity::<E>(&pp, &mut verifier_transcript, &g1_u, &pi_unity)
|
||||
);
|
||||
}
|
||||
}
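The refactor in this file replaces the old hash_caulk_multi chaining with the merlin-backed CaulkTranscript. A minimal sketch of how the two challenges are derived, assuming only the transcript API already used in the diff above (the wrapper function itself is illustrative, not part of the crate):

use crate::CaulkTranscript;
use ark_bls12_381::{Fr, G1Affine};

fn derive_unity_challenges(
    g1_u: &G1Affine,
    g1_u_bar: &G1Affine,
    g1_h_2: &G1Affine,
    g1_h_1: &G1Affine,
) -> (Fr, Fr) {
    // Prover and verifier append the same elements under the same labels,
    // so both recover identical alpha and beta without interaction.
    let mut transcript = CaulkTranscript::<Fr>::new();
    transcript.append_element(b"u", g1_u);
    transcript.append_element(b"u_bar", g1_u_bar);
    transcript.append_element(b"h2", g1_h_2);
    let alpha = transcript.get_and_append_challenge(b"alpha");

    transcript.append_element(b"h1", g1_h_1);
    let beta = transcript.get_and_append_challenge(b"beta");
    (alpha, beta)
}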
|
||||
@@ -1,7 +1,6 @@
|
||||
/*
|
||||
This file includes a prover and verifier for demonstrating knowledge of an opening of a Pedersen commitment.
|
||||
The protocol is informally described in Appendix A.2, Proof of Opening of a Pedersen Commitment
|
||||
*/
|
||||
// This file includes a prover and verifier for demonstrating knowledge of an
|
||||
// opening of a Pedersen commitment. The protocol is informally described in
|
||||
// Appendix A.2, Proof of Opening of a Pedersen Commitment
|
||||
|
||||
use crate::CaulkTranscript;
|
||||
use ark_ec::{AffineCurve, ProjectiveCurve};
|
||||
@@ -1,22 +1,23 @@
|
||||
/*
|
||||
This file includes the Caulk prover and verifier for single openings.
|
||||
The protocol is described in Figure 1.
|
||||
*/
|
||||
// This file includes the Caulk prover and verifier for single openings.
|
||||
// The protocol is described in Figure 1.
|
||||
|
||||
use crate::caulk_single_setup::{PublicParameters, VerifierPublicParameters};
|
||||
use crate::caulk_single_unity::{
|
||||
caulk_single_unity_prove, caulk_single_unity_verify, CaulkProofUnity, PublicParametersUnity,
|
||||
VerifierPublicParametersUnity,
|
||||
pub mod setup;
|
||||
pub mod unity;
|
||||
|
||||
use crate::{
|
||||
pedersen::{PedersenCommit, PedersenProof},
|
||||
CaulkTranscript,
|
||||
};
|
||||
use crate::pedersen::{PedersenCommit, PedersenProof};
|
||||
use crate::CaulkTranscript;
|
||||
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
|
||||
use ark_ff::{Field, PrimeField};
|
||||
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain};
|
||||
use ark_std::rand::RngCore;
|
||||
use ark_std::UniformRand;
|
||||
use ark_std::{One, Zero};
|
||||
use ark_std::{rand::RngCore, One, UniformRand, Zero};
|
||||
use setup::{PublicParameters, VerifierPublicParameters};
|
||||
use std::ops::Neg;
|
||||
use unity::{
|
||||
caulk_single_unity_prove, caulk_single_unity_verify, CaulkProofUnity, PublicParametersUnity,
|
||||
VerifierPublicParametersUnity,
|
||||
};
|
||||
|
||||
// Structure of opening proofs output by prove.
|
||||
#[allow(non_snake_case)]
|
||||
@@ -28,11 +29,12 @@ pub struct CaulkProof<E: PairingEngine> {
|
||||
pub pi_unity: CaulkProofUnity<E>,
|
||||
}
|
||||
|
||||
//Proves knowledge of (i, Q, z, r) such that
|
||||
// Proves knowledge of (i, Q, z, r) such that
|
||||
// 1) Q is a KZG opening proof that g1_C opens to z at i
|
||||
// 2) cm = g^z h^r
|
||||
|
||||
//Takes as input opening proof Q. Does not need knowledge of contents of C = g1_C.
|
||||
// Takes as input opening proof Q. Does not need knowledge of contents of C =
|
||||
// g1_C.
|
||||
#[allow(non_snake_case)]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn caulk_single_prove<E: PairingEngine, R: RngCore>(
|
||||
@@ -74,7 +76,8 @@ pub fn caulk_single_prove<E: PairingEngine, R: RngCore>(
|
||||
// Pedersen prove
|
||||
///////////////////////////////
|
||||
|
||||
// hash the instance and the proof elements to determine hash inputs for Pedersen prover
|
||||
// hash the instance and the proof elements to determine hash inputs for
|
||||
// Pedersen prover
|
||||
|
||||
transcript.append_element(b"0", &E::Fr::zero());
|
||||
transcript.append_element(b"C", g1_C);
|
||||
@@ -89,7 +92,8 @@ pub fn caulk_single_prove<E: PairingEngine, R: RngCore>(
|
||||
// Unity prove
|
||||
///////////////////////////////
|
||||
|
||||
// hash the last round of the pedersen proof to determine hash input to the unity prover
|
||||
// hash the last round of the pedersen proof to determine hash input to the
|
||||
// unity prover
|
||||
transcript.append_element(b"t1", &pi_ped.t1);
|
||||
transcript.append_element(b"t2", &pi_ped.t2);
|
||||
|
||||
@@ -115,7 +119,7 @@ pub fn caulk_single_prove<E: PairingEngine, R: RngCore>(
|
||||
}
|
||||
}
|
||||
|
||||
//Verifies that the prover knows of (i, Q, z, r) such that
|
||||
// Verifies that the prover knows of (i, Q, z, r) such that
|
||||
// 1) Q is a KZG opening proof that g1_C opens to z at i
|
||||
// 2) cm = g^z h^r
|
||||
#[allow(non_snake_case)]
|
||||
@@ -143,7 +147,8 @@ pub fn caulk_single_verify<E: PairingEngine>(
|
||||
// Pedersen check
|
||||
///////////////////////////////
|
||||
|
||||
// hash the instance and the proof elements to determine hash inputs for Pedersen prover
|
||||
// hash the instance and the proof elements to determine hash inputs for
|
||||
// Pedersen prover
|
||||
transcript.append_element(b"0", &E::Fr::zero());
|
||||
transcript.append_element(b"C", g1_C);
|
||||
transcript.append_element(b"T", &proof.g1_T);
|
||||
@@ -157,7 +162,8 @@ pub fn caulk_single_verify<E: PairingEngine>(
|
||||
// Unity check
|
||||
///////////////////////////////
|
||||
|
||||
// hash the last round of the pedersen proof to determine hash input to the unity prover
|
||||
// hash the last round of the pedersen proof to determine hash input to the
|
||||
// unity prover
|
||||
transcript.append_element(b"t1", &proof.pi_ped.t1);
|
||||
transcript.append_element(b"t2", &proof.pi_ped.t2);
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
/*
|
||||
This file includes the setup algorithm for Caulk with single openings.
|
||||
A full description of the setup is not formally given in the paper.
|
||||
*/
|
||||
// This file includes the setup algorithm for Caulk with single openings.
|
||||
// A full description of the setup is not formally given in the paper.
|
||||
|
||||
use crate::PedersenParam;
|
||||
use crate::{util::trim, PedersenParam};
|
||||
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
|
||||
use ark_ff::{Field, UniformRand};
|
||||
use ark_poly::{
|
||||
@@ -11,8 +9,7 @@ use ark_poly::{
|
||||
GeneralEvaluationDomain, UVPolynomial,
|
||||
};
|
||||
use ark_poly_commit::kzg10::*;
|
||||
use ark_std::{cfg_into_iter, rand::RngCore, One, Zero};
|
||||
use ark_std::{end_timer, start_timer};
|
||||
use ark_std::{cfg_into_iter, end_timer, rand::RngCore, start_timer, One, Zero};
|
||||
#[cfg(feature = "parallel")]
|
||||
use rayon::iter::{IntoParallelIterator, ParallelIterator};
|
||||
use std::cmp::max;
|
||||
@@ -41,36 +38,6 @@ pub struct VerifierPublicParameters<E: PairingEngine> {
|
||||
pub powers_of_g2: Vec<E::G2Affine>,
|
||||
}
|
||||
|
||||
// Reduces full srs down to smaller srs for smaller polynomials
|
||||
// Copied from arkworks library (where same function is private)
|
||||
fn trim<E: PairingEngine, P: UVPolynomial<E::Fr>>(
|
||||
srs: &UniversalParams<E>,
|
||||
mut supported_degree: usize,
|
||||
) -> (Powers<'static, E>, VerifierKey<E>) {
|
||||
if supported_degree == 1 {
|
||||
supported_degree += 1;
|
||||
}
|
||||
|
||||
let powers_of_g = srs.powers_of_g[..=supported_degree].to_vec();
|
||||
let powers_of_gamma_g = (0..=supported_degree)
|
||||
.map(|i| srs.powers_of_gamma_g[&i])
|
||||
.collect();
|
||||
|
||||
let powers = Powers {
|
||||
powers_of_g: ark_std::borrow::Cow::Owned(powers_of_g),
|
||||
powers_of_gamma_g: ark_std::borrow::Cow::Owned(powers_of_gamma_g),
|
||||
};
|
||||
let vk = VerifierKey {
|
||||
g: srs.powers_of_g[0],
|
||||
gamma_g: srs.powers_of_gamma_g[&0],
|
||||
h: srs.h,
|
||||
beta_h: srs.beta_h,
|
||||
prepared_h: srs.prepared_h.clone(),
|
||||
prepared_beta_h: srs.prepared_beta_h.clone(),
|
||||
};
|
||||
(powers, vk)
|
||||
}
|
||||
|
||||
// setup algorithm for Caulk with single openings
|
||||
// also includes a bunch of precomputation.
|
||||
#[allow(non_snake_case)]
|
||||
@@ -96,7 +63,8 @@ pub fn caulk_single_setup<E: PairingEngine, R: RngCore>(
|
||||
// We take the larger of the two.
|
||||
let poly_ck_size = max(actual_degree, 2 * domain_Vn.size() + 3);
|
||||
|
||||
// Setup algorithm. To be replaced by output of a universal setup before being production ready.
|
||||
// Setup algorithm. To be replaced by output of a universal setup before being
|
||||
// production ready.
|
||||
let powers_time = start_timer!(|| "setup powers");
|
||||
let srs = KZG10::<E, DensePolynomial<E::Fr>>::setup(max(max_degree, poly_ck_size), true, rng)
|
||||
.unwrap();
|
||||
@@ -115,7 +83,8 @@ pub fn caulk_single_setup<E: PairingEngine, R: RngCore>(
|
||||
let ped_h: E::G1Affine = E::G1Projective::rand(rng).into_affine();
|
||||
|
||||
// precomputation to speed up prover
|
||||
// lagrange_polynomials_Vn[i] = polynomial equal to 0 at w^j for j!= i and 1 at w^i
|
||||
// lagrange_polynomials_Vn[i] = polynomial equal to 0 at w^j for j!= i and 1 at
|
||||
// w^i
|
||||
let mut lagrange_polynomials_Vn: Vec<DensePolynomial<E::Fr>> = Vec::new();
|
||||
|
||||
// precomputation to speed up verifier.
|
||||
@@ -149,9 +118,9 @@ pub fn caulk_single_setup<E: PairingEngine, R: RngCore>(
    }
    lagrange_scalars_Vn.push(temp.inverse().unwrap());

    // poly_prod = (X - 1) (X - w) (X - w^2) (X - w^3) (X - w^4) (X - w^(5 + logN)) (X - w^(6 + logN))
    // for efficiency not including (X - w^i) for i > 6 + logN
    // prover sets these evaluations to 0 anyway.
    // poly_prod = (X - 1) (X - w) (X - w^2) (X - w^3) (X - w^4) (X - w^(5 + logN))
    // (X - w^(6 + logN)) for efficiency not including (X - w^i) for i > 6 +
    // logN prover sets these evaluations to 0 anyway.
    let mut poly_prod = DensePolynomial::from_coefficients_slice(&[E::Fr::one()]);
    for i in 0..domain_Vn.size() {
        if i < 5 {
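
The loop that begins here builds poly_prod by multiplying in one linear factor (X - w^i) per selected index. Below is a hedged, self-contained sketch of the same construction; the concrete logN value, the exact index condition, and the assertions are illustrative assumptions rather than this commit's code.

use ark_bls12_381::Fr;
use ark_poly::{
    univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain, Polynomial, UVPolynomial,
};
use ark_std::{One, Zero};

fn main() {
    let domain = GeneralEvaluationDomain::<Fr>::new(16).unwrap();
    let omegas: Vec<Fr> = domain.elements().collect();
    let log_n = 3usize; // stand-in value; the real setup derives logN from the table size

    // Start from the constant polynomial 1 and multiply in (X - w^i) only for
    // the indices named in the comment above: 0..4, 5 + logN and 6 + logN.
    let mut poly_prod = DensePolynomial::from_coefficients_slice(&[Fr::one()]);
    for (i, w_i) in omegas.iter().enumerate() {
        if i < 5 || i == 5 + log_n || i == 6 + log_n {
            let linear = DensePolynomial::from_coefficients_slice(&[-*w_i, Fr::one()]);
            poly_prod = &poly_prod * &linear;
        }
    }

    // poly_prod vanishes at the included points (e.g. w^0 = 1) ...
    assert!(poly_prod.evaluate(&omegas[0]).is_zero());
    // ... but not at an excluded point such as w^7.
    assert!(!poly_prod.evaluate(&omegas[7]).is_zero());
}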
@@ -181,8 +150,8 @@ pub fn caulk_single_setup<E: PairingEngine, R: RngCore>(
    let ped_g = poly_ck.powers_of_g[0];

    // need some powers of g2
    // arkworks setup doesn't give these powers but the setup does use a fixed randomness to generate them.
    // so we can generate powers of g2 directly.
    // arkworks setup doesn't give these powers but the setup does use a fixed
    // randomness to generate them. so we can generate powers of g2 directly.
    let rng = &mut ark_std::test_rng();
    let beta = E::Fr::rand(rng);
    let mut temp = poly_vk.h;
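
The fragment above regenerates beta from the fixed test randomness and then walks up powers of the G2 element h. A minimal sketch of that idea, assuming BLS12-381; the helper name powers_of_g2 and the number of powers are illustrative choices here, not this commit's code.

use ark_bls12_381::{Fr, G2Affine};
use ark_ec::{AffineCurve, ProjectiveCurve};
use ark_ff::UniformRand;

// Repeated scalar multiplication by beta turns h into [h, h^beta, h^(beta^2), ...].
fn powers_of_g2(h: G2Affine, beta: Fr, num_powers: usize) -> Vec<G2Affine> {
    let mut powers = vec![h];
    let mut prev = h;
    for _ in 1..num_powers {
        prev = prev.mul(beta).into_affine();
        powers.push(prev);
    }
    powers
}

fn main() {
    // The same fixed test randomness reproduces the same beta as the setup used.
    let rng = &mut ark_std::test_rng();
    let beta = Fr::rand(rng);
    let h = G2Affine::prime_subgroup_generator();
    let g2_powers = powers_of_g2(h, beta, 4);
    assert_eq!(g2_powers.len(), 4);
    assert_eq!(g2_powers[1], h.mul(beta).into_affine());
}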
@@ -1,11 +1,8 @@
/*
This file includes the Caulk's unity prover and verifier for single openings.
The protocol is described in Figure 2.
*/
// This file includes the Caulk's unity prover and verifier for single openings.
// The protocol is described in Figure 2.

use crate::caulk_single_setup::{PublicParameters, VerifierPublicParameters};
use crate::kzg::KZGCommit;
use crate::CaulkTranscript;
use super::setup::{PublicParameters, VerifierPublicParameters};
use crate::{kzg::KZGCommit, CaulkTranscript};
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::Field;
use ark_poly::{
@@ -13,8 +10,7 @@ use ark_poly::{
    GeneralEvaluationDomain, Polynomial, UVPolynomial,
};
use ark_poly_commit::kzg10::*;
use ark_std::{cfg_into_iter, One, Zero};
use ark_std::{rand::RngCore, UniformRand};
use ark_std::{cfg_into_iter, rand::RngCore, One, UniformRand, Zero};
#[cfg(feature = "parallel")]
use rayon::iter::{IntoParallelIterator, ParallelIterator};

@@ -167,7 +163,8 @@ pub fn caulk_single_unity_prove<E: PairingEngine, R: RngCore>(
    let mut p_poly =
        &(&f_poly - &a_poly) * &(&pp.lagrange_polynomials_Vn[0] + &pp.lagrange_polynomials_Vn[1]);

    // p(X) = p(X) + ( (1 - sigma) f(X) - f(sigma^(-2)X) + f(sigma^(-1) X) ) rho_3(X)
    // p(X) = p(X) + ( (1 - sigma) f(X) - f(sigma^(-2)X) + f(sigma^(-1) X) )
    // rho_3(X)
    p_poly = &p_poly
        + &(&(&(&(&DensePolynomial::from_coefficients_slice(&[(E::Fr::one() - sigma)])
            * &f_poly)
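
The chains of &(...) in this hunk are ark-poly's reference-based polynomial operators. The small sketch below, with arbitrary polynomials not taken from this commit, shows that the addition, subtraction and multiplication used here behave as expected.

use ark_bls12_381::Fr;
use ark_poly::{univariate::DensePolynomial, Polynomial, UVPolynomial};
use ark_std::One;

fn main() {
    // f(X) = 1 + 2X, g(X) = 3X^2 -- arbitrary examples.
    let f = DensePolynomial::from_coefficients_slice(&[Fr::one(), Fr::from(2u64)]);
    let g = DensePolynomial::from_coefficients_slice(&[Fr::from(0u64), Fr::from(0u64), Fr::from(3u64)]);

    // Arithmetic is defined on references, which is why the prover code above is
    // written as chains of `&(...) + &(...)` and `&(...) * &(...)`.
    let sum = &f + &g;
    let diff = &f - &g;
    let prod = &f * &g;

    let x = Fr::from(7u64);
    assert_eq!(sum.evaluate(&x), f.evaluate(&x) + g.evaluate(&x));
    assert_eq!(diff.evaluate(&x), f.evaluate(&x) - g.evaluate(&x));
    assert_eq!(prod.evaluate(&x), f.evaluate(&x) * g.evaluate(&x));
}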
@@ -186,7 +183,8 @@ pub fn caulk_single_unity_prove<E: PairingEngine, R: RngCore>(
    p_poly = &p_poly
        + &(&(&(&f_poly * &f_poly_shift_1) - &f_poly_shift_2) * &pp.lagrange_polynomials_Vn[4]);

    // p(X) = p(X) + ( f(X) - f(sigma^(-1) X) * f(sigma^(-1)X) ) prod_(i not in [5, .. , logN + 4]) (X - sigma^i)
    // p(X) = p(X) + ( f(X) - f(sigma^(-1) X) * f(sigma^(-1)X) ) prod_(i not in
    // [5, .. , logN + 4]) (X - sigma^i)
    p_poly = &p_poly + &(&(&f_poly - &(&f_poly_shift_1 * &f_poly_shift_1)) * &pp.poly_prod);

    // p(X) = p(X) + ( f(sigma^(-1) X) - 1 ) rho_(logN + 6)(X)
@@ -201,8 +199,8 @@ pub fn caulk_single_unity_prove<E: PairingEngine, R: RngCore>(
    ////////////////////////////
    // Commit to f(X) and h(X)
    ////////////////////////////
    let g1_F = KZGCommit::<E>::commit(&pp.poly_ck, &f_poly);
    let h_hat_com = KZGCommit::<E>::commit(&pp.poly_ck, &h_hat_poly);
    let g1_F = KZGCommit::<E>::commit_g1(&pp.poly_ck, &f_poly);
    let h_hat_com = KZGCommit::<E>::commit_g1(&pp.poly_ck, &h_hat_poly);

    // g1_H is a commitment to h_hat_poly + X^(d-1) z(X)
    let g1_H = (h_hat_com.into_projective() + pp.gxd.mul(-*a) + pp.gxpen.mul(*b)).into_affine();
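
The adjustment of h_hat_com into g1_H relies on KZG commitments being additively homomorphic: adding the group elements adds the committed polynomials. Below is a hedged sketch of that property using arkworks' KZG10 directly; the degrees, the non-hiding commitments, and the inline trimming are illustrative choices, not this commit's code.

use ark_bls12_381::Bls12_381;
use ark_ec::{AffineCurve, PairingEngine};
use ark_poly::{univariate::DensePolynomial, UVPolynomial};
use ark_poly_commit::kzg10::{Powers, KZG10};
use ark_std::test_rng;

type F = <Bls12_381 as PairingEngine>::Fr;
type Poly = DensePolynomial<F>;

fn main() {
    let rng = &mut test_rng();
    let srs = KZG10::<Bls12_381, Poly>::setup(32, true, rng).unwrap();

    // Inline, non-hiding committer key for degree <= 32 (the crate's own `trim`
    // does the same job on the full SRS).
    let powers: Powers<Bls12_381> = Powers {
        powers_of_g: ark_std::borrow::Cow::Owned(srs.powers_of_g[..=32].to_vec()),
        powers_of_gamma_g: ark_std::borrow::Cow::Owned(
            (0..=32).map(|i| srs.powers_of_gamma_g[&i]).collect::<Vec<_>>(),
        ),
    };

    let f = Poly::rand(16, rng);
    let g = Poly::rand(16, rng);
    let (com_f, _) = KZG10::<Bls12_381, Poly>::commit(&powers, &f, None, None).unwrap();
    let (com_g, _) = KZG10::<Bls12_381, Poly>::commit(&powers, &g, None, None).unwrap();
    let (com_sum, _) = KZG10::<Bls12_381, Poly>::commit(&powers, &(&f + &g), None, None).unwrap();

    // Additive homomorphism: the commitment to f + g is the sum of the commitments,
    // which is what lets the prover shift h_hat_com into a commitment to
    // h_hat_poly + X^(d-1) z(X) by adding group elements.
    assert_eq!(
        com_f.0.into_projective() + com_g.0.into_projective(),
        com_sum.0.into_projective()
    );
}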
@@ -267,14 +265,14 @@ pub fn caulk_single_unity_prove<E: PairingEngine, R: RngCore>(
    // p_alpha(X) = p_alpha(X) + ( v1 f(X) - v2 ) rho5(alpha)
    p_alpha_poly = &p_alpha_poly + &(&(&(&f_poly * &pv1) - &pv2) * &prho5);

    // p_alpha(X) = p_alpha(X) + ( f(X) - v1^2 ) prod_(i not in [5, .. , logN + 4]) (alpha - sigma^i)
    // p_alpha(X) = p_alpha(X) + ( f(X) - v1^2 ) prod_(i not in [5, .. , logN +
    // 4]) (alpha - sigma^i)
    p_alpha_poly = &p_alpha_poly + &(&(&f_poly - &(&pv1 * &pv1)) * &ppolyprod);

    /*
    Differing slightly from paper
    Paper uses p_alpha(X) = p_alpha(X) + ( v1 - 1 ) rho_(n)(alpha) assuming that logN = n - 6
    We use p_alpha(X) = p_alpha(X) + ( v1 - 1 ) rho_(logN + 6)(alpha) to allow for any value of logN
    */
    // Differing slightly from paper
    // Paper uses p_alpha(X) = p_alpha(X) + ( v1 - 1 ) rho_(n)(alpha) assuming that
    // logN = n - 6 We use p_alpha(X) = p_alpha(X) + ( v1 - 1 ) rho_(logN +
    // 6)(alpha) to allow for any value of logN
    p_alpha_poly = &p_alpha_poly
        + &(&(&pv1 - &(DensePolynomial::from_coefficients_slice(&[E::Fr::one()]))) * &prhologN6);

@@ -363,9 +361,11 @@ pub fn caulk_single_unity_verify<E: PairingEngine>(
    // pprod = prod_(i not in [5,..,logN+4]) (alpha - w^i)
    let pprod = vk.poly_prod.evaluate(&alpha);

    // P = H^(-z(alpha)) * F^(rho0(alpha) + L_1(alpha) + (1 - w)L_2(alpha) + L_3(alpha) + v1 L_4(alpha)
    // + prod_(i not in [5,..,logN+4]) (alpha - w^i))
    // * g^( (v1 -v2)L_2(alpha) + (v2 - w v1)L_3(alpha) - v2 L_4(alpha) + (v1 - 1)L_(logN+5)(alpha)
    // P = H^(-z(alpha)) * F^(rho0(alpha) + L_1(alpha) + (1 - w)L_2(alpha) +
    // L_3(alpha) + v1 L_4(alpha) + prod_(i not in
    // [5,..,logN+4]) (alpha - w^i))
    // * g^( (v1 -v2)L_2(alpha) + (v2 - w v1)L_3(alpha) - v2
    // L_4(alpha) + (v1 - 1)L_(logN+5)(alpha)
    // - v1^2 * prod_(i not in [5,..,logN+4]) (alpha - w^i) )
    let g1_p = proof.g1_H.mul(-zalpha)
        + proof
@@ -397,9 +397,9 @@ pub fn caulk_single_unity_verify<E: PairingEngine>(

    let g1_q = proof.pi2;

    // check that e(P Q3^(-alpha), g2)e( g^(-(rho0 + rho1) - zH(alpha) x^(d-1)), A ) e( Q3, g2^x ) = 1
    // Had to move A from affine to projective and back to affine to get it to compile.
    // No idea what difference this makes.
    // check that e(P Q3^(-alpha), g2)e( g^(-(rho0 + rho1) - zH(alpha) x^(d-1)), A )
    // e( Q3, g2^x ) = 1 Had to move A from affine to projective and back to
    // affine to get it to compile. No idea what difference this makes.
    let eq1 = vec![
        (
            (g1_p + g1_q.mul(alpha)).into_affine().into(),
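
The eq1 vector assembled here feeds a product-of-pairings check of the form e(.,.) e(.,.) e(.,.) = 1. Below is a self-contained sketch of such a check with arkworks' product_of_pairings, on toy inputs that are assumptions for illustration rather than the verifier's actual terms.

use ark_bls12_381::{Bls12_381, Fr, G1Affine, G2Affine};
use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::UniformRand;
use ark_std::{test_rng, One};

type G1Prep = <Bls12_381 as PairingEngine>::G1Prepared;
type G2Prep = <Bls12_381 as PairingEngine>::G2Prepared;

fn main() {
    let rng = &mut test_rng();
    let (a, b) = (Fr::rand(rng), Fr::rand(rng));
    let g = G1Affine::prime_subgroup_generator();
    let h = G2Affine::prime_subgroup_generator();

    // e(g^a, h^b) * e(g^(-ab), h) should be the identity of the target group,
    // mirroring the shape of the eq1 check above.
    let pairs: Vec<(G1Prep, G2Prep)> = vec![
        (g.mul(a).into_affine().into(), h.mul(b).into_affine().into()),
        (g.mul(-(a * b)).into_affine().into(), h.into()),
    ];
    assert!(Bls12_381::product_of_pairings(pairs.iter()).is_one());
}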
44
src/util.rs
Normal file
@@ -0,0 +1,44 @@
use ark_ec::PairingEngine;
use ark_ff::PrimeField;
use ark_poly::UVPolynomial;
use ark_poly_commit::kzg10::*;

// Reduces full srs down to smaller srs for smaller polynomials
// Copied from arkworks library (where same function is private)
pub(crate) fn trim<E: PairingEngine, P: UVPolynomial<E::Fr>>(
    srs: &UniversalParams<E>,
    mut supported_degree: usize,
) -> (Powers<'static, E>, VerifierKey<E>) {
    if supported_degree == 1 {
        supported_degree += 1;
    }

    let powers_of_g = srs.powers_of_g[..=supported_degree].to_vec();
    let powers_of_gamma_g = (0..=supported_degree)
        .map(|i| srs.powers_of_gamma_g[&i])
        .collect();

    let powers = Powers {
        powers_of_g: ark_std::borrow::Cow::Owned(powers_of_g),
        powers_of_gamma_g: ark_std::borrow::Cow::Owned(powers_of_gamma_g),
    };
    let vk = VerifierKey {
        g: srs.powers_of_g[0],
        gamma_g: srs.powers_of_gamma_g[&0],
        h: srs.h,
        beta_h: srs.beta_h,
        prepared_h: srs.prepared_h.clone(),
        prepared_beta_h: srs.prepared_beta_h.clone(),
    };
    (powers, vk)
}
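
A minimal sketch of how this trim helper might be exercised from inside the crate (it is pub(crate), imported above as crate::util::trim); the curve and the 2^10 / 2^8 degrees are arbitrary choices made for illustration, not this commit's code.

use ark_bls12_381::Bls12_381;
use ark_ec::PairingEngine;
use ark_poly::univariate::DensePolynomial;
use ark_poly_commit::kzg10::KZG10;
use ark_std::test_rng;

type F = <Bls12_381 as PairingEngine>::Fr;
type Poly = DensePolynomial<F>;

fn main() {
    let rng = &mut test_rng();
    // Generate a universal SRS that is larger than needed ...
    let srs = KZG10::<Bls12_381, Poly>::setup(1 << 10, true, rng).unwrap();
    // ... then trim it down to the degree the committer actually supports.
    // (within the crate: use crate::util::trim;)
    let (powers, vk) = trim::<Bls12_381, Poly>(&srs, 1 << 8);

    assert_eq!(powers.powers_of_g.len(), (1 << 8) + 1);
    assert_eq!(vk.g, srs.powers_of_g[0]);
}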

////////////////////////////////////////////////
//

// copied from arkworks
pub(crate) fn convert_to_bigints<F: PrimeField>(p: &[F]) -> Vec<F::BigInt> {
    ark_std::cfg_iter!(p)
        .map(|s| s.into_repr())
        .collect::<Vec<_>>()
}
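
A typical reason for converting scalars into their BigInt representation is arkworks' variable-base multi-scalar multiplication, which expects scalars in that form. The sketch below shows that use under the assumption that the helper is called from inside the crate (it is pub(crate)); whether this commit uses it in exactly this way is not shown in this diff.

use ark_bls12_381::{Fr, G1Affine, G1Projective};
use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve};
use ark_ff::UniformRand;
use ark_std::{test_rng, Zero};

fn main() {
    let rng = &mut test_rng();
    let scalars: Vec<Fr> = (0..8).map(|_| Fr::rand(rng)).collect();
    let bases: Vec<G1Affine> = (0..8)
        .map(|_| G1Projective::rand(rng).into_affine())
        .collect();

    // multi_scalar_mul wants the scalars as BigInts, hence the helper above.
    // (within the crate: use crate::util::convert_to_bigints;)
    let scalars_repr = convert_to_bigints(&scalars);
    let msm = VariableBaseMSM::multi_scalar_mul(&bases, &scalars_repr);

    // Same result as the naive sum of per-element scalar multiplications.
    let mut naive = G1Projective::zero();
    for (base, s) in bases.iter().zip(&scalars) {
        naive += base.mul(*s);
    }
    assert_eq!(msm, naive);
}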