commit 187ca3e3e6 (parent 150aefb741)
Author: adria0
Date: 2021-11-24 20:56:59 +01:00
16 changed files with 1806 additions and 1607 deletions

.cargo/katex-header.html (new file)

@@ -0,0 +1,15 @@
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/katex.min.css" integrity="sha384-9eLZqc9ds8eNjO3TmqPeYcDj8n+Qfa4nuSiGYa6DjLNcv9BtN69ZIulL9+8CqC9Y" crossorigin="anonymous">
<script src="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/katex.min.js" integrity="sha384-K3vbOmF2BtaVai+Qk37uypf7VrgBubhQreNQe9aGsz9lB63dIFiQVlJbr92dw2Lx" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/contrib/auto-render.min.js" integrity="sha384-kmZOZB5ObwgQnS/DuDg6TScgOiWWBiVt0plIRkZCmE6rDZGrEOQeHM5PcHi+nyqe" crossorigin="anonymous"></script>
<script>
document.addEventListener("DOMContentLoaded", function() {
renderMathInElement(document.body, {
delimiters: [
{left: "$$", right: "$$", display: true},
{left: "\\(", right: "\\)", display: false},
{left: "$", right: "$", display: false},
{left: "\\[", right: "\\]", display: true}
]
});
});
</script>


@@ -1,25 +1,21 @@
#![allow(dead_code, unused_imports)]
#![allow(clippy::len_without_is_empty)]
use std::fmt::Display;
use super::ec::{g1f, g2f, G1Point, G2Point};
use super::field::Field;
use super::matrix::Matrix;
use super::plonk::{f101, F17, P17};
use super::poly::Poly;
use super::ec::Field;
// (q_l * a) + (q_r * b) + (q_o * c) + (q_m * a * b) + q_c = 0
// where a,b,c are the left, right and output wires of the gate
pub struct Gate {
pub q_l: F17,
pub q_r: F17,
pub q_o: F17,
pub q_m: F17,
pub q_c: F17,
pub struct Gate<F: Field> {
pub q_l: F,
pub q_r: F,
pub q_o: F,
pub q_m: F,
pub q_c: F,
}
impl Gate {
pub fn new(q_l: F17, q_r: F17, q_o: F17, q_m: F17, q_c: F17) -> Self {
impl<F: Field> Gate<F> {
pub fn new(q_l: F, q_r: F, q_o: F, q_m: F, q_c: F) -> Self {
Gate {
q_l,
q_r,
@@ -30,28 +26,28 @@ impl Gate {
}
pub fn sum_a_b() -> Self {
Gate {
q_l: F17::one(),
q_r: F17::one(),
q_o: -F17::one(),
q_m: F17::zero(),
q_c: F17::zero(),
q_l: F::one(),
q_r: F::one(),
q_o: -F::one(),
q_m: F::zero(),
q_c: F::zero(),
}
}
pub fn mul_a_b() -> Self {
Gate {
q_l: F17::zero(),
q_r: F17::zero(),
q_o: -F17::one(),
q_m: F17::one(),
q_c: F17::zero(),
q_l: F::zero(),
q_r: F::zero(),
q_o: -F::one(),
q_m: F::one(),
q_c: F::zero(),
}
}
pub fn bind_a(value: F17) -> Self {
pub fn bind_a(value: F) -> Self {
Gate {
q_l: F17::one(),
q_r: F17::zero(),
q_o: F17::zero(),
q_m: F17::one(),
q_l: F::one(),
q_r: F::zero(),
q_o: F::zero(),
q_m: F::one(),
q_c: value,
}
}
@@ -64,7 +60,7 @@ pub enum CopyOf {
C(usize),
}
impl Display for Gate {
impl<F: Field> Display for Gate<F> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
@@ -75,37 +71,40 @@ impl Display for Gate {
}
#[derive(Debug)]
pub struct Constrains {
pub q_l: Vec<F17>,
pub q_r: Vec<F17>,
pub q_o: Vec<F17>,
pub q_m: Vec<F17>,
pub q_c: Vec<F17>,
pub struct Constrains<F: Field> {
pub q_l: Vec<F>,
pub q_r: Vec<F>,
pub q_o: Vec<F>,
pub q_m: Vec<F>,
pub q_c: Vec<F>,
pub c_a: Vec<CopyOf>,
pub c_b: Vec<CopyOf>,
pub c_c: Vec<CopyOf>,
}
pub struct Assigment {
pub a: F17,
pub b: F17,
pub c: F17,
pub struct Assigment<F: Field> {
pub a: F,
pub b: F,
pub c: F,
}
impl Assigment {
pub fn new(a: F17, b: F17, c: F17) -> Self {
impl<F: Field> Assigment<F> {
pub fn new(a: F, b: F, c: F) -> Self {
Self { a, b, c }
}
}
pub struct Assigments {
pub a: Vec<F17>,
pub b: Vec<F17>,
pub c: Vec<F17>,
pub struct Assigments<F: Field> {
pub a: Vec<F>,
pub b: Vec<F>,
pub c: Vec<F>,
}
impl Constrains {
pub fn new(gates: &[Gate], copy_constraints: (Vec<CopyOf>, Vec<CopyOf>, Vec<CopyOf>)) -> Self {
impl<F: Field> Constrains<F> {
pub fn new(
gates: &[Gate<F>],
copy_constraints: (Vec<CopyOf>, Vec<CopyOf>, Vec<CopyOf>),
) -> Self {
Self {
q_l: gates.iter().map(|g| g.q_l).collect(),
q_r: gates.iter().map(|g| g.q_r).collect(),
@@ -118,7 +117,7 @@ impl Constrains {
}
}
pub fn satisfies(&self, v: &Assigments) -> bool {
pub fn satisfies(&self, v: &Assigments<F>) -> bool {
// check gates (q_l * a) + (q_r * b) + (q_o * c) + (q_m * a * b) + q_c = 0
assert_eq!(v.a.len(), self.q_l.len());
for n in 0..v.a.len() {
@@ -153,8 +152,8 @@ impl Constrains {
}
}
impl Assigments {
pub fn new(assigments: &[Assigment]) -> Self {
impl<F: Field> Assigments<F> {
pub fn new(assigments: &[Assigment<F>]) -> Self {
Self {
a: assigments.iter().map(|v| v.a).collect(),
b: assigments.iter().map(|v| v.b).collect(),
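A minimal usage sketch of the now-generic constraint API (not part of this commit; it assumes the `crate::constraints` items above and the `f17` helper added in `src/pbh/mod.rs` below): a single multiplication gate a·b = c, with the copy constraints tying a and b together.

#[cfg(test)]
mod single_gate_example {
    use crate::constraints::{Assigment, Assigments, Constrains, CopyOf, Gate};
    use crate::pbh::f17;

    #[test]
    fn single_mul_gate_is_satisfied() {
        // one gate: q_m*a*b + q_o*c = a*b - c = 0
        let constraints = Constrains::new(
            &[Gate::mul_a_b()],
            // a(1) copies b(1), b(1) copies a(1), c(1) copies itself
            (vec![CopyOf::B(1)], vec![CopyOf::A(1)], vec![CopyOf::C(1)]),
        );
        // 3 * 3 - 9 = 0 in F17
        let witness = Assigments::new(&[Assigment::new(f17(3), f17(3), f17(9))]);
        assert!(constraints.satisfies(&witness));
    }
}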

src/ec.rs

@@ -1,435 +1,92 @@
#![allow(dead_code)]
use super::poly::Poly;
use std::{
fmt::Display,
ops::{Add, Mul, Neg},
fmt::{Debug, Display},
ops::{Add, AddAssign, Div, Mul, MulAssign, Neg, Sub, SubAssign},
};
use super::plonk::{f101, F101};
pub trait Field:
Sized
+ Debug
+ Copy
+ Display
+ PartialEq
+ From<i64>
+ From<u64>
+ Add<Output = Self>
+ for<'a> Add<&'a Self, Output = Self>
+ AddAssign
+ for<'a> AddAssign<&'a Self>
+ Div<Output = Option<Self>>
+ Mul<Output = Self>
+ for<'a> Mul<&'a Self, Output = Self>
+ for<'a> MulAssign<&'a Self>
+ Sub<Output = Self>
+ for<'a> SubAssign<&'a Self>
+ Neg<Output = Self>
{
type Order: Display;
/// Helper to build a G1 point at $(x,y)$
#[allow(non_snake_case)]
pub fn g1f(x: u64, y: u64) -> G1Point {
G1Point::new(f101(x), f101(y))
}
/// Helper to build a G2 extension at $a+bu$
#[allow(non_snake_case)]
pub fn g2f(a: u64, b: u64) -> G2Point {
G2Point::new(f101(a), f101(b))
}
/// A point on the curve $y^2=x^3+3$ over the field $\mathbb{F}_{101}$.
/// The generator $g=(1,2)$ generates a subgroup of order 17: $17g$ is the point at infinity.
///
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct G1Point {
/// x coordinate
pub x: F101,
/// y coordinate
pub y: F101,
/// if point is at infinity
pub infinite: bool,
}
/// An elliptic curve is an equation of the form $y^2=x^3+ax+b$ where $a$, $b$, $x$, and $y$
/// are elements of some field. (In a cryptographic setting the field will be a finite field.)
/// Any $(x,y)$ pairs satisfying the equation are points on the curve.
///
/// We will use an elliptic curve over the field $\mathbb{F}_{101}$ , which makes hand computation easy.
///
/// The elliptic curve $y^2 = x^3 +3$ is a commonly-used elliptic curve equation, and $(1,2)$ is an easy-to-find
/// point on that curve we can use as a generator. In fact, the alt_bn128 curve that is implemented on Ethereum
/// uses this same curve equation and generator, but over a much larger field.
///
/// Point addition:
/// for $P_r = P_p + P_q$ , $x_r = \lambda^2 - x_p - x_q$ $y_r = \lambda(x_p - x_r) - y_p$
/// where $\\lambda = \\frac{y_q - y_p}{x_q - x_p}$
///
/// Point doubling (This doubling formula only works for curves with a=0, like ours)
/// for $P=(x,y)$, $2P=(m^2-2x, m(3x-m^2)-y)$ where $m=\\frac{3x^2}{2y}$
///
/// Point inversion:
/// for $P=(x,y)$, $-P=(x,-y)$
///
/// Elliptic curves also have an abstract "point at infinity" which serves as the group identity. For more on elliptic curve arithmetic, check out [this post from Nick Sullivan](https://blog.cloudflare.com/a-relatively-easy-to-understand-primer-on-elliptic-curve-cryptography/)
///
impl G1Point {
/// Creates a new point at given $(x,y)$
pub fn new(x: F101, y: F101) -> Self {
G1Point {
x,
y,
infinite: false,
}
}
/// Checks if the coordinates are on the curve, so $y^2 = x^3 +3$
pub fn in_curve(&self) -> bool {
self.y.pow(2) == self.x.pow(3) + f101(3)
}
/// Checks if the point is at infinity
pub fn is_identity(&self) -> bool {
self.infinite
}
/// Returns the generator $g=(1,2)$
pub fn generator() -> Self {
G1Point {
x: f101(1),
y: f101(2),
infinite: false,
}
}
/// Returns the size of the subgroup generated by generator $g=(1,2)$
pub fn generator_subgroup_size() -> u64 {
17
}
/// Returns the point at infinity
pub fn identity() -> Self {
G1Point {
x: f101(0),
y: f101(0),
infinite: true,
}
fn order() -> Self::Order;
fn zero() -> Self;
fn is_zero(&self) -> bool;
fn one() -> Self;
fn as_u64(&self) -> u64;
fn in_field(&self) -> bool;
// fn rebase<F1T: FieldT>(&self) -> F1T;
fn inv(&self) -> Option<Self>;
fn pow(&self, exp: u64) -> Self;
fn as_poly(&self) -> Poly<Self> {
Poly::new(vec![*self])
}
}
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct G2Point {
pub a: F101,
pub b: F101,
}
impl G2Point {
// a + b · u
pub fn new(a: F101, b: F101) -> Self {
G2Point { a, b }
}
pub fn generator() -> Self {
G2Point {
a: f101(36),
b: f101(31),
}
}
pub fn embeeding_degree() -> u32 {
2
}
pub fn pow(&self, mut n: u64) -> Self {
// Frobenius map reduction: p^101 = -p
let (mut p, mut base) = if n >= 101 {
let base = -self.pow(n / 101);
n = n % 101;
(base, *self)
} else {
(G2Point::new(f101(1), f101(0)), *self)
};
pub trait G1Point:
Copy
+ Display
+ PartialEq
+ std::hash::Hash
+ PartialOrd
+ Ord
+ Neg<Output = Self>
+ Add<Output = Self>
+ Mul<Self::S, Output = Self>
{
type F: Field;
type S: Field;
// square-and-multiply exponentiation
while n > 0 {
if n % 2 == 1 {
p = p * base;
}
n = n >> 1;
base = base * base;
}
p
}
fn generator() -> Self;
fn generator_subgroup_size() -> Self::F;
fn identity() -> Self;
fn new(x: Self::F, y: Self::F) -> Self;
fn x(&self) -> &Self::F;
fn y(&self) -> &Self::F;
fn in_curve(&self) -> bool;
fn is_identity(&self) -> bool;
}
impl Display for G1Point {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if self.infinite {
write!(f, "infinite")
} else {
write!(f, "({},{})", self.x, self.y)
}
}
pub trait G2Point:
Display + Copy + PartialEq + Neg<Output = Self> + Add + Mul<Self::S, Output = Self>
{
type F: Field;
type S: Field;
fn x(&self) -> &Self::F;
fn y(&self) -> &Self::F;
fn new(x: Self::F, y: Self::F) -> Self;
fn generator() -> Self;
fn embeeding_degree() -> u64;
}
impl Display for G2Point {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}+{}u", self.a, self.b)
}
pub trait GTPoint: Display + Copy + PartialEq + Mul {
type S;
fn pow(&self, n: Self::S) -> Self;
}
impl Neg for G1Point {
type Output = G1Point;
fn neg(self) -> Self::Output {
if self.infinite {
self
} else {
G1Point::new(self.x, -self.y)
}
}
}
impl Neg for G2Point {
type Output = G2Point;
fn neg(self) -> Self::Output {
G2Point::new(self.a, -self.b)
}
}
impl Add for G1Point {
type Output = G1Point;
fn add(self, rhs: G1Point) -> Self {
if self.infinite {
rhs
} else if rhs.infinite {
self
} else if self == -rhs {
G1Point::identity()
} else if self == rhs {
let two = F101::from(2);
let three = F101::from(3);
let m = ((three * self.x.pow(2)) / (two * self.y)).unwrap();
G1Point::new(
m * m - two * self.x,
m * (three * self.x - m.pow(2)) - self.y,
)
} else {
// https://en.wikipedia.org/wiki/Elliptic_curve_point_multiplication#G1Point_addition
let lambda = ((rhs.y - self.y) / (rhs.x - self.x))
.expect(&format!("cannot add {}+{}", self, rhs));
let x = lambda.pow(2) - self.x - rhs.x;
G1Point::new(x, lambda * (self.x - x) - self.y)
}
}
}
impl Add for G2Point {
type Output = G2Point;
fn add(self, rhs: G2Point) -> Self {
if self == rhs {
let two = F101::from(2);
let three = F101::from(3);
let m_u = ((three * self.a.pow(2)) / (two * self.b)).unwrap(); // in u units
let u_pow_2_inv = (-F101::from(2)).inv().unwrap(); // 1/(u^2) = -1/2
let m_pow_2 = m_u.pow(2) * u_pow_2_inv;
G2Point::new(
m_pow_2 - two * self.a,
u_pow_2_inv * m_u * (three * self.a - m_pow_2) - self.b,
)
} else {
let lambda_u = ((rhs.b - self.b) / (rhs.a - self.a)).unwrap();
let lambda_pow_2 = lambda_u.pow(2) * -f101(2);
let a = lambda_pow_2 - self.a - rhs.a;
let b = lambda_u * (self.a - a) - self.b;
G2Point::new(a, b)
}
}
}
impl Mul<F101> for G1Point {
type Output = G1Point;
fn mul(self, rhs: F101) -> Self::Output {
let mut rhs = rhs.as_u64();
if rhs == 0 || self.is_identity() {
return G1Point::identity();
}
let mut result = None;
let mut base = self;
while rhs > 0 {
if rhs % 2 == 1 {
result = Some(if let Some(result) = result {
result + base
} else {
base
})
}
rhs = rhs >> 1;
base = base + base;
}
result.unwrap()
}
}
impl Mul<F101> for G2Point {
type Output = G2Point;
fn mul(self, rhs: F101) -> Self::Output {
let mut rhs = rhs.as_u64();
let mut result = None;
let mut base = self;
while rhs > 0 {
if rhs % 2 == 1 {
result = Some(if let Some(result) = result {
result + base
} else {
base
})
}
rhs = rhs >> 1;
base = base + base;
}
result.unwrap()
}
}
impl Mul<G2Point> for G2Point {
type Output = G2Point;
fn mul(self, rhs: G2Point) -> Self::Output {
G2Point::new(
self.a * rhs.a - f101(2) * self.b * rhs.b,
self.a * rhs.b + self.b * rhs.a,
)
}
}
fn pairing_f(r: u64, p: G1Point, q: G2Point) -> G2Point {
// line equation from a to b point
let l = |a: G1Point, b: G1Point| {
let m = b.x - a.x;
let n = b.y - a.y;
let x = n;
let y = -m;
let c = m * a.y - n * a.x;
(x, y, c)
};
if r == 1 {
g2f(1, 0)
} else if r % 2 == 1 {
let r = r - 1;
let (x, y, c) = l(p * f101(r), p);
pairing_f(r, p, q) * G2Point::new(q.a * x + c, q.b * y)
} else {
let r = r / 2;
let (x, y, c) = l(p * f101(r), -p * f101(r) * f101(2));
pairing_f(r, p, q).pow(2) * G2Point::new(q.a * x + c, q.b * y)
}
}
pub fn pairing(g1: G1Point, g2: G2Point) -> G2Point {
let p = g1.x.order();
let r = G1Point::generator_subgroup_size();
let k = G2Point::embeeding_degree();
let exp = (p.pow(k) - 1) / r;
pairing_f(r, g1, g2).pow(exp)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_find_subgroups() {
// add all points that are in the curve
let mut points = Vec::new();
for x in 0..101 {
for y in 0..101 {
let p = G1Point::new(f101(x), f101(y));
if p.in_curve() {
points.push(p);
}
}
}
// find subgroups
let mut subgroups = std::collections::HashMap::new();
while points.len() > 0 {
let mut in_subgroup = Vec::new();
// pick one element as a generator
let g = points.pop().unwrap();
in_subgroup.push(g);
// find all m * g != g
let mut mul = 2;
let mut gmul = g * f101(mul);
while g != gmul {
in_subgroup.push(gmul);
mul += 1;
gmul = g * f101(mul);
}
in_subgroup.sort();
subgroups.insert(g, in_subgroup);
}
// find unique subgroups
let mut duplicates = Vec::new();
for (g, e) in &subgroups {
if !duplicates.contains(g) {
subgroups
.iter()
.filter(|(g1, e1)| g1 != &g && e1 == &e)
.for_each(|(g, _)| duplicates.push(*g));
}
}
// remove duplicates
subgroups.retain(|g, _| !duplicates.contains(g));
for (g, e) in &subgroups {
println!("{} {}", g, e.len());
if e.len() < 20 {
for n in 0..e.len() {
println!(" {}", e[n]);
}
}
}
}
#[test]
fn test_1() {
use crate::field::Field;
let a4 = Field::<17>::from(4);
println!("2,3,4 = {} {} {}", a4.pow(2), a4.pow(3), a4.pow(4));
}
#[test]
fn test_g1_vectors() {
let g = G1Point::generator();
let two_g = g + g;
let four_g = two_g + two_g;
let eight_g = four_g + four_g;
let sixteen_g = eight_g + eight_g;
assert_eq!(g1f(1, 99), -g);
assert_eq!(g1f(68, 74), two_g);
assert_eq!(g1f(68, 27), -two_g);
assert_eq!(g1f(65, 98), four_g);
assert_eq!(g1f(65, 3), -four_g);
assert_eq!(g1f(18, 49), eight_g);
assert_eq!(g1f(18, 52), -eight_g);
assert_eq!(g1f(1, 99), sixteen_g);
assert_eq!(g1f(1, 2), -sixteen_g);
// since g = -16 g, this subgroup has order 17
assert_eq!(g1f(26, 45), two_g + g);
assert_eq!(g1f(12, 32), four_g + g);
assert_eq!(g1f(18, 52), eight_g + g);
assert_eq!(four_g + two_g, two_g + four_g);
assert_eq!(g * f101(1), g);
assert_eq!(g * f101(2), g + g);
assert_eq!(g * f101(6), g + g + g + g + g + g);
}
#[test]
fn test_g2_vectors() {
let g = G2Point::generator();
// check point doubling
assert_eq!(g2f(90, 82), g + g);
// check point addition
assert_eq!((g + g) + (g + g), g + g + g + g);
// check point multiplication
assert_eq!(g * f101(6), g + g + g + g + g + g);
// check G2 multiplication
assert_eq!(g2f(26, 97) * g2f(93, 76), g2f(97, 89));
// check G2 exp
assert_eq!(g2f(42, 49).pow(6), g2f(97, 89));
assert_eq!(g2f(93, 76).pow(101), -g2f(93, 76));
assert_eq!(g2f(93, 76).pow(102), (-g2f(93, 76)) * g2f(93, 76));
assert_eq!(g2f(68, 47).pow(600), g2f(97, 89));
}
pub trait Pairing {
type G1: G1Point;
type G2: G2Point;
type GT: GTPoint;
fn pairing(p: Self::G1, q: Self::G2) -> Self::GT;
}
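To illustrate what the new trait layer buys, here is a small hypothetical helper (not in the commit) that is generic over any `G1Point` and its scalar field; it mirrors the fold that `SRS::eval_at_s` performs later in this change.

use crate::ec::G1Point;

/// Multi-scalar combination: sum_i scalars[i] * points[i], starting from the
/// identity point. Works for any curve/scalar pair implementing the traits above.
fn scalar_comb<G: G1Point>(points: &[G], scalars: &[G::S]) -> G {
    points
        .iter()
        .zip(scalars.iter())
        .fold(G::identity(), |acc, (p, s)| acc + *p * *s)
}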


@@ -1,246 +0,0 @@
#![allow(dead_code)]
use super::poly::Poly;
use std::{
fmt::Display,
ops::{Add, AddAssign, Div, Mul, MulAssign, Neg, Sub, SubAssign},
};
fn extended_gcd(a: i64, b: i64) -> (i64, i64, i64) {
let (mut s, mut old_s) = (0, 1);
let (mut t, mut old_t) = (1, 0);
let (mut r, mut old_r) = (b, a);
while r != 0 {
let quotient = &old_r / &r;
old_r -= &quotient * &r;
std::mem::swap(&mut old_r, &mut r);
old_s -= &quotient * &s;
std::mem::swap(&mut old_s, &mut s);
old_t -= quotient * &t;
std::mem::swap(&mut old_t, &mut t);
}
(old_r, old_s, old_t)
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Field<const M: u64>(u64);
#[allow(non_snake_case)]
pub fn Fi64<const M: u64>(n: i64) -> Field<M> {
if n < 0 {
-Field::from(-n as u64)
} else {
Field::from(n as u64)
}
}
impl<const M: u64> Field<M> {
pub fn from(n: u64) -> Self {
Self(n % M)
}
pub fn order(&self) -> u64 {
M
}
pub fn zero() -> Self {
Self::from(0)
}
pub fn is_zero(&self) -> bool {
self.0 == 0
}
pub fn one() -> Self {
Self::from(1)
}
pub fn as_u64(&self) -> u64 {
self.0
}
pub fn in_field(&self) -> bool {
self.0 < M
}
pub fn rebase<const M2: u64>(&self) -> Field<M2> {
Field::<M2>::from(self.0)
}
pub fn inv(&self) -> Option<Self> {
let (gcd, c, _) = extended_gcd(self.0 as i64, M as i64);
if gcd == 1 {
if c < 0 {
Some(Self((M as i64 + c) as u64))
} else {
Some(Self(c as u64))
}
} else {
None
}
}
pub fn pow(&self, mut exp: u64) -> Self {
let mut result = Self::one();
let mut base = *self;
while exp > 0 {
if exp % 2 == 1 {
result = result * base;
}
exp = exp >> 1;
base = base * base;
}
result
}
pub fn as_poly(&self) -> Poly<M> {
Poly::new(vec![*self])
}
}
impl<const M: u64> Display for Field<M> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
impl<const M: u64> Add for Field<M> {
type Output = Field<M>;
fn add(self, rhs: Self) -> Self::Output {
Field((self.0 + rhs.0) % M)
}
}
impl<const M: u64> Add<&Field<M>> for Field<M> {
type Output = Field<M>;
fn add(self, rhs: &Field<M>) -> Self::Output {
Field((self.0 + rhs.0) % M)
}
}
impl<const M: u64> Add<Field<M>> for &Field<M> {
type Output = Field<M>;
fn add(self, rhs: Field<M>) -> Self::Output {
Field((self.0 + rhs.0) % M)
}
}
impl<const M: u64> Add<&Field<M>> for &Field<M> {
type Output = Field<M>;
fn add(self, rhs: &Field<M>) -> Self::Output {
Field((self.0 + rhs.0) % M)
}
}
impl<const M: u64> AddAssign<&Field<M>> for Field<M> {
fn add_assign(&mut self, rhs: &Field<M>) {
self.0 = (self.0 + rhs.0) % M;
}
}
impl<const M: u64> AddAssign<Field<M>> for Field<M> {
fn add_assign(&mut self, rhs: Field<M>) {
self.0 = (self.0 + rhs.0) % M;
}
}
impl<const M: u64> Sub for Field<M> {
type Output = Field<M>;
fn sub(self, rhs: Self) -> Self::Output {
self + -rhs
}
}
impl<const M: u64> SubAssign<&Field<M>> for Field<M> {
fn sub_assign(&mut self, rhs: &Field<M>) {
*self += -rhs;
}
}
impl<const M: u64> Neg for Field<M> {
type Output = Field<M>;
fn neg(self) -> Self::Output {
Field((M - self.0) % M)
}
}
impl<const M: u64> Neg for &Field<M> {
type Output = Field<M>;
fn neg(self) -> Self::Output {
Field((M - self.0) % M)
}
}
impl<const M: u64> Mul for Field<M> {
type Output = Field<M>;
fn mul(self, rhs: Self) -> Self::Output {
Field((self.0 * rhs.0) % M)
}
}
impl<const M: u64> Mul<&Field<M>> for &Field<M> {
type Output = Field<M>;
fn mul(self, rhs: &Field<M>) -> Self::Output {
Field((self.0 * rhs.0) % M)
}
}
impl<const M: u64> Mul<&Field<M>> for Field<M> {
type Output = Field<M>;
fn mul(self, rhs: &Field<M>) -> Self::Output {
Field((self.0 * rhs.0) % M)
}
}
impl<const M: u64> Mul<Field<M>> for &Field<M> {
type Output = Field<M>;
fn mul(self, rhs: Field<M>) -> Self::Output {
Field((self.0 * rhs.0) % M)
}
}
impl<const M: u64> Mul<Poly<M>> for &Field<M> {
type Output = Poly<M>;
fn mul(self, rhs: Poly<M>) -> Self::Output {
rhs * self
}
}
impl<const M: u64> Mul<Poly<M>> for Field<M> {
type Output = Poly<M>;
fn mul(self, rhs: Poly<M>) -> Self::Output {
rhs * self
}
}
impl<const M: u64> MulAssign<&Field<M>> for Field<M> {
fn mul_assign(&mut self, rhs: &Field<M>) {
self.0 = (self.0 * rhs.0) % M;
}
}
impl<const M: u64> Div for Field<M> {
type Output = Option<Field<M>>;
fn div(self, rhs: Self) -> Self::Output {
rhs.inv().map(|v| v * self)
}
}
#[cfg(test)]
mod tests {
use super::*;
fn f101(n: u64) -> Field<101> {
Field::from(n)
}
#[test]
fn test_base() {
assert_eq!(f101(200), f101(100) + f101(100));
assert_eq!(f101(100), f101(0) - f101(1));
assert_eq!(f101(100), f101(0) - f101(1));
assert_eq!(None, f101(1) / f101(0));
assert_eq!(f101(4), f101(12) * (f101(4) / f101(12)).unwrap());
}
#[test]
fn test_vectors() {
assert_eq!(f101(100), -f101(1));
assert_eq!(f101(50), -(f101(1) / f101(2)).unwrap());
assert_eq!(f101(20), -(f101(1) / f101(5)).unwrap());
assert_eq!(f101(1), f101(100).pow(0));
assert_eq!(f101(100) * f101(100), f101(100).pow(2));
assert_eq!(f101(100) * f101(100) * f101(100), f101(100).pow(3));
}
}
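The `inv` method above (presumably kept by the `U64Field` type in the `utils` module that replaces this file) computes inverses with the extended Euclidean algorithm. A worked instance in $\mathbb{F}_{101}$, consistent with the `f101(50) == -(1/2)` test vector:

$$101 = 50\cdot 2 + 1 \;\Rightarrow\; 1 = 101 - 50\cdot 2 \;\Rightarrow\; 2^{-1} \equiv -50 \equiv 51 \pmod{101},$$
$$\text{so}\quad -\tfrac{1}{2} \equiv -51 \equiv 50 \pmod{101}.$$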


@@ -1,7 +1,7 @@
pub mod constraints;
pub mod ec;
pub mod field;
pub mod matrix;
pub mod pbh;
pub mod plonk;
pub mod poly;
pub mod prover;
pub mod utils;


@@ -1,27 +1,24 @@
#![allow(unused_imports, dead_code)]
use std::{
fmt::Display,
ops::{Add, Index, IndexMut, Mul},
};
use super::field::Field;
use super::poly::Poly;
use super::{ec::Field, poly::Poly};
#[derive(Debug, PartialEq, Clone)]
pub struct Matrix<const M: u64> {
pub struct Matrix<F: Field> {
m: usize, // rows
n: usize, // cols
v: Vec<Field<M>>,
v: Vec<F>,
}
impl<const M: u64> Matrix<M> {
impl<F: Field> Matrix<F> {
pub fn zero(m: usize, n: usize) -> Self {
let mut v = Vec::new();
v.resize(n * m, Field::<M>::from(0));
v.resize(n * m, F::zero());
Self { m, n, v }
}
pub fn new(v: &[Field<M>], m: usize, n: usize) -> Self {
pub fn new(v: &[F], m: usize, n: usize) -> Self {
assert_eq!(v.len(), m * n);
Matrix {
m,
@@ -35,7 +32,7 @@ impl<const M: u64> Matrix<M> {
Matrix {
m,
n,
v: v.iter().map(|x| Field::<M>::from(*x)).collect(),
v: v.iter().map(|x| F::from(*x)).collect(),
}
}
pub fn cols(&self) -> usize {
@@ -44,7 +41,7 @@ impl<const M: u64> Matrix<M> {
pub fn rows(&self) -> usize {
self.m
}
pub fn inv(&self) -> Matrix<M> {
pub fn inv(&self) -> Self {
let len = self.n;
let mut aug = Matrix::zero(len, len * 2);
for i in 0..len {
@@ -65,7 +62,6 @@ impl<const M: u64> Matrix<M> {
unaug
}
//Begin Generalised Reduced Row Echelon Form
fn gauss_jordan_general(&mut self) {
let mut lead = 0;
let row_count = self.m;
@@ -77,10 +73,10 @@ impl<const M: u64> Matrix<M> {
}
let mut i = r;
while self[(i, lead)] == Field::zero() {
i = i + 1;
i += 1;
if row_count == i {
i = r;
lead = lead + 1;
lead += 1;
if col_count == lead {
break;
}
@@ -107,17 +103,17 @@ impl<const M: u64> Matrix<M> {
}
}
}
lead = lead + 1;
lead += 1;
}
}
pub fn into_poly(self) -> Poly<M> {
pub fn into_poly(self) -> Poly<F> {
assert_eq!(1, self.n);
Poly::new(self.v)
}
}
impl<const M: u64> Index<(usize, usize)> for Matrix<M> {
type Output = Field<M>;
impl<F: Field> Index<(usize, usize)> for Matrix<F> {
type Output = F;
// row, column
fn index(&self, p: (usize, usize)) -> &Self::Output {
assert!(p.0 < self.m && p.1 < self.n);
@@ -125,29 +121,29 @@ impl<const M: u64> Index<(usize, usize)> for Matrix<M> {
}
}
impl<const M: u64> IndexMut<(usize, usize)> for Matrix<M> {
impl<F: Field> IndexMut<(usize, usize)> for Matrix<F> {
fn index_mut(&mut self, p: (usize, usize)) -> &mut Self::Output {
assert!(p.0 < self.m && p.1 < self.n);
&mut self.v[p.1 + p.0 * self.n]
}
}
impl<const M: u64> Mul<Matrix<M>> for Matrix<M> {
type Output = Matrix<M>;
fn mul(self, rhs: Matrix<M>) -> Self::Output {
impl<F: Field> Mul<Matrix<F>> for Matrix<F> {
type Output = Matrix<F>;
fn mul(self, rhs: Matrix<F>) -> Self::Output {
&self * rhs
}
}
impl<const M: u64> Mul<Matrix<M>> for &Matrix<M> {
type Output = Matrix<M>;
fn mul(self, rhs: Matrix<M>) -> Self::Output {
impl<F: Field> Mul<Matrix<F>> for &Matrix<F> {
type Output = Matrix<F>;
fn mul(self, rhs: Matrix<F>) -> Self::Output {
assert!(self.n == rhs.m);
let mut c = Matrix::zero(self.m, rhs.n);
for i in 0..self.m {
for j in 0..rhs.n {
for k in 0..self.n {
c[(i, j)] = c[(i, j)] + self[(i, k)] * rhs[(k, j)];
c[(i, j)] += self[(i, k)] * rhs[(k, j)];
}
}
}
@@ -155,9 +151,9 @@ impl<const M: u64> Mul<Matrix<M>> for &Matrix<M> {
}
}
impl<const M: u64> Add<Matrix<M>> for Matrix<M> {
type Output = Matrix<M>;
fn add(self, rhs: Matrix<M>) -> Self::Output {
impl<F: Field> Add<Matrix<F>> for Matrix<F> {
type Output = Matrix<F>;
fn add(self, rhs: Matrix<F>) -> Self::Output {
assert!(self.m == rhs.m);
assert!(self.n == rhs.n);
let mut c = Matrix::zero(self.m, self.n);
@@ -168,7 +164,7 @@ impl<const M: u64> Add<Matrix<M>> for Matrix<M> {
}
}
impl<const M: u64> Display for Matrix<M> {
impl<F: Field> Display for Matrix<F> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
writeln!(f, "{}x{}", self.m, self.n)?;
for r in 0..self.m {
@@ -185,8 +181,9 @@ impl<const M: u64> Display for Matrix<M> {
#[cfg(test)]
mod tests {
use super::*;
type F = Field<104729>;
type M = Matrix<104729>;
use crate::utils::U64Field;
type F = U64Field<104729>;
type M = Matrix<F>;
#[test]
fn test_sum_matrix() {
assert_eq!(
@@ -198,7 +195,7 @@ mod tests {
fn test_index_matrix() {
let m = M::from(&[1, 2, 3, 4], 2, 2);
// row 0 column 1
assert_eq!(m[(0, 1)], F::from(2));
assert_eq!(m[(0, 1)], F::from(2u64));
}
#[test]
fn test_mul_matrix() {
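For context on how this matrix type is used: `Plonk::new` in `src/plonk.rs` below fills a Vandermonde-style matrix with powers of the roots of unity and inverts it with `inv()` to interpolate values over $H$. With the test parameters $\omega = 4$ over $\mathbb{F}_{17}$, so $H = \{1, 4, 16, 13\}$, that matrix is

$$\begin{pmatrix}1&1&1&1\\ 1&4&16&13\\ 1&16&1&16\\ 1&13&16&4\end{pmatrix} \pmod{17},$$

and multiplying its inverse by a column of target values yields the coefficients of the interpolating polynomial.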

src/pbh/g1.rs (new file)

@@ -0,0 +1,261 @@
use std::{
fmt::Display,
hash::Hash,
ops::{Add, Mul, Neg},
};
use super::{f101, F101};
use crate::ec::{Field, G1Point};
#[allow(non_snake_case)]
pub fn g1f(x: u64, y: u64) -> G1P {
G1P::new(F101::from(x), F101::from(y))
}
/// A point on the curve $y^2=x^3+3$ over the field $\mathbb{F}_{101}$.
/// The generator $g=(1,2)$ generates a subgroup of order 17: $17g$ is the point at infinity.
///
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct G1P {
/// x coordinate
pub x: F101,
/// y coordinate
pub y: F101,
/// if point is at infinity
pub infinite: bool,
}
/// An elliptic curve is an equation of the form $y^2=x^3+ax+b$ where $a$, $b$, $x$, and $y$
/// are elements of some field. (In a cryptographic setting the field will be a finite field.)
/// Any $(x,y)$ pairs satisfying the equation are points on the curve.
///
/// We will use an elliptic curve over the field $\mathbb{F}_{101}$ , which makes hand computation easy.
///
/// The elliptic curve $y^2 = x^3 +3$ is a commonly-used elliptic curve equation, and $(1,2)$ is an easy-to-find
/// point on that curve we can use as a generator. In fact, the alt_bn128 curve that is implemented on Ethereum
/// uses this same curve equation and generator, but over a much larger field.
///
/// Point addition:
/// for $P_r = P_p + P_q$ , $x_r = \lambda^2 - x_p - x_q$ $y_r = \lambda(x_p - x_r) - y_p$
/// where $\\lambda = \\frac{y_q - y_p}{x_q - x_p}$
///
/// Point doubling (This doubling formula only works for curves with a=0, like ours)
/// for $P=(x,y)$, $2P=(m^2-2x, m(3x-m^2)-y)$ where $m=\\frac{3x^2}{2y}$
///
/// Point inversion:
/// for $P=(x,y)$, $-P=(x,-y)$
///
/// Elliptic curves also have an abstract "point at infinity" which serves as the group identity. For more on elliptic curve arithmetic, check out [this post from Nick Sullivan](https://blog.cloudflare.com/a-relatively-easy-to-understand-primer-on-elliptic-curve-cryptography/)
///
impl G1Point for G1P {
type F = F101;
type S = F101;
/// Creates a new point at given $(x,y)$
fn new(x: Self::F, y: Self::F) -> Self {
G1P {
x,
y,
infinite: false,
}
}
/// Checks if the coordinates are on the curve, so $y^2 = x^3 +3$
fn in_curve(&self) -> bool {
self.y.pow(2) == self.x.pow(3) + f101(3u64)
}
/// Checks if the point is at infinity
fn is_identity(&self) -> bool {
self.infinite
}
/// Returns the generator $g=(1,2)$
fn generator() -> Self {
G1P {
x: f101(1u64),
y: f101(2u64),
infinite: false,
}
}
/// Returns the size of the subgroup generated by generator $g=(1,2)$
fn generator_subgroup_size() -> Self::F {
f101(17u64)
}
/// Returns the point at infinity
fn identity() -> Self {
G1P {
x: Self::F::zero(),
y: Self::F::zero(),
infinite: true,
}
}
fn x(&self) -> &Self::F {
&self.x
}
fn y(&self) -> &Self::F {
&self.y
}
}
impl Display for G1P {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if self.infinite {
write!(f, "infinite")
} else {
write!(f, "({},{})", self.x, self.y)
}
}
}
impl Neg for G1P {
type Output = G1P;
fn neg(self) -> Self::Output {
if self.infinite {
self
} else {
G1P::new(self.x, -self.y)
}
}
}
impl Add for G1P {
type Output = G1P;
fn add(self, rhs: G1P) -> Self {
if self.infinite {
rhs
} else if rhs.infinite {
self
} else if self == -rhs {
G1P::identity()
} else if self == rhs {
let two = f101(2);
let three = f101(3);
let m = ((three * self.x.pow(2)) / (two * self.y)).unwrap();
G1P::new(
m * m - two * self.x,
m * (three * self.x - m.pow(2)) - self.y,
)
} else {
// https://en.wikipedia.org/wiki/Elliptic_curve_point_multiplication#G1P_addition
let lambda = ((rhs.y - self.y) / (rhs.x - self.x))
.unwrap_or_else(|| panic!("cannot add {}+{}", self, rhs));
let x = lambda.pow(2) - self.x - rhs.x;
G1P::new(x, lambda * (self.x - x) - self.y)
}
}
}
impl Mul<F101> for G1P {
type Output = G1P;
fn mul(self, rhs: F101) -> Self::Output {
let mut rhs = rhs.as_u64();
if rhs == 0 || self.is_identity() {
return G1P::identity();
}
let mut result = None;
let mut base = self;
while rhs > 0 {
if rhs % 2 == 1 {
result = Some(if let Some(result) = result {
result + base
} else {
base
})
}
rhs >>= 1;
base = base + base;
}
result.unwrap()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_find_subgroups() {
// collect all points that are on the curve
let mut points = Vec::new();
for x in 0..101 {
for y in 0..101 {
let p = G1P::new(f101(x), f101(y));
if p.in_curve() {
points.push(p);
}
}
}
// find subgroups
let mut subgroups = std::collections::HashMap::new();
while points.len() > 0 {
let mut in_subgroup = Vec::new();
// pick one element as a generator
let g = points.pop().unwrap();
in_subgroup.push(g);
// collect the multiples m * g until the cycle returns to g
let mut mul = 2;
let mut gmul = g * f101(mul);
while g != gmul {
in_subgroup.push(gmul);
mul += 1;
gmul = g * f101(mul);
}
in_subgroup.sort();
subgroups.insert(g, in_subgroup);
}
// find unique subgroups
let mut duplicates = Vec::new();
for (g, e) in &subgroups {
if !duplicates.contains(g) {
subgroups
.iter()
.filter(|(g1, e1)| g1 != &g && e1 == &e)
.for_each(|(g, _)| duplicates.push(*g));
}
}
// remove duplicates
subgroups.retain(|g, _| !duplicates.contains(g));
for (g, e) in &subgroups {
println!("{} {}", g, e.len());
if e.len() < 20 {
for n in 0..e.len() {
println!(" {}", e[n]);
}
}
}
}
#[test]
fn test_g1_vectors() {
let g = G1P::generator();
let two_g = g + g;
let four_g = two_g + two_g;
let eight_g = four_g + four_g;
let sixteen_g = eight_g + eight_g;
assert_eq!(g1f(1, 99), -g);
assert_eq!(g1f(68, 74), two_g);
assert_eq!(g1f(68, 27), -two_g);
assert_eq!(g1f(65, 98), four_g);
assert_eq!(g1f(65, 3), -four_g);
assert_eq!(g1f(18, 49), eight_g);
assert_eq!(g1f(18, 52), -eight_g);
assert_eq!(g1f(1, 99), sixteen_g);
assert_eq!(g1f(1, 2), -sixteen_g);
// since g = -16 g, this subgroup has order 17
assert_eq!(g1f(26, 45), two_g + g);
assert_eq!(g1f(12, 32), four_g + g);
assert_eq!(g1f(18, 52), eight_g + g);
assert_eq!(four_g + two_g, two_g + four_g);
assert_eq!(g * f101(1), g);
assert_eq!(g * f101(2), g + g);
assert_eq!(g * f101(6), g + g + g + g + g + g);
}
}
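A worked instance of the doubling formula from the comment above, for the generator $g=(1,2)$; it matches the `g1f(68, 74) == two_g` test vector:

$$m=\frac{3x^2}{2y}=\frac{3}{4}=3\cdot 4^{-1}=3\cdot 76 \equiv 26 \pmod{101},$$
$$x_{2g}=m^2-2x=676-2 \equiv 68, \qquad y_{2g}=m(3x-m^2)-y = 26\,(3-676)-2 \equiv 26\cdot 34 - 2 \equiv 74 \pmod{101},$$

using $-673 \equiv 34 \pmod{101}$ in the last step.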

src/pbh/g2.rs (new file)

@@ -0,0 +1,123 @@
use std::{
fmt::Display,
ops::{Add, Mul, Neg},
};
use super::{f101, F101};
use crate::ec::{Field, G2Point};
#[allow(non_snake_case)]
pub fn g2f(a: u64, b: u64) -> G2P {
G2P::new(F101::from(a), F101::from(b))
}
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct G2P {
pub a: F101,
pub b: F101,
}
impl G2Point for G2P {
type F = F101;
type S = F101;
// a + b · u
fn new(a: Self::F, b: Self::F) -> Self {
G2P { a, b }
}
fn generator() -> Self {
G2P {
a: f101(36u64),
b: f101(31u64),
}
}
fn embeeding_degree() -> u64 {
2
}
fn x(&self) -> &Self::F {
&self.a
}
fn y(&self) -> &Self::F {
&self.b
}
}
impl Display for G2P {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}+{}u", self.a, self.b)
}
}
impl Neg for G2P {
type Output = G2P;
fn neg(self) -> Self::Output {
G2P::new(self.a, -self.b)
}
}
impl Add for G2P {
type Output = G2P;
fn add(self, rhs: G2P) -> Self {
if self == rhs {
let two = f101(2);
let three = f101(3);
let m_u = ((three * self.a.pow(2)) / (two * self.b)).unwrap(); // in u units
let u_pow_2_inv = (-f101(2)).inv().unwrap(); // 1/(u^2) = -1/2
let m_pow_2 = m_u.pow(2) * u_pow_2_inv;
G2P::new(
m_pow_2 - two * self.a,
u_pow_2_inv * m_u * (three * self.a - m_pow_2) - self.b,
)
} else {
let lambda_u = ((rhs.b - self.b) / (rhs.a - self.a)).unwrap();
let lambda_pow_2 = lambda_u.pow(2) * -f101(2);
let a = lambda_pow_2 - self.a - rhs.a;
let b = lambda_u * (self.a - a) - self.b;
G2P::new(a, b)
}
}
}
impl Mul<F101> for G2P {
type Output = G2P;
fn mul(self, rhs: F101) -> Self::Output {
let mut rhs = rhs.as_u64();
let mut result = None;
let mut base = self;
while rhs > 0 {
if rhs % 2 == 1 {
result = Some(if let Some(result) = result {
result + base
} else {
base
})
}
rhs >>= 1;
base = base + base;
}
result.unwrap()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_g2_vectors() {
let g = G2P::generator();
// check point doubling
assert_eq!(g2f(90, 82), g + g);
// check point addition
assert_eq!((g + g) + (g + g), g + g + g + g);
// check point multiplication
assert_eq!(g * f101(6), g + g + g + g + g + g);
// check G2 multiplication
assert_eq!(g2f(26, 97) * g2f(93, 76), g2f(97, 89));
}
}
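A quick sanity check (not part of the commit) that the $G_2$ generator $36+31u$ satisfies the same curve equation $y^2=x^3+3$, using the reduction $u^2=-2$ that the `Mul` impl in `src/pbh/gt.rs` below relies on:

$$(31u)^2 = 961\,u^2 \equiv 52\cdot(-2) \equiv 98, \qquad 36^3+3 = 46659 \equiv 98 \pmod{101}.$$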

src/pbh/gt.rs (new file)

@@ -0,0 +1,56 @@
#![allow(dead_code)]
use std::ops::Mul;
use super::{f101, g2::G2P, F101};
use crate::ec::{Field, G2Point, GTPoint};
impl GTPoint for G2P {
type S = u64;
fn pow(&self, mut n: Self::S) -> Self {
// Frobenius map reduction: p^101 = -p
let (mut p, mut base) = if n >= 101 {
let base = -self.pow(n / 101);
n %= 101;
(base, *self)
} else {
(G2P::new(F101::one(), F101::zero()), *self)
};
// square-and-multiply exponentiation
while n > 0 {
if n % 2 == 1 {
p = p * base;
}
n >>= 1;
base = base * base;
}
p
}
}
impl Mul<G2P> for G2P {
type Output = G2P;
fn mul(self, rhs: G2P) -> Self::Output {
G2P::new(
self.a * rhs.a - f101(2) * self.b * rhs.b,
self.a * rhs.b + self.b * rhs.a,
)
}
}
#[cfg(test)]
mod tests {
use super::{super::g2::g2f, *};
#[test]
fn test_gt_vectors() {
// check GT multiplication
assert_eq!(g2f(26, 97) * g2f(93, 76), g2f(97, 89));
// check GT exp
assert_eq!(g2f(42, 49).pow(6), g2f(97, 89));
assert_eq!(g2f(93, 76).pow(101), -g2f(93, 76));
assert_eq!(g2f(93, 76).pow(102), (-g2f(93, 76)) * g2f(93, 76));
assert_eq!(g2f(68, 47).pow(600), g2f(97, 89));
}
}
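A worked instance of the $\mathbb{F}_{101^2}$ multiplication above (again with $u^2=-2$), matching the `g2f(26, 97) * g2f(93, 76) == g2f(97, 89)` test vector:

$$(26+97u)(93+76u) = (26\cdot 93 - 2\cdot 97\cdot 76) + (26\cdot 76 + 97\cdot 93)\,u = -12326 + 10997\,u \equiv 97 + 89u \pmod{101}.$$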

src/pbh/mod.rs (new file)

@@ -0,0 +1,126 @@
pub mod g1;
pub mod g2;
pub mod gt;
pub mod pairing;
use crate::{ec::Field, plonk::PlonkTypes, utils::U64Field};
pub type F101 = U64Field<101>;
pub fn f101(x: u64) -> F101 {
F101::from(x)
}
pub type F17 = U64Field<17>;
pub fn f17(x: u64) -> F17 {
F17::from(x)
}
#[derive(Debug, PartialEq)]
pub struct PlonkByHandTypes {}
impl PlonkTypes for PlonkByHandTypes {
type G1 = g1::G1P;
type G2 = g2::G2P;
type GT = g2::G2P;
type E = pairing::PBHPairing;
type GF = F101;
type SF = F17;
fn gf(sg: F17) -> F101 {
F101::from(sg.as_u64())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
constraints::{Assigment, Assigments, Constrains, CopyOf, Gate},
pbh::g1::g1f,
plonk::{Challange, Plonk, Proof, SRS},
};
#[test]
fn test_plonk_gen_proof() {
// create the trusted setup
let s = f101(2); // the toxic waste
let srs = SRS::<PlonkByHandTypes>::create(s, 6);
let plonk = Plonk::new(
srs,
f17(4), // omega
4, // omega pows
f17(2), // k1
f17(3), // k2
);
// constraints and assigments
let constraints = Constrains::new(
&[
Gate::mul_a_b(),
Gate::mul_a_b(),
Gate::mul_a_b(),
Gate::sum_a_b(),
],
(
vec![CopyOf::B(1), CopyOf::B(2), CopyOf::B(3), CopyOf::C(1)],
vec![CopyOf::A(1), CopyOf::A(2), CopyOf::A(3), CopyOf::C(2)],
vec![CopyOf::A(4), CopyOf::B(4), CopyOf::C(4), CopyOf::C(3)],
),
);
let assigments = Assigments::new(&[
Assigment::new(f17(3), f17(3), f17(9)),
Assigment::new(f17(4), f17(4), f17(16)),
Assigment::new(f17(5), f17(5), f17(25)),
Assigment::new(f17(9), f17(16), f17(25)),
]);
// random numbers (the b's)
let rand = [
f17(7),
f17(4),
f17(11),
f17(12),
f17(16),
f17(2),
f17(14),
f17(11),
f17(7),
];
// values that are sent from the verifier to the prover
let challange = Challange {
alpha: f17(15),
beta: f17(12),
gamma: f17(13),
z: f17(5),
v: f17(12),
};
let proof = plonk.prove(&constraints, &assigments, &challange, rand);
let expected = Proof::<PlonkByHandTypes> {
a_s: g1f(91, 66),
b_s: g1f(26, 45),
c_s: g1f(91, 35),
z_s: g1f(32, 59),
t_lo_s: g1f(12, 32),
t_mid_s: g1f(26, 45),
t_hi_s: g1f(91, 66),
w_z_s: g1f(91, 35),
w_z_omega_s: g1f(65, 98),
a_z: f17(15),
b_z: f17(13),
c_z: f17(5),
s_sigma_1_z: f17(1),
s_sigma_2_z: f17(12),
r_z: f17(15),
z_omega_z: f17(15),
};
assert_eq!(proof, expected);
let rand = [f17(4)];
assert!(plonk.verify(&constraints, &proof, &challange, rand));
}
}
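For reference, the four gates and assignments in this test encode the relation $3^2+4^2=5^2$: the three multiplication gates compute the squares, the sum gate adds them, and the copy constraints tie each squaring gate's two inputs together and wire its output into the final sum gate.

$$3\cdot 3 = 9,\qquad 4\cdot 4 = 16,\qquad 5\cdot 5 = 25,\qquad 9+16=25 \;\Longleftrightarrow\; 3^2+4^2=5^2.$$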

src/pbh/pairing.rs (new file)

@@ -0,0 +1,79 @@
#![allow(clippy::many_single_char_names)]
use super::{
f101,
g1::G1P,
g2::{g2f, G2P},
};
use crate::ec::{Field, G1Point, G2Point, GTPoint, Pairing};
pub struct PBHPairing {}
impl Pairing for PBHPairing {
type G1 = G1P;
type G2 = G2P;
type GT = G2P;
fn pairing(g1: Self::G1, g2: Self::G2) -> Self::GT {
let p = <G1P as G1Point>::F::order();
let r = G1P::generator_subgroup_size().as_u64();
let k = G2P::embeeding_degree();
let exp = (p.pow(k as u32) - 1) / r;
pairing_f(r, g1, g2).pow(exp)
}
}
fn pairing_f(r: u64, p: G1P, q: G2P) -> G2P {
// line equation through the points a and b
let l = |a: G1P, b: G1P| {
let m = b.x - a.x;
let n = b.y - a.y;
let x = n;
let y = -m;
let c = m * a.y - n * a.x;
(x, y, c)
};
if r == 1 {
g2f(1, 0)
} else if r % 2 == 1 {
let r = r - 1;
let (x, y, c) = l(p * f101(r), p);
pairing_f(r, p, q) * G2P::new(q.a * x + c, q.b * y)
} else {
let r = r / 2;
let (x, y, c) = l(p * f101(r), -p * f101(r) * f101(2));
pairing_f(r, p, q).pow(2) * G2P::new(q.a * x + c, q.b * y)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::ops::Mul;
#[test]
fn test_parings() {
let p = G1P::generator().mul(f101(1));
let q = G2P::generator().mul(f101(3));
let a = f101(5);
let ê = |g1, g2| PBHPairing::pairing(g1, g2);
// ê(aP, Q) = ê(P,aQ)
assert_eq!(ê(p * a, q), ê(p, q * a));
// ê(aP,Q) = ê(P,Q)^a
assert_eq!(ê(p * a, q), ê(p, q).pow(a.as_u64()));
// ê(aP,Q) = ê((a-1)p,Q) * ê(P,Q)
assert_eq!(ê(p * a, q), ê(p * (a - f101(1)), q) * ê(p, q));
}
}
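For these toy parameters the final exponentiation in `pairing` uses the exponent

$$\frac{p^k - 1}{r} = \frac{101^2 - 1}{17} = \frac{10200}{17} = 600,$$

which is the same 600 that appears in the `pow(600)` test vector in `src/pbh/gt.rs`.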


@@ -1,21 +1,620 @@
#![allow(dead_code, unused_imports)]
#![allow(clippy::many_single_char_names)]
use std::fmt::Display;
use crate::{
ec::{GTPoint, Pairing},
poly::Field,
};
use super::field::Field;
use super::poly::Poly;
use super::{
constraints::*,
ec::{G1Point, G2Point},
matrix::Matrix,
poly::Poly,
};
pub type F17 = Field<17>;
pub type P17 = Poly<17>;
pub type F101 = Field<101>;
pub fn f101(n: u64) -> F101 {
F101::from(n)
pub trait PlonkTypes: PartialEq {
type GF: Field;
type SF: Field;
type G1: G1Point<S = Self::GF>;
type G2: G2Point<S = Self::GF>;
type GT: GTPoint;
type E: Pairing<G1 = Self::G1, G2 = Self::G2, GT = Self::GT>;
fn gf(sf: Self::SF) -> Self::GF;
}
pub fn f17(n: u64) -> F17 {
F17::from(n)
pub struct SRS<P: PlonkTypes> {
pub g1s: Vec<P::G1>,
pub g2_1: P::G2,
pub g2_s: P::G2,
}
pub fn p17(n: &[i64]) -> P17 {
P17::from(n)
impl<P: PlonkTypes> SRS<P> {
pub fn create(s: P::GF, n: usize) -> Self {
let mut g1s = Vec::new();
let mut s_pow = s;
g1s.push(P::G1::generator());
for _ in 0..n {
g1s.push(P::G1::generator() * s_pow);
s_pow = s_pow * s;
}
Self {
g1s,
g2_1: P::G2::generator(),
g2_s: P::G2::generator() * s,
}
}
// evaluate a polynomial at the secret point s using the SRS G1 points
pub fn eval_at_s(&self, vs: &Poly<P::SF>) -> P::G1 {
vs.coeffs()
.iter()
.enumerate()
.fold(G1Point::identity(), |acc, (n, v)| {
acc + self.g1s[n] * P::gf(*v)
})
}
}
#[derive(Debug, PartialEq)]
pub struct Proof<P: PlonkTypes> {
pub a_s: P::G1,
pub b_s: P::G1,
pub c_s: P::G1,
pub z_s: P::G1,
pub t_lo_s: P::G1,
pub t_mid_s: P::G1,
pub t_hi_s: P::G1,
pub w_z_s: P::G1,
pub w_z_omega_s: P::G1,
pub a_z: P::SF,
pub b_z: P::SF,
pub c_z: P::SF,
pub s_sigma_1_z: P::SF,
pub s_sigma_2_z: P::SF,
pub r_z: P::SF,
pub z_omega_z: P::SF,
}
pub struct Challange<P: PlonkTypes> {
pub alpha: P::SF,
pub beta: P::SF,
pub gamma: P::SF,
pub z: P::SF,
pub v: P::SF,
}
pub struct Plonk<P: PlonkTypes> {
srs: SRS<P>,
omega: P::SF,
h: Vec<P::SF>,
h_pows_inv: Matrix<P::SF>,
k1: P::SF,
k2: P::SF,
k1_h: Vec<P::SF>,
k2_h: Vec<P::SF>,
z_h_x: Poly<P::SF>,
}
impl<P: PlonkTypes> Plonk<P> {
pub fn new(srs: SRS<P>, omega: P::SF, omega_pows: usize, k1: P::SF, k2: P::SF) -> Self {
// These roots of unity are generated by a single generator (called omega):
// its successive powers form the set H
let h: Vec<_> = (0..omega_pows).map(|n| omega.pow(n as u64)).collect();
// We need to label all of the values in our assignment with different field elements.
// To do this, we will use the roots of unity H along with two cosets of H.
// We can get these cosets by multiplying each element of H by each of two constants: $k_1$ and $k_2$.
// The constant $k_1$ is chosen so that it is not an element of $H$, and $k_2$ is chosen so that it is
// neither an element of H nor $k_1H$. This ensures we have all the field elements to use as labels.
// $H$ will be used to index $a$ values, $k_1H$ for $b$ values, and $k_2H$ for $c$ values
assert!(!h.contains(&k1));
assert!(!h.contains(&k2));
let k1_h: Vec<_> = h.iter().map(|r| *r * k1).collect(); // k1_h is a coset of H
assert!(!k1_h.contains(&k2));
let k2_h: Vec<_> = h.iter().map(|r| *r * k2).collect(); // k2_h is a coset of H
// At some point we will want to build polynomials that evaluate to specific values
// at the roots of unity, for instance at the $a$ values:
// $f_a(x)$ = the polynomial such that
// $f_a(w^0)$=a[0]
// $f_a(w^1)$=a[1]
// ...
// $f_a(w^n)$=a[n]
//
// here we build h_pows_inv, an interpolation matrix: the values we want to
// interpolate (as a column vector) are multiplied by this inverse matrix to
// obtain the coefficients of the interpolating polynomial.
let mut h_pows = Matrix::<P::SF>::zero(h.len(), h.len());
for c in 0..h_pows.cols() {
for r in 0..h_pows.rows() {
h_pows[(r, c)] = h[r].pow(c as u64);
}
}
let h_pows_inv = h_pows.inv();
// The polynomial $Z_H$ is the polynomial that is zero on all the elements of
// our subgroup $H$.
let z_h_x = Poly::z(&h);
Plonk {
srs,
omega,
h,
h_pows_inv,
k1,
k2,
k1_h,
k2_h,
z_h_x,
}
}
fn interpolate(&self, vv: &[P::SF]) -> Poly<P::SF> {
(&self.h_pows_inv * Matrix::<_>::new(vv, vv.len(), 1)).into_poly()
}
fn copy_constraints_to_roots(&self, c: &[CopyOf]) -> Vec<P::SF> {
c.iter()
.map(|c| match c {
CopyOf::A(n) => self.h[n - 1],
CopyOf::B(n) => self.k1_h[n - 1],
CopyOf::C(n) => self.k2_h[n - 1],
})
.collect::<Vec<_>>()
}
pub fn prove(
&self,
constraints: &Constrains<P::SF>,
assigments: &Assigments<P::SF>,
challange: &Challange<P>,
rand: [P::SF; 9],
) -> Proof<P> {
// check that the assignment satisfies the constraints
assert!(constraints.satisfies(assigments));
// In a non-interactive protocol, these values are derived from a
// cryptographic hash of a transcript of the Prover's process. These values
// would be unpredictable by either the Prover or the Verifier, but each party
// is able to compute the same challenge values using the same hash function
// on the transcript. This is known as the Fiat-Shamir transform, which turns
// an interactive protocol into a non-interactive one.
let Challange {
alpha,
beta,
gamma,
z,
v,
} = challange;
let n = constraints.c_a.len() as u64;
// create sigmas, mappings from the copy constraints to roots of unity elements
// ---------------------------------------------------------------------------
let sigma_1 = self.copy_constraints_to_roots(&constraints.c_a);
let sigma_2 = self.copy_constraints_to_roots(&constraints.c_b);
let sigma_3 = self.copy_constraints_to_roots(&constraints.c_c);
// create a set of polynomials that evaluate, at the roots of unity, to
// all the components of the plonk circuit
//
// (a,b,c) : assigments / values
// (o,m,l,r,c) : gate constraints
// (sigma1, sigma2, sigma3) : copy constraints
// ---------------------------------------------------------------------------
let f_a_x = self.interpolate(&assigments.a);
let f_b_x = self.interpolate(&assigments.b);
let f_c_x = self.interpolate(&assigments.c);
let q_o_x = self.interpolate(&constraints.q_o);
let q_m_x = self.interpolate(&constraints.q_m);
let q_l_x = self.interpolate(&constraints.q_l);
let q_r_x = self.interpolate(&constraints.q_r);
let q_c_x = self.interpolate(&constraints.q_c);
let s_sigma_1 = self.interpolate(&sigma_1);
let s_sigma_2 = self.interpolate(&sigma_2);
let s_sigma_3 = self.interpolate(&sigma_3);
// round 1 - eval a(x), b(x), c(x) at s
// ---------------------------------------------------------------------------
let (b1, b2, b3, b4, b5, b6) = (rand[0], rand[1], rand[2], rand[3], rand[4], rand[5]);
let a_x = Poly::new(vec![b2, b1]) * &self.z_h_x + f_a_x;
let b_x = Poly::new(vec![b4, b3]) * &self.z_h_x + f_b_x;
let c_x = Poly::new(vec![b6, b5]) * &self.z_h_x + f_c_x;
// output of the first step
let a_s = self.srs.eval_at_s(&a_x);
let b_s = self.srs.eval_at_s(&b_x);
let c_s = self.srs.eval_at_s(&c_x);
// round 2 - eval accumulator vector polynomial at s
// ---------------------------------------------------------------------------
// check https://vitalik.ca/general/2019/09/22/plonk.html
//
// alpha, beta and gamma are "random" values that come from H(transcript)
// b7, b8, b9 are random
//
let (b7, b8, b9) = (rand[6], rand[7], rand[8]);
// create the accumulator vector; gate by gate it accumulates the ratio of the
// wire values combined with their own positions over the wire values combined
// with their permuted positions, so the running product returns to one exactly
// when the copy constraints hold
//
let mut acc = vec![P::SF::one()];
for i in 1..n as usize {
let a = assigments.a[i - 1];
let b = assigments.b[i - 1];
let c = assigments.c[i - 1];
// omega_pow is the root of unity
let omega_pow = self.omega.pow((i as u64) - 1);
// combine each wire value with its index position
let dend = (a + *beta * omega_pow + gamma)
* (b + *beta * self.k1 * omega_pow + gamma)
* (c + *beta * self.k2 * omega_pow + gamma);
// combine each wire value with its *permuted* index position
let dsor = (a + *beta * s_sigma_1.eval(&omega_pow) + gamma)
* (b + *beta * s_sigma_2.eval(&omega_pow) + gamma)
* (c + *beta * s_sigma_3.eval(&omega_pow) + gamma);
let v = acc[i - 1] * (dend / dsor).unwrap();
acc.push(v);
}
// the accumulator vector is interpolated into a polynomial acc(x)
// and this is used to create the polynomial z.
let acc_x = self.interpolate(&acc);
let z_x = Poly::new(vec![b9, b8, b7]) * &self.z_h_x + acc_x;
// Then z is evaluated at the secret number s using the SRS from the setup.
// output of second step
let z_s = self.srs.eval_at_s(&z_x);
// round 3 - compute the quotient polynomial
// ---------------------------------------------------------------------------
// Next comes the most massive computation of the entire protocol.
// Our goal is to compute the polynomial t, which will be of degree 3n+5 for n gates.
// The polynomial t encodes the majority of the information contained in our circuit
// and assignments all at once.
// L1 refers to a Lagrange basis polynomial over our roots of unity H.
// Specifically L1(1)=1, but takes the value 0 on each of the other roots of unity.
// The coefficients for L1 can be found by interpolating the vector
let lagrange_vector: Vec<_> = std::iter::once(P::SF::one())
.chain(std::iter::repeat(P::SF::zero()))
.take(self.h.len())
.collect();
let l_1_x = self.interpolate(&lagrange_vector);
// public inputs
let p_i_x = P::SF::zero().as_poly(); // Public input
// helper variables
let a_x_b_x_q_m_x = (&a_x * &b_x) * &q_m_x;
let a_x_q_l_x = &a_x * &q_l_x;
let b_x_q_r_x = &b_x * &q_r_x;
let c_x_q_o_x = &c_x * &q_o_x;
let alpha_a_x_beta_x_gamma = &(&a_x + &Poly::new(vec![*gamma, *beta])) * alpha;
let b_x_beta_k1_x_gamma = &b_x + &Poly::new(vec![*gamma, *beta * self.k1]);
let c_x_beta_k2_x_gamma = &c_x + &Poly::new(vec![*gamma, *beta * self.k2]);
let z_omega_x = Poly::new(
z_x.coeffs()
.iter()
.enumerate()
.map(|(n, c)| *c * self.omega.pow(n as u64))
.collect::<Vec<_>>(),
);
let alpha_a_x_beta_s_sigma1_x_gamma = (&a_x + &s_sigma_1 * beta + gamma) * alpha;
let b_x_beta_s_sigma2_x_gamma = &b_x + &s_sigma_2 * beta + gamma;
let c_x_beta_s_sigma3_x_gamma = &c_x + &s_sigma_3 * beta + gamma;
let alpha_2_z_x_1_l_1_x = ((&z_x + (-P::SF::one()).as_poly()) * alpha.pow(2)) * &l_1_x;
// compute t(x)
let t_1_z_h = a_x_b_x_q_m_x + a_x_q_l_x + b_x_q_r_x + c_x_q_o_x + p_i_x + &q_c_x;
let t_2_z_h = alpha_a_x_beta_x_gamma * b_x_beta_k1_x_gamma * c_x_beta_k2_x_gamma * &z_x;
let t_3_z_h = alpha_a_x_beta_s_sigma1_x_gamma
* b_x_beta_s_sigma2_x_gamma
* c_x_beta_s_sigma3_x_gamma
* &z_omega_x;
let t_4_z_h = alpha_2_z_x_1_l_1_x;
let (t_x, rem) = (t_1_z_h + t_2_z_h - t_3_z_h + t_4_z_h) / self.z_h_x.clone();
assert_eq!(rem, Poly::zero());
// It turns out that for n constraints, t will have degree 3n+5, which is too large to use the SRS
// from the setup phase in Part 1. However we can break t into three parts of degree n+1 each.
// Each of these polynomials will use n+2 coefficients from t(x)
let t_hi_x = Poly::new(t_x.coeffs()[12..18].to_owned());
let t_mid_x = Poly::new(t_x.coeffs()[6..12].to_owned());
let t_lo_x = Poly::new(t_x.coeffs()[0..6].to_owned());
// After dividing into three parts, we evaluate each part at s using the SRS.
// output of the third step
let t_hi_s = self.srs.eval_at_s(&t_hi_x);
let t_mid_s = self.srs.eval_at_s(&t_mid_x);
let t_lo_s = self.srs.eval_at_s(&t_lo_x);
// round 4 - compute linearization polynomial
// ---------------------------------------------------------------------------
// we create a polynomial r that is a kind of partner to t, where many of the polynomials that were
// included in t are replaced by field elements that are evaluations of those polynomials at a
// challenge value 𝔷.
let a_z = a_x.eval(z);
let b_z = b_x.eval(z);
let c_z = c_x.eval(z);
let s_sigma_1_z = s_sigma_1.eval(z);
let s_sigma_2_z = s_sigma_2.eval(z);
let t_z = t_x.eval(z);
let z_omega_z = z_omega_x.eval(z);
let a_z_b_z_q_m_x = &q_m_x * a_z * b_z;
let a_z_q_l_x = &q_l_x * a_z;
let b_z_q_r_x = &q_r_x * b_z;
let c_z_q_o_x = &q_o_x * c_z;
let r_1_x = a_z_b_z_q_m_x + a_z_q_l_x + b_z_q_r_x + c_z_q_o_x + q_c_x;
let r_2_x = &z_x
* ((a_z + *beta * z + gamma)
* (b_z + *beta * self.k1 * z + gamma)
* (c_z + *beta * self.k2 * z + gamma)
* alpha);
let r_3_x = &z_x
* (s_sigma_3 * *beta * z_omega_z)
* ((a_z + *beta * s_sigma_1_z + gamma) * (b_z + *beta * s_sigma_2_z + gamma) * alpha);
let r_4_x = &z_x * l_1_x.eval(z) * alpha.pow(2);
let r_x = r_1_x + r_2_x + r_3_x + r_4_x;
// compute linearization evaluation
let r_z = r_x.eval(z);
// round 5
// ---------------------------------------------------------------------------
// we create two large polynomials that combine all the polynomials we've been
// using so far and we output commitments to them.
// compute opening proof polynomial w_z_x
let w_z_x = (t_lo_x + t_mid_x * z.pow(n + 2) + t_hi_x * z.pow(2 * n + 4) - t_z)
+ (r_x - r_z) * v
+ (a_x - a_z) * v.pow(2)
+ (b_x - b_z) * v.pow(3)
+ (c_x - c_z) * v.pow(4)
+ (s_sigma_1 - s_sigma_1_z) * v.pow(5)
+ (s_sigma_2 - s_sigma_2_z) * v.pow(6);
let (w_z_x, rem) = w_z_x / Poly::new(vec![-*z, P::SF::one()]);
assert_eq!(rem, Poly::zero());
// compute opening proof polynomial w_z_omega_x
let (w_z_omega_x, rem) =
(z_x - z_omega_z) / Poly::new(vec![-*z * self.omega, P::SF::one()]);
assert_eq!(rem, Poly::zero());
// compute the opening proof polynomials at s
let w_z_s = self.srs.eval_at_s(&w_z_x);
let w_z_omega_s = self.srs.eval_at_s(&w_z_omega_x);
Proof {
a_s,
b_s,
c_s,
z_s,
t_lo_s,
t_mid_s,
t_hi_s,
w_z_s,
w_z_omega_s,
a_z,
b_z,
c_z,
s_sigma_1_z,
s_sigma_2_z,
r_z,
z_omega_z,
}
}
pub fn verify(
&self,
constraints: &Constrains<P::SF>,
proof: &Proof<P>,
challange: &Challange<P>,
rand: [P::SF; 1],
) -> bool {
let Proof {
a_s,
b_s,
c_s,
z_s,
t_lo_s,
t_mid_s,
t_hi_s,
w_z_s,
w_z_omega_s,
a_z,
b_z,
c_z,
s_sigma_1_z,
s_sigma_2_z,
r_z,
z_omega_z,
} = proof;
let Challange {
alpha,
beta,
gamma,
z,
v,
} = challange;
// verifier preprocessing
// ---------------------------------------------------------------------------
let sigma_1 = self.copy_constraints_to_roots(&constraints.c_a);
let sigma_2 = self.copy_constraints_to_roots(&constraints.c_b);
let sigma_3 = self.copy_constraints_to_roots(&constraints.c_c);
let q_m_s = self.srs.eval_at_s(&self.interpolate(&constraints.q_m));
let q_l_s = self.srs.eval_at_s(&self.interpolate(&constraints.q_l));
let q_r_s = self.srs.eval_at_s(&self.interpolate(&constraints.q_r));
let q_o_s = self.srs.eval_at_s(&self.interpolate(&constraints.q_o));
let q_c_s = self.srs.eval_at_s(&self.interpolate(&constraints.q_c));
let sigma_1_s = self.srs.eval_at_s(&self.interpolate(&sigma_1));
let sigma_2_s = self.srs.eval_at_s(&self.interpolate(&sigma_2));
let sigma_3_s = self.srs.eval_at_s(&self.interpolate(&sigma_3));
let u = rand[0];
// Step 1. Validate proof points in G1
if !a_s.in_curve()
|| !b_s.in_curve()
|| !c_s.in_curve()
|| !z_s.in_curve()
|| !t_lo_s.in_curve()
|| !t_mid_s.in_curve()
|| !t_hi_s.in_curve()
|| !w_z_s.in_curve()
|| !w_z_omega_s.in_curve()
{
return false;
}
// Step 2. Validate proof fields in SF
if !a_z.in_field()
|| !b_z.in_field()
|| !c_z.in_field()
|| !s_sigma_1_z.in_field()
|| !s_sigma_2_z.in_field()
|| !r_z.in_field()
|| !z_omega_z.in_field()
{
return false;
}
// Step 3. We do not have public inputs, nothing to do
// Step 4. Evaluate z_h at z
let z_h_z = self.z_h_x.eval(z);
// Step 5. Evaluate the first Lagrange polynomial at z
let lagrange_vector: Vec<_> = std::iter::once(P::SF::one())
.chain(std::iter::repeat(P::SF::zero()))
.take(self.h.len())
.collect();
let l_1_z = self.interpolate(&lagrange_vector).eval(z);
// Step 6. We do not have public inputs, nothing to do
let p_i_z = P::SF::zero();
// Step 7. Compute the quotient polynomial evaluation
let a_z_beta_s_sigma_1_z_gamma = *beta * s_sigma_1_z + gamma + a_z;
let b_z_beta_s_sigma_2_z_gamma = *beta * s_sigma_2_z + gamma + b_z;
let c_z_gamma = *c_z + gamma;
let l_1_z_alpha_2 = l_1_z * alpha.pow(2);
let t_z = ((*r_z + p_i_z
- (a_z_beta_s_sigma_1_z_gamma * b_z_beta_s_sigma_2_z_gamma * c_z_gamma * z_omega_z)
- l_1_z_alpha_2)
/ z_h_z)
.unwrap();
// Step 8. Compute the first part of the batched polynomial commitment
let d_1_s = q_m_s * P::gf(*a_z * b_z * v)
+ q_l_s * P::gf(*a_z * v)
+ q_r_s * P::gf(*b_z * v)
+ q_o_s * P::gf(*c_z * v)
+ q_c_s * P::gf(*v);
let d_2_s = *z_s
* P::gf(
(*a_z + *beta * z + gamma)
* (*b_z + *beta * self.k1 * z + gamma)
* (*c_z + *beta * self.k2 * z + gamma)
* alpha
* v
+ l_1_z * alpha.pow(2) * v
+ u,
);
let d_3_s = sigma_3_s
* P::gf(
(*a_z + *beta * s_sigma_1_z + gamma)
* (*b_z + *beta * s_sigma_2_z + gamma)
* alpha
* v
* beta
* z_omega_z,
);
let d_s = d_1_s + d_2_s + -d_3_s;
// Step 9. Compute the full batched polynomial commitment
let n = constraints.c_a.len() as u64;
let f_s = t_lo_s.to_owned()
+ t_mid_s.to_owned() * P::gf(z.pow(n + 2))
+ t_hi_s.to_owned() * P::gf(z.pow(2 * n + 4))
+ d_s
+ a_s.to_owned() * P::gf(v.pow(2))
+ b_s.to_owned() * P::gf(v.pow(3))
+ c_s.to_owned() * P::gf(v.pow(4))
+ sigma_1_s.to_owned() * P::gf(v.pow(5))
+ sigma_2_s.to_owned() * P::gf(v.pow(6));
// Step 10. Compute group encoded batch evaluation [E]
let e_s = self.srs.eval_at_s(&Poly::from(&[1]))
* P::gf(
t_z + *v * r_z
+ v.pow(2) * a_z
+ v.pow(3) * b_z
+ v.pow(4) * c_z
+ v.pow(5) * s_sigma_1_z
+ v.pow(6) * s_sigma_2_z
+ u * z_omega_z,
);
// Step 11. Batch validate all equations
let e_1_q1 = *w_z_s + *w_z_omega_s * P::gf(u);
let e_1_q2 = self.srs.g2_s;
let e_2_q1 = *w_z_s * P::gf(*z) + *w_z_omega_s * P::gf(u * z * self.omega) + f_s + -e_s;
let e_2_q2 = self.srs.g2_1;
let e_1 = P::E::pairing(e_1_q1, e_1_q2);
let e_2 = P::E::pairing(e_2_q1, e_2_q2);
e_1 == e_2
}
}
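Two concrete values for the toy parameters used in the tests ($\omega = 4$ over $\mathbb{F}_{17}$, toxic waste $s = 2$, $n = 6$), stated here only as a sanity check and not as part of the commit: the vanishing polynomial over $H=\{1,4,16,13\}$ collapses to

$$Z_H(x) = (x-1)(x-4)(x-16)(x-13) = x^4 - 1 \pmod{17},$$

and `SRS::create(f101(2), 6)` produces the $G_1$ powers $\{G,\,2G,\,4G,\,8G,\,16G,\,32G,\,64G\}$ with $G=(1,2)$, plus the $G_2$ generator and twice the $G_2$ generator on the extension side.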


@@ -1,6 +1,4 @@
//! This module provides an implementation of polynomials over a generic `Field`
pub use super::field::Field;
pub use crate::ec::Field;
use std::{
cmp::max,
fmt::{Display, Formatter, Result},
@@ -8,14 +6,13 @@ use std::{
ops::{Add, AddAssign, Div, Mul, MulAssign, Sub, SubAssign},
};
/// A polynomial with generic `Field` coefficients
#[derive(Clone, Debug, PartialEq)]
pub struct Poly<const M: u64>(Vec<Field<M>>);
pub struct Poly<F: Field>(Vec<F>);
impl<const M: u64> Poly<M> {
impl<F: Field> Poly<F> {
/// Creates a new Poly from its coefficients `coeffs`, the first element being the coefficient for x^0.
/// For safety, the input is normalized (trailing zeroes are removed)
pub fn new(coeffs: Vec<Field<M>>) -> Self {
pub fn new(coeffs: Vec<F>) -> Self {
let mut poly = Poly(coeffs);
poly.normalize();
poly
@@ -23,35 +20,24 @@ impl<const M: u64> Poly<M> {
/// Creates a new polynomial from signed `coeffs` that fit in i64 values
pub fn from(coeffs: &[i64]) -> Self {
Poly::new(
coeffs
.iter()
.map(|n| {
if *n >= 0 {
Field::<M>::from(*n as u64)
} else {
-Field::<M>::from(-*n as u64)
}
})
.collect::<Vec<Field<M>>>(),
)
Poly::new(coeffs.iter().map(|n| F::from(*n)).collect::<Vec<F>>())
}
pub fn coeffs(&self) -> &[Field<M>] {
pub fn coeffs(&self) -> &[F] {
&self.0
}
/// Returns p(x)=0
pub fn zero() -> Self {
Poly(vec![Field::<M>::zero()])
Poly(vec![F::zero()])
}
/// Returns p(x)=1
pub fn one() -> Self {
Poly(vec![Field::<M>::one()])
Poly(vec![F::one()])
}
/// Creates a polinomial that contains a set of `p` points, by using lagrange
/// see <https://en.wikipedia.org/wiki/Lagrange_polynomial>
pub fn lagrange(p: &[(Field<M>, Field<M>)]) -> Self {
pub fn lagrange(p: &[(F, F)]) -> Self {
let k = p.len();
let mut l = Poly::zero();
for j in 0..k {
@@ -59,41 +45,38 @@ impl<const M: u64> Poly<M> {
for i in 0..k {
if i != j {
let c = (p[j].0 - p[i].0).inv();
assert!(
bool::from(c.is_some()),
"lagrange polinomial x points must be unique"
);
assert!(c.is_some(), "lagrange polinomial x points must be unique");
let c = c.unwrap();
l_j = &l_j * &Poly::new(vec![-(c * p[i].0), c]);
}
}
l += &(&l_j * &p[j].1);
l += &(&l_j * p[j].1);
}
l
}
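// Illustrative check (not part of this commit): for two points (1,2) and (3,4) the
// loop above builds l(x) = 2*(x-3)/(1-3) + 4*(x-1)/(3-1) = x + 1, and indeed
// l(1) = 2 and l(3) = 4; over F17 the same arithmetic reads
// 16x + 3 + 2x + 15 = 18x + 18 = x + 1.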
/// Creates a polinomial that has roots at the selected points (x-p_1)(x-p_2)...(x-p_n)
pub fn z(points: &[Field<M>]) -> Poly<M> {
points.iter().fold(Poly::one(), |acc, x| {
&acc * &Poly::new(vec![-x, Field::<M>::one()])
})
pub fn z(points: &[F]) -> Self {
points
.iter()
.fold(Poly::one(), |acc, x| &acc * &Poly::new(vec![-*x, F::one()]))
}
/// Evals the polinomial at the desired point
pub fn eval(&self, x: &Field<M>) -> Field<M> {
let mut x_pow = Field::<M>::one();
pub fn eval(&self, x: &F) -> F {
let mut x_pow = F::one();
let mut y = self.0[0];
for (i, _) in self.0.iter().enumerate().skip(1) {
x_pow *= x;
y += &(x_pow * self.0[i]);
y += x_pow * self.0[i];
}
y
}
/// Evals the polinomial supplying the `x_pows` x^0, x^1, x^2
pub fn eval_with_pows(&self, x_pow: &[Field<M>]) -> Field<M> {
pub fn eval_with_pows(&self, x_pow: &[F]) -> F {
let mut y = self.0[0];
for (i, _) in self.0.iter().enumerate() {
y += &(x_pow[i] * self.0[i]);
y += x_pow[i] * self.0[i];
}
y
}
@@ -106,12 +89,11 @@ impl<const M: u64> Poly<M> {
/// Normalizes the coefficients, removing ending zeroes
pub fn normalize(&mut self) {
if self.0.len() > 1 && self.0[self.0.len() - 1].is_zero() {
let first_non_zero = self.0.iter().rev().position(|p| *p != Field::<M>::zero());
let first_non_zero = self.0.iter().rev().position(|p| !p.is_zero());
if let Some(first_non_zero) = first_non_zero {
self.0
.resize(self.0.len() - first_non_zero, Field::<M>::zero());
self.0.resize(self.0.len() - first_non_zero, F::zero());
} else {
self.0.resize(1, Field::<M>::zero());
self.0.resize(1, F::zero());
}
}
}
@@ -122,21 +104,21 @@ impl<const M: u64> Poly<M> {
}
/// Sets the `i`-th coefficient to the selected `p` value
pub fn set(&mut self, i: usize, p: Field<M>) {
pub fn set(&mut self, i: usize, p: F) {
if self.0.len() < i + 1 {
self.0.resize(i + 1, Field::<M>::zero());
self.0.resize(i + 1, F::zero());
}
self.0[i] = p;
self.normalize();
}
/// Returns the `i`-th coefficient
pub fn get(&mut self, i: usize) -> Option<&Field<M>> {
pub fn get(&mut self, i: usize) -> Option<&F> {
self.0.get(i)
}
}
impl<const M: u64> Display for Poly<M> {
impl<F: Field> Display for Poly<F> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut first: bool = true;
for i in 0..=self.degree() {
@@ -163,8 +145,8 @@ impl<const M: u64> Display for Poly<M> {
}
/* main arithmetics */
impl<const M: u64> AddAssign<&Poly<M>> for Poly<M> {
fn add_assign(&mut self, rhs: &Poly<M>) {
impl<F: Field> AddAssign<&Poly<F>> for Poly<F> {
fn add_assign(&mut self, rhs: &Poly<F>) {
for n in 0..max(self.0.len(), rhs.0.len()) {
if n >= self.0.len() {
self.0.push(rhs.0[n]);
@@ -176,22 +158,22 @@ impl<const M: u64> AddAssign<&Poly<M>> for Poly<M> {
}
}
impl<const M: u64> AddAssign<&Field<M>> for Poly<M> {
fn add_assign(&mut self, rhs: &Field<M>) {
impl<F: Field> AddAssign<&F> for Poly<F> {
fn add_assign(&mut self, rhs: &F) {
self.0[0] += rhs;
self.normalize();
}
}
impl<const M: u64> SubAssign<&Field<M>> for Poly<M> {
fn sub_assign(&mut self, rhs: &Field<M>) {
impl<F: Field> SubAssign<&F> for Poly<F> {
fn sub_assign(&mut self, rhs: &F) {
self.0[0] -= rhs;
self.normalize();
}
}
impl<const M: u64> SubAssign<&Poly<M>> for Poly<M> {
fn sub_assign(&mut self, rhs: &Poly<M>) {
impl<F: Field> SubAssign<&Poly<F>> for Poly<F> {
fn sub_assign(&mut self, rhs: &Poly<F>) {
for n in 0..max(self.0.len(), rhs.0.len()) {
if n >= self.0.len() {
self.0.push(rhs.0[n]);
@@ -203,10 +185,10 @@ impl<const M: u64> SubAssign<&Poly<M>> for Poly<M> {
}
}
impl<const M: u64> Mul<&Poly<M>> for &Poly<M> {
type Output = Poly<M>;
fn mul(self, rhs: &Poly<M>) -> Self::Output {
let mut mul: Vec<Field<M>> = iter::repeat(Field::<M>::zero())
impl<F: Field> Mul<&Poly<F>> for &Poly<F> {
type Output = Poly<F>;
fn mul(self, rhs: &Poly<F>) -> Self::Output {
let mut mul: Vec<F> = iter::repeat(F::zero())
.take(self.0.len() + rhs.0.len() - 1)
.collect();
for n in 0..self.0.len() {
@@ -220,8 +202,8 @@ impl<const M: u64> Mul<&Poly<M>> for &Poly<M> {
}
}
impl<const M: u64> MulAssign<&Field<M>> for Poly<M> {
fn mul_assign(&mut self, rhs: &Field<M>) {
impl<F: Field> MulAssign<&F> for Poly<F> {
fn mul_assign(&mut self, rhs: &F) {
if rhs.is_zero() {
*self = Poly::zero()
} else {
@@ -230,10 +212,10 @@ impl<const M: u64> MulAssign<&Field<M>> for Poly<M> {
}
}
impl<const M: u64> Div for Poly<M> {
type Output = (Poly<M>, Poly<M>);
impl<F: Field> Div for Poly<F> {
type Output = (Poly<F>, Poly<F>);
fn div(self, rhs: Poly<M>) -> Self::Output {
fn div(self, rhs: Poly<F>) -> Self::Output {
let (mut q, mut r) = (Poly::zero(), self);
while !r.is_zero() && r.degree() >= rhs.degree() {
let lead_r = r.0[r.0.len() - 1];
@@ -250,132 +232,130 @@ impl<const M: u64> Div for Poly<M> {
}
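// Illustrative usage (mirrors the tests below; not part of this commit):
//   let (q, r) = P::from(&[5, -6, 1]) / P::from(&[-1, 1]); // (x^2-6x+5)/(x-1)
//   assert_eq!(q, P::from(&[-5, 1]));                      // quotient x - 5
//   assert_eq!(r, P::zero());                              // exact division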
/* helpers */
impl<const M: u64> Add for Poly<M> {
type Output = Poly<M>;
fn add(self, rhs: Poly<M>) -> Self::Output {
impl<F: Field> Add for Poly<F> {
type Output = Poly<F>;
fn add(mut self, rhs: Poly<F>) -> Self::Output {
self += &rhs;
self
}
}
impl<F: Field> Add<Poly<F>> for &Poly<F> {
type Output = Poly<F>;
fn add(self, rhs: Poly<F>) -> Self::Output {
let mut v = self.clone();
v += &rhs;
v
}
}
impl<const M: u64> Add<Poly<M>> for &Poly<M> {
type Output = Poly<M>;
fn add(self, rhs: Poly<M>) -> Self::Output {
let mut v = self.clone();
v += &rhs;
v
}
}
impl<const M: u64> Add<&Poly<M>> for Poly<M> {
type Output = Poly<M>;
fn add(mut self, rhs: &Poly<M>) -> Self::Output {
impl<F: Field> Add<&Poly<F>> for Poly<F> {
type Output = Poly<F>;
fn add(mut self, rhs: &Poly<F>) -> Self::Output {
self += rhs;
self
}
}
impl<const M: u64> Add<&Poly<M>> for &Poly<M> {
type Output = Poly<M>;
fn add(self, rhs: &Poly<M>) -> Self::Output {
impl<F: Field> Add<&Poly<F>> for &Poly<F> {
type Output = Poly<F>;
fn add(self, rhs: &Poly<F>) -> Self::Output {
let mut v = self.clone();
v += rhs;
v
}
}
impl<const M: u64> Add<&Field<M>> for Poly<M> {
type Output = Poly<M>;
fn add(self, rhs: &Field<M>) -> Self::Output {
let mut cloned = self.clone();
cloned += rhs;
cloned
impl<F: Field> Add<&F> for Poly<F> {
type Output = Poly<F>;
fn add(mut self, rhs: &F) -> Self::Output {
self += rhs;
self
}
}
impl<const M: u64> Add<Field<M>> for Poly<M> {
type Output = Poly<M>;
fn add(self, rhs: Field<M>) -> Self::Output {
let mut cloned = self.clone();
cloned += &rhs;
cloned
impl<F: Field> Add<F> for Poly<F> {
type Output = Poly<F>;
fn add(mut self, rhs: F) -> Self::Output {
self += &rhs;
self
}
}
impl<const M: u64> Sub<Field<M>> for Poly<M> {
type Output = Poly<M>;
fn sub(self, rhs: Field<M>) -> Self::Output {
let mut cloned = self.clone();
cloned -= &rhs;
cloned
}
}
impl<const M: u64> Add<Field<M>> for &Poly<M> {
type Output = Poly<M>;
fn add(self, rhs: Field<M>) -> Self::Output {
let mut cloned = self.clone();
cloned += &rhs;
cloned
}
}
impl<const M: u64> Sub<Poly<M>> for Poly<M> {
type Output = Poly<M>;
fn sub(mut self, rhs: Poly<M>) -> Self::Output {
impl<F: Field> Sub<F> for Poly<F> {
type Output = Poly<F>;
fn sub(mut self, rhs: F) -> Self::Output {
self -= &rhs;
self
}
}
impl<const M: u64> Mul<&Poly<M>> for Poly<M> {
type Output = Poly<M>;
fn mul(self, rhs: &Poly<M>) -> Self::Output {
impl<F: Field> Add<F> for &Poly<F> {
type Output = Poly<F>;
fn add(self, rhs: F) -> Self::Output {
let mut cloned = self.clone();
cloned += &rhs;
cloned
}
}
impl<F: Field> Sub<Poly<F>> for Poly<F> {
type Output = Poly<F>;
fn sub(mut self, rhs: Poly<F>) -> Self::Output {
self -= &rhs;
self
}
}
impl<F: Field> Mul<&Poly<F>> for Poly<F> {
type Output = Poly<F>;
fn mul(self, rhs: &Poly<F>) -> Self::Output {
&self * rhs
}
}
impl<const M: u64> Mul<Poly<M>> for &Poly<M> {
type Output = Poly<M>;
fn mul(self, rhs: Poly<M>) -> Self::Output {
impl<F: Field> Mul<Poly<F>> for &Poly<F> {
type Output = Poly<F>;
fn mul(self, rhs: Poly<F>) -> Self::Output {
self * &rhs
}
}
impl<const M: u64> Mul<Poly<M>> for Poly<M> {
type Output = Poly<M>;
fn mul(self, rhs: Poly<M>) -> Self::Output {
impl<F: Field> Mul<Poly<F>> for Poly<F> {
type Output = Poly<F>;
fn mul(self, rhs: Poly<F>) -> Self::Output {
self * &rhs
}
}
impl<const M: u64> Mul<&Field<M>> for &Poly<M> {
type Output = Poly<M>;
fn mul(self, rhs: &Field<M>) -> Self::Output {
impl<F: Field> Mul<&F> for &Poly<F> {
type Output = Poly<F>;
fn mul(self, rhs: &F) -> Self::Output {
let mut m = self.clone();
m *= rhs;
m
}
}
impl<const M: u64> Mul<&Field<M>> for Poly<M> {
type Output = Poly<M>;
fn mul(self, rhs: &Field<M>) -> Self::Output {
impl<F: Field> Mul<&F> for Poly<F> {
type Output = Poly<F>;
fn mul(self, rhs: &F) -> Self::Output {
&self * rhs
}
}
impl<const M: u64> Mul<Field<M>> for Poly<M> {
type Output = Poly<M>;
fn mul(self, rhs: Field<M>) -> Self::Output {
#[allow(clippy::op_ref)]
impl<F: Field> Mul<F> for Poly<F> {
type Output = Poly<F>;
fn mul(self, rhs: F) -> Self::Output {
self * &rhs
}
}
impl<const M: u64> Mul<Field<M>> for &Poly<M> {
type Output = Poly<M>;
fn mul(self, rhs: Field<M>) -> Self::Output {
#[allow(clippy::op_ref)]
impl<F: Field> Mul<F> for &Poly<F> {
type Output = Poly<F>;
fn mul(self, rhs: F) -> Self::Output {
self * &rhs
}
}
@@ -383,8 +363,9 @@ impl<const M: u64> Mul<Field<M>> for &Poly<M> {
#[cfg(test)]
mod tests {
use super::*;
type F = Field<15485863>;
type P = Poly<15485863>;
use crate::utils::U64Field;
type F = U64Field<15485863>;
type P = Poly<F>;
#[test]
fn test_poly_add() {
@@ -446,10 +427,10 @@ mod tests {
#[test]
fn test_poly_lagrange_multi() {
let points = vec![
(F::from(1), F::from(2)),
(F::from(5), F::from(7)),
(F::from(7), F::from(9)),
(F::from(3), F::from(1)),
(F::from(1u64), F::from(2u64)),
(F::from(5u64), F::from(7u64)),
(F::from(7u64), F::from(9u64)),
(F::from(3u64), F::from(1u64)),
];
let l = Poly::lagrange(&points);
points.iter().for_each(|p| assert_eq!(l.eval(&p.0), p.1));
@@ -457,14 +438,14 @@ mod tests {
#[test]
fn test_poly_z() {
assert_eq!(
P::z(&vec![F::from(1), F::from(5)]),
P::z(&vec![F::from(1u64), F::from(5u64)]),
P::from(&[5, -6, 1]) // f(x) = (x-1)(x-5) = x^2-6x+5
);
}
#[test]
fn test_poly_eval() {
// check that (x^2+2x+1)(2) = 9
assert_eq!(P::from(&[1, 2, 1]).eval(&F::from(2)), F::from(9));
assert_eq!(P::from(&[1, 2, 1]).eval(&F::from(2u64)), F::from(9u64));
}
#[test]
fn test_poly_normalize() {


@@ -1,699 +0,0 @@
#![allow(dead_code, unused_imports)]
use super::constraints::*;
use super::ec::{g1f, g2f, pairing, G1Point, G2Point};
use super::field::Field;
use super::matrix::Matrix;
use super::plonk::{f101, f17, p17, F101, F17, P17};
use super::poly::Poly;
pub struct SRS {
pub g1s: Vec<G1Point>,
pub g2_1: G2Point,
pub g2_s: G2Point,
}
impl SRS {
pub fn create(s: F101, n: usize, g1: G1Point, g2: G2Point) -> SRS {
let mut g1s = Vec::new();
let mut s_pow = s;
g1s.push(g1);
for _ in 0..n {
g1s.push(g1 * s_pow);
s_pow = s_pow * s;
}
Self {
g1s,
g2_1: g2,
g2_s: g2 * s,
}
}
// evaluate a polinomial at secret point s using SRS G1s
pub fn eval_at_s(&self, vs: &Poly<17>) -> G1Point {
vs.coeffs()
.iter()
.enumerate()
.fold(G1Point::identity(), |acc, (n, v)| {
acc + self.g1s[n] * v.rebase::<101>()
})
}
}
#[derive(Debug, PartialEq)]
pub struct Proof {
a_s: G1Point,
b_s: G1Point,
c_s: G1Point,
z_s: G1Point,
t_lo_s: G1Point,
t_mid_s: G1Point,
t_hi_s: G1Point,
w_z_s: G1Point,
w_z_omega_s: G1Point,
a_z: Field<17>,
b_z: Field<17>,
c_z: Field<17>,
s_sigma_1_z: Field<17>,
s_sigma_2_z: Field<17>,
r_z: Field<17>,
z_omega_z: Field<17>,
}
pub struct Challange {
pub alpha: F17,
pub beta: F17,
pub gamma: F17,
pub z: F17,
pub v: F17,
}
struct Plonk {
srs: SRS,
omega: F17,
h: Vec<F17>,
h_pows_inv: Matrix<17>,
k1: F17,
k2: F17,
k1_h: Vec<F17>,
k2_h: Vec<F17>,
z_h_x: P17,
}
impl Plonk {
pub fn new(srs: SRS) -> Plonk {
let num_assigments = 4;
// We need as many roots of unity in $\mathbb{F}_{17}$ as we have assigments (4)
// The nth roots of unity of a field are the field elements x that satisfy $x^n = 1$.
//
let roots_of_unity: Vec<F17> = (0u64..17)
.map(|x| F17::from(x))
.filter(|x| x.pow(num_assigments) == F17::one())
.collect();
// These roots of unity should be expressible as the powers of a single generator
// So find the generator (called omega) that creates these roots of unity (H)
let (omega, h) = (|| {
for g in 0u64..17 {
let pows: Vec<_> = (0..num_assigments).map(|n| F17::from(g).pow(n)).collect();
let mut pows_sorted = pows.clone();
pows_sorted.sort();
if pows_sorted == roots_of_unity {
return (F17::from(g), pows);
}
}
unreachable!();
})();
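// Worked example for this field (illustrative): with num_assigments = 4 the elements
// satisfying $x^4 = 1$ in $\mathbb{F}_{17}$ are {1, 4, 13, 16}, and the search above
// lands on omega = 4, since 4^0=1, 4^1=4, 4^2=16, 4^3=13 (mod 17), so H = [1, 4, 16, 13].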
// We need to label all of the values in our assignment with different field elements.
// To do this, we will use the roots of unity H along with two cosets of H.
// We can get these cosets by multiplying each element of H by each of two constants: $k_1$ and $k_2$.
// The constant $k_1$ is chosen so that it is not an element of $H$, and $k_2$ is chosen so that it is
// neither an element of $H$ nor $k_1H$. This ensures we have enough distinct field elements to use as labels.
// $H$ will be used to index $a$ values, $k_1H$ for $b$ values, $k_2H$ for $c$ values
let (k1, k2) = (f17(2), f17(3));
assert!(!h.contains(&k1));
assert!(!h.contains(&k2));
let k1_h: Vec<F17> = h.iter().map(|r| *r * k1).collect();
let k2_h: Vec<F17> = h.iter().map(|r| *r * k2).collect();
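// Illustrative values: with H = [1, 4, 16, 13], k1 = 2 and k2 = 3 give the cosets
// k1_h = [2, 8, 15, 9] and k2_h = [3, 12, 14, 5]; together with H these are 12
// distinct labels, one per wire of the 4 gates.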
// At some point we will want to build polinomials that evaluate to specific values
// at the roots of unity, for instance at the $a$ values:
// $f_a(x)$ = polinomial that evaluates
// $f_a(w^0)$=a[0]
// $f_a(w^1)$=a[1]
// ...
// $f_a(w^n)$=a[n]
//
// here we create h_pows_inv, an interpolation matrix: the values that we want to
// interpolate are multiplied (as a column vector) by this matrix to obtain the
// coefficients of the interpolation polinomial.
let mut h_pows = Matrix::<17>::zero(h.len(), h.len());
for c in 0..h_pows.cols() {
for r in 0..h_pows.rows() {
h_pows[(r, c)] = h[r].pow(c as u64);
}
}
let h_pows_inv = h_pows.inv();
// The polynomial $Z_H$ is the polynomial that is zero on all the elements of
// our subgroup $H$.
let z_h_x = Poly::z(&h);
Plonk {
srs,
omega,
h,
h_pows_inv,
k1,
k2,
k1_h,
k2_h,
z_h_x,
}
}
fn interpolate(&self, vv: &[Field<17>]) -> P17 {
(&self.h_pows_inv * Matrix::<17>::new(&vv, vv.len(), 1)).into_poly()
}
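// e.g. (illustrative): interpolating the assignment column a = [3, 4, 5, 9] from the
// test below returns the unique polynomial of degree < 4 with f_a(h[i]) = a[i],
// i.e. f_a(1) = 3, f_a(4) = 4, f_a(16) = 5, f_a(13) = 9.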
fn copy_constraints_to_roots(&self, c: &[CopyOf]) -> Vec<F17> {
c.iter()
.map(|c| match c {
CopyOf::A(n) => self.h[n - 1],
CopyOf::B(n) => self.k1_h[n - 1],
CopyOf::C(n) => self.k2_h[n - 1],
})
.collect::<Vec<_>>()
}
pub fn prove(
&self,
constraints: &Constrains,
assigments: &Assigments,
challange: &Challange,
rand: [F17; 9],
) -> Proof {
// check that the constraints satisfies the assigments
assert!(constraints.satisfies(&assigments));
// In a non-interactive protocol, these values are derived from a
// cryptographic hash of a transcript of the Prover's process. These values
// would be unpredictable by either the Prover or the Verifier, but each party
// is able to compute the same challenge values by applying the same hash function
// to the transcript. This is known as the Fiat-Shamir transform, which turns
// an interactive protocol into a non-interactive one.
let Challange {
alpha,
beta,
gamma,
z,
v,
} = challange;
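// Hedged sketch of how such challenges could be derived non-interactively (not part
// of this commit, and DefaultHasher is NOT a cryptographic hash -- it only shows the
// data flow of hashing the transcript):
//
//     use std::collections::hash_map::DefaultHasher;
//     use std::hash::Hasher;
//
//     fn derive_challenge(transcript: &[u64]) -> F17 {
//         let mut hasher = DefaultHasher::new();
//         transcript.iter().for_each(|x| hasher.write_u64(*x));
//         f17(hasher.finish() % 17)
//     }
//
// e.g. beta would be derive_challenge(&[/* coordinates of a_s, b_s, c_s */]) once the
// round-1 commitments are known (the accessor list is hypothetical).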
let n = constraints.c_a.len() as u64;
// create sigmas, mappings from the copy constraints to roots of unity elements
// ---------------------------------------------------------------------------
let sigma_1 = self.copy_constraints_to_roots(&constraints.c_a);
let sigma_2 = self.copy_constraints_to_roots(&constraints.c_b);
let sigma_3 = self.copy_constraints_to_roots(&constraints.c_c);
// create a set of polinomials, those polinomials evaluates at roots of unity
// for all the components of the plonk circuit
//
// (a,b,c) : assigments / values
// (o,m,l,r,c) : gate constraints
// (sigma1, sigma2, sigma3) : copy constraints
// ---------------------------------------------------------------------------
let f_a_x = self.interpolate(&assigments.a);
let f_b_x = self.interpolate(&assigments.b);
let f_c_x = self.interpolate(&assigments.c);
let q_o_x = self.interpolate(&constraints.q_o);
let q_m_x = self.interpolate(&constraints.q_m);
let q_l_x = self.interpolate(&constraints.q_l);
let q_r_x = self.interpolate(&constraints.q_r);
let q_c_x = self.interpolate(&constraints.q_c);
let s_sigma_1 = self.interpolate(&sigma_1);
let s_sigma_2 = self.interpolate(&sigma_2);
let s_sigma_3 = self.interpolate(&sigma_3);
// round 1 - eval a(x), b(x), c(x) at s
// ---------------------------------------------------------------------------
let (b1, b2, b3, b4, b5, b6) = (rand[0], rand[1], rand[2], rand[3], rand[4], rand[5]);
let a_x = P17::new(vec![b2, b1]) * &self.z_h_x + f_a_x;
let b_x = P17::new(vec![b4, b3]) * &self.z_h_x + f_b_x;
let c_x = P17::new(vec![b6, b5]) * &self.z_h_x + f_c_x;
// output of the first step
let a_s = self.srs.eval_at_s(&a_x);
let b_s = self.srs.eval_at_s(&b_x);
let c_s = self.srs.eval_at_s(&c_x);
// round 2 - eval accumulator vector polinomial at s
// ---------------------------------------------------------------------------
// check https://vitalik.ca/general/2019/09/22/plonk.html
//
// alpha, beta and gamma are "random" values that come from H(transcript)
// b7, b8, b9 are random
//
let (b7, b8, b9) = (rand[6], rand[7], rand[8]);
// create the accumulator vector; each entry extends a running product by the ratio
// of the wire values combined with their own labels (roots of unity and their cosets)
// over the same values combined with their *permuted* labels:
//
let mut acc = vec![f17(1)];
for i in 1..n as usize {
let a = assigments.a[i - 1];
let b = assigments.b[i - 1];
let c = assigments.c[i - 1];
// omega_pow is the root of unity
let omega_pow = self.omega.pow((i as u64) - 1);
// combine each wire value with its index position
let dend = (a + beta * omega_pow + gamma)
* (b + beta * self.k1 * omega_pow + gamma)
* (c + beta * self.k2 * omega_pow + gamma);
// combine each wire value with its *permuted* index position
let dsor = (a + beta * s_sigma_1.eval(&omega_pow) + gamma)
* (b + beta * s_sigma_2.eval(&omega_pow) + gamma)
* (c + beta * s_sigma_3.eval(&omega_pow) + gamma);
let v = acc[i - 1] * (dend / dsor).unwrap();
acc.push(v);
}
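// Note (illustrative): when the copy constraints are satisfied the full product
// $\prod_i \text{dend}_i / \text{dsor}_i$ over all n gates equals 1, so the
// accumulator would wrap back to 1 after the last gate; this is exactly what the
// permutation argument later checks by comparing z(x) against z(omega*x).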
// the accumulator vector is interpolated into a polynomial acc(x)
// and this is used to create the polynomial z.
let acc_x = self.interpolate(&acc);
let z_x = P17::new(vec![b9, b8, b7]) * &self.z_h_x + acc_x;
// Then z is evaluated at the secret number s using the SRS from the setup.
// output of second step
let z_s = self.srs.eval_at_s(&z_x);
// round 3 - compute the quotient polynomial
// ---------------------------------------------------------------------------
// Next comes the most massive computation of the entire protocol.
// Our goal is to compute the polynomial t, which will be of degree 3n+5 for n gates.
// The polynomial t encodes the majority of the information contained in our circuit
// and assignments all at once.
// L1 refers to a Lagrange basis polynomial over our roots of unity H.
// Specifically L1(1)=1, but takes the value 0 on each of the other roots of unity.
// The coefficients for L1 can be found by interpolating the vector
let lagrange_vector: Vec<_> = std::iter::once(f17(1))
.chain(std::iter::repeat(f17(0)))
.take(self.h.len())
.collect();
let l_1_x = self.interpolate(&lagrange_vector);
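// (Illustrative) over the n-th roots of unity this basis polynomial has the closed
// form $L_1(x) = (1 + x + ... + x^{n-1}) / n$; for n = 4 in $\mathbb{F}_{17}$ that is
// $13(1 + x + x^2 + x^3)$, which gives 13*4 = 1 at x = 1 and 0 at the other roots.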
// public inputs
let p_i_x = f17(0).as_poly(); // Public input
// helper variables
let a_x_b_x_q_m_x = (&a_x * &b_x) * &q_m_x;
let a_x_q_l_x = &a_x * &q_l_x;
let b_x_q_r_x = &b_x * &q_r_x;
let c_x_q_o_x = &c_x * &q_o_x;
let alpha_a_x_beta_x_gamma = &(&a_x + &Poly::new(vec![*gamma, *beta])) * alpha;
let b_x_beta_k1_x_gamma = &b_x + &Poly::new(vec![*gamma, beta * self.k1]);
let c_x_beta_k2_x_gamma = &c_x + &Poly::new(vec![*gamma, beta * self.k2]);
let z_omega_x = Poly::new(
z_x.coeffs()
.iter()
.enumerate()
.map(|(n, c)| c * &self.omega.pow(n as u64))
.collect::<Vec<_>>(),
);
let alpha_a_x_beta_s_sigma1_x_gamma = (&a_x + &s_sigma_1 * beta + gamma) * alpha;
let b_x_beta_s_sigma2_x_gamma = &b_x + &s_sigma_2 * beta + gamma;
let c_x_beta_s_sigma3_x_gamma = &c_x + &s_sigma_3 * beta + gamma;
let alpha_2_z_x_1_l_1_x = ((&z_x + (-f17(1)).as_poly()) * alpha.pow(2)) * &l_1_x;
// compute t(x)
let t_1_z_h = a_x_b_x_q_m_x + a_x_q_l_x + b_x_q_r_x + c_x_q_o_x + p_i_x + &q_c_x;
let t_2_z_h = alpha_a_x_beta_x_gamma * b_x_beta_k1_x_gamma * c_x_beta_k2_x_gamma * &z_x;
let t_3_z_h = alpha_a_x_beta_s_sigma1_x_gamma
* b_x_beta_s_sigma2_x_gamma
* c_x_beta_s_sigma3_x_gamma
* &z_omega_x;
let t_4_z_h = alpha_2_z_x_1_l_1_x;
let (t_x, rem) = (t_1_z_h + t_2_z_h - t_3_z_h + t_4_z_h) / self.z_h_x.clone();
assert_eq!(rem, Poly::zero());
// It turns out that for n constraints, t will have degree 3n+5, which is too large to use the SRS
// from the setup phase in Part 1. However we can break t into three parts of degree n+1 each.
// Each of these polynomials will use n+2 coefficients from t(x)
let t_hi_x = Poly::new(t_x.coeffs()[2 * 6..3 * 6].to_owned());
let t_mid_x = Poly::new(t_x.coeffs()[1 * 6..2 * 6].to_owned());
let t_lo_x = Poly::new(t_x.coeffs()[0 * 6..1 * 6].to_owned());
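// (Illustrative) with n = 4 gates each slice keeps n + 2 = 6 coefficients, so
// t_lo, t_mid and t_hi all have degree <= n + 1 = 5 and fit the SRS, and
// $t(x) = t_{lo}(x) + x^{n+2} t_{mid}(x) + x^{2(n+2)} t_{hi}(x)$ reassembles t.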
// After dividing into three parts, we evaluate each part at s using the SRS.
// output of the third step
let t_hi_s = self.srs.eval_at_s(&t_hi_x);
let t_mid_s = self.srs.eval_at_s(&t_mid_x);
let t_lo_s = self.srs.eval_at_s(&t_lo_x);
// round 4 - compute linearization polynomial
// ---------------------------------------------------------------------------
// we create a polynomial r that is a kind of partner to t, where many of the polynomials that were
// included in t are replaced by field elements that are evaluations of those polynomials at a
// challenge value 𝔷.
let a_z = a_x.eval(&z);
let b_z = b_x.eval(&z);
let c_z = c_x.eval(&z);
let s_sigma_1_z = s_sigma_1.eval(&z);
let s_sigma_2_z = s_sigma_2.eval(&z);
let t_z = t_x.eval(&z);
let z_omega_z = z_omega_x.eval(&z);
let a_z_b_z_q_m_x = &q_m_x * &a_z * &b_z;
let a_z_q_l_x = &q_l_x * &a_z;
let b_z_q_r_x = &q_r_x * &b_z;
let c_z_q_o_x = &q_o_x * &c_z;
let r_1_x = a_z_b_z_q_m_x + a_z_q_l_x + b_z_q_r_x + c_z_q_o_x + q_c_x;
let r_2_x = &z_x
* ((a_z + beta * z + gamma)
* (b_z + beta * self.k1 * z + gamma)
* (c_z + beta * self.k2 * z + gamma)
* alpha);
let r_3_x = &z_x
* ((a_z + beta * s_sigma_1_z + gamma)
* (b_z + beta * s_sigma_2_z + gamma)
* (beta * z_omega_z * s_sigma_3)
* alpha);
let r_4_x = &z_x * l_1_x.eval(&z) * alpha.pow(2);
let r_x = r_1_x + r_2_x + r_3_x + r_4_x;
// compute linearization evaluation
let r_z = r_x.eval(&z);
// round 5
// ---------------------------------------------------------------------------
// we create two large polynomials that combine all the polynomials we've been
// using so far and we output commitments to them.
// compute opening proof polynomial w_z_x
let w_z_x = (t_lo_x + t_mid_x * z.pow(n + 2) + t_hi_x * z.pow(2 * n + 4) - t_z)
+ v * (r_x - r_z)
+ v.pow(2) * (a_x - a_z)
+ v.pow(3) * (b_x - b_z)
+ v.pow(4) * (c_x - c_z)
+ v.pow(5) * (s_sigma_1 - s_sigma_1_z)
+ v.pow(6) * (s_sigma_2 - s_sigma_2_z);
let (w_z_x, rem) = w_z_x / Poly::new(vec![-z, f17(1)]);
assert_eq!(rem, Poly::zero());
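// Why the remainder is zero (illustrative): every term of w_z_x is "a polynomial minus
// its own value at z", so the sum vanishes at x = z and the factor theorem makes it
// divisible by (x - z). Tiny example: x^2 + 1 minus its value 5 at z = 2 gives
// x^2 - 4 = (x - 2)(x + 2), remainder 0.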
// compute opening proof polinomial w_zw_x
let (w_z_omega_x, rem) = (z_x - z_omega_z) / Poly::new(vec![-z * self.omega, f17(1)]);
assert_eq!(rem, Poly::zero());
// compute opening proof polinomials at s
let w_z_s = self.srs.eval_at_s(&w_z_x);
let w_z_omega_s = self.srs.eval_at_s(&w_z_omega_x);
Proof {
a_s,
b_s,
c_s,
z_s,
t_lo_s,
t_mid_s,
t_hi_s,
w_z_s,
w_z_omega_s,
a_z,
b_z,
c_z,
s_sigma_1_z,
s_sigma_2_z,
r_z,
z_omega_z,
}
}
fn verify(
&self,
constraints: &Constrains,
proof: &Proof,
challange: &Challange,
rand: [F17; 1],
) -> bool {
let Proof {
a_s,
b_s,
c_s,
z_s,
t_lo_s,
t_mid_s,
t_hi_s,
w_z_s,
w_z_omega_s,
a_z,
b_z,
c_z,
s_sigma_1_z,
s_sigma_2_z,
r_z,
z_omega_z,
} = proof;
let Challange {
alpha,
beta,
gamma,
z,
v,
} = challange;
// verifier preprocessing
// ---------------------------------------------------------------------------
let sigma_1 = self.copy_constraints_to_roots(&constraints.c_a);
let sigma_2 = self.copy_constraints_to_roots(&constraints.c_b);
let sigma_3 = self.copy_constraints_to_roots(&constraints.c_c);
let q_m_s = self.srs.eval_at_s(&self.interpolate(&constraints.q_m));
let q_l_s = self.srs.eval_at_s(&self.interpolate(&constraints.q_l));
let q_r_s = self.srs.eval_at_s(&self.interpolate(&constraints.q_r));
let q_o_s = self.srs.eval_at_s(&self.interpolate(&constraints.q_o));
let q_c_s = self.srs.eval_at_s(&self.interpolate(&constraints.q_c));
let sigma_1_s = self.srs.eval_at_s(&self.interpolate(&sigma_1));
let sigma_2_s = self.srs.eval_at_s(&self.interpolate(&sigma_2));
let sigma_3_s = self.srs.eval_at_s(&self.interpolate(&sigma_3));
let u = rand[0];
// Step 1. Validate proof points in G1
if !a_s.in_curve()
|| !b_s.in_curve()
|| !c_s.in_curve()
|| !z_s.in_curve()
|| !t_lo_s.in_curve()
|| !t_mid_s.in_curve()
|| !t_hi_s.in_curve()
|| !w_z_s.in_curve()
|| !w_z_omega_s.in_curve()
{
return false;
}
// Step 2. Validate proof fields in F17
if !a_z.in_field()
|| !b_z.in_field()
|| !c_z.in_field()
|| !s_sigma_1_z.in_field()
|| !s_sigma_2_z.in_field()
|| !r_z.in_field()
|| !z_omega_z.in_field()
{
return false;
}
// Step 3. We do not have public inputs, nothing to do
// Step 4. Evaluate z_h at z
let z_h_z = self.z_h_x.eval(&z);
// Step 5. Evaluate lagrange on z
let l_1_z = self.interpolate(&[f17(1), f17(0), f17(0), f17(0)]).eval(&z);
// Step 6. We do not have public inputs, nothing to do
let p_i_z = F17::zero();
// Step 7. Compute quotient polinomial evaluation
let a_z_beta_s_sigma_1_z_gamma = a_z + beta * s_sigma_1_z + gamma;
let b_z_beta_s_sigma_2_z_gamma = b_z + beta * s_sigma_2_z + gamma;
let c_z_gamma = c_z + gamma;
let l_1_z_alpha_2 = l_1_z * alpha.pow(2);
let t_z = ((r_z + p_i_z
- (a_z_beta_s_sigma_1_z_gamma * b_z_beta_s_sigma_2_z_gamma * c_z_gamma * z_omega_z)
- l_1_z_alpha_2)
/ z_h_z)
.unwrap();
// Step 8. Compute the first part of batched polinomial commitment
let d_1_s = q_m_s * (a_z * b_z * v).rebase::<101>()
+ q_l_s * (a_z * v).rebase::<101>()
+ q_r_s * (b_z * v).rebase::<101>()
+ q_o_s * (c_z * v).rebase::<101>()
+ q_c_s * (v).rebase::<101>();
let d_2_s = *z_s
* ((a_z + beta * z + gamma)
* (b_z + beta * self.k1 * z + gamma)
* (c_z + beta * self.k2 * z + gamma)
* alpha
* v
+ l_1_z * alpha.pow(2) * v
+ u)
.rebase::<101>();
let d_3_s = sigma_3_s
* ((a_z + beta * s_sigma_1_z + gamma)
* (b_z + beta * s_sigma_2_z + gamma)
* alpha
* v
* beta
* z_omega_z)
.rebase::<101>();
let d_s = d_1_s + d_2_s + -d_3_s;
// Step 9. Compute full batched polinomial commitment
let n = constraints.c_a.len() as u64;
let f_s = t_lo_s.to_owned()
+ t_mid_s.to_owned() * z.pow(n + 2).rebase::<101>()
+ t_hi_s.to_owned() * z.pow(2 * n + 4).rebase::<101>()
+ d_s
+ a_s.to_owned() * v.pow(2).rebase::<101>()
+ b_s.to_owned() * v.pow(3).rebase::<101>()
+ c_s.to_owned() * v.pow(4).rebase::<101>()
+ sigma_1_s.to_owned() * v.pow(5).rebase::<101>()
+ sigma_2_s.to_owned() * v.pow(6).rebase::<101>();
// Step 10. Compute group encoded batch evaluation [E]
let e_s = self.srs.eval_at_s(&p17(&[1]))
* (t_z
+ v * r_z
+ v.pow(2) * a_z
+ v.pow(3) * b_z
+ v.pow(4) * c_z
+ v.pow(5) * s_sigma_1_z
+ v.pow(6) * s_sigma_2_z
+ u * z_omega_z)
.rebase::<101>();
// Step 11. Batch validate all equations
let e_1_q1 = *w_z_s + *w_z_omega_s * u.rebase::<101>();
let e_1_q2 = self.srs.g2_s;
let e_2_q1 = *w_z_s * z.rebase::<101>()
+ *w_z_omega_s * (u * z * self.omega).rebase::<101>()
+ f_s
+ -e_s;
let e_2_q2 = self.srs.g2_1;
let e_1 = pairing(e_1_q1, e_1_q2);
let e_2 = pairing(e_2_q1, e_2_q2);
e_1 == e_2
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_plonk_gen_proof() {
// create the trusted setup
let s = 2; // the toxic waste
let srs = SRS::create(f101(s), 6, G1Point::generator(), G2Point::generator());
let plonk = Plonk::new(srs);
// constraints and assigments
let constraints = Constrains::new(
&[
Gate::mul_a_b(),
Gate::mul_a_b(),
Gate::mul_a_b(),
Gate::sum_a_b(),
],
(
vec![CopyOf::B(1), CopyOf::B(2), CopyOf::B(3), CopyOf::C(1)],
vec![CopyOf::A(1), CopyOf::A(2), CopyOf::A(3), CopyOf::C(2)],
vec![CopyOf::A(4), CopyOf::B(4), CopyOf::C(4), CopyOf::C(3)],
),
);
let assigments = Assigments::new(&[
Assigment::new(Field::from(3), Field::from(3), Field::from(9)),
Assigment::new(Field::from(4), Field::from(4), Field::from(16)),
Assigment::new(Field::from(5), Field::from(5), Field::from(25)),
Assigment::new(Field::from(9), Field::from(16), Field::from(25)),
]);
// random numbers (the b's)
let rand = [
f17(7),
f17(4),
f17(11),
f17(12),
f17(16),
f17(2),
f17(14),
f17(11),
f17(7),
];
// values that are sent from the verifier to the prover
let challange = Challange {
alpha: f17(15),
beta: f17(12),
gamma: f17(13),
z: f17(5),
v: f17(12),
};
let proof = plonk.prove(&constraints, &assigments, &challange, rand);
let expected = Proof {
a_s: g1f(91, 66),
b_s: g1f(26, 45),
c_s: g1f(91, 35),
z_s: g1f(32, 59),
t_lo_s: g1f(12, 32),
t_mid_s: g1f(26, 45),
t_hi_s: g1f(91, 66),
w_z_s: g1f(91, 35),
w_z_omega_s: g1f(65, 98),
a_z: f17(15),
b_z: f17(13),
c_z: f17(5),
s_sigma_1_z: f17(1),
s_sigma_2_z: f17(12),
r_z: f17(15),
z_omega_z: f17(15),
};
assert_eq!(proof, expected);
let rand = [f17(4)];
assert!(plonk.verify(&constraints, &proof, &challange, rand));
}
}

3
src/utils/mod.rs Normal file

@@ -0,0 +1,3 @@
mod u64field;
pub use u64field::U64Field;

248
src/utils/u64field.rs Normal file

@@ -0,0 +1,248 @@
#![allow(clippy::many_single_char_names)]
use crate::{ec::Field, poly::Poly};
use std::{
fmt::Display,
ops::{Add, AddAssign, Div, Mul, MulAssign, Neg, Sub, SubAssign},
};
fn extended_gcd(a: i64, b: i64) -> (i64, i64, i64) {
let (mut s, mut old_s) = (0, 1);
let (mut t, mut old_t) = (1, 0);
let (mut r, mut old_r) = (b, a);
while r != 0 {
let quotient = old_r / r;
old_r -= quotient * r;
std::mem::swap(&mut old_r, &mut r);
old_s -= quotient * s;
std::mem::swap(&mut old_s, &mut s);
old_t -= quotient * t;
std::mem::swap(&mut old_t, &mut t);
}
(old_r, old_s, old_t)
}
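// Worked example (illustrative): extended_gcd(5, 17) == (1, 7, -2), i.e.
// 7*5 + (-2)*17 == 1, so 7 is the inverse of 5 modulo 17 (5 * 7 = 35 = 2*17 + 1).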
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct U64Field<const M: u64>(u64);
#[allow(non_snake_case)]
impl<const M: u64> Field for U64Field<M> {
type Order = u64;
fn order() -> Self::Order {
M
}
fn zero() -> Self {
Self::from(0u64)
}
fn is_zero(&self) -> bool {
self.0 == 0
}
fn one() -> Self {
Self::from(1u64)
}
fn as_u64(&self) -> u64 {
self.0
}
fn in_field(&self) -> bool {
self.0 < M
}
fn inv(&self) -> Option<Self> {
let (gcd, c, _) = extended_gcd(self.0 as i64, M as i64);
if gcd == 1 {
if c < 0 {
Some(Self((M as i64 + c) as u64))
} else {
Some(Self(c as u64))
}
} else {
None
}
}
fn pow(&self, mut exp: u64) -> Self {
let mut result = Self::one();
let mut base = *self;
while exp > 0 {
if exp % 2 == 1 {
result = result * base;
}
exp >>= 1;
base = base * base;
}
result
}
}
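// e.g. (illustrative): U64Field::<101>::from(3u64).pow(5) -- exp = 0b101, so the loop
// multiplies in base^1 and base^4 and returns 3 * 81 = 243 = 41 (mod 101), using
// repeated squaring instead of four repeated multiplications.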
impl<const M: u64> From<i64> for U64Field<M> {
fn from(n: i64) -> Self {
if n < 0 {
-Self::from(-n as u64)
} else {
Self::from(n as u64)
}
}
}
impl<const M: u64> From<u64> for U64Field<M> {
fn from(n: u64) -> Self {
Self(n % M)
}
}
impl<const M: u64> Display for U64Field<M> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
impl<const M: u64> Add for U64Field<M> {
type Output = U64Field<M>;
fn add(self, rhs: Self) -> Self::Output {
U64Field((self.0 + rhs.0) % M)
}
}
impl<const M: u64> Add<&U64Field<M>> for U64Field<M> {
type Output = U64Field<M>;
fn add(self, rhs: &U64Field<M>) -> Self::Output {
U64Field((self.0 + rhs.0) % M)
}
}
impl<const M: u64> Add<U64Field<M>> for &U64Field<M> {
type Output = U64Field<M>;
fn add(self, rhs: U64Field<M>) -> Self::Output {
U64Field((self.0 + rhs.0) % M)
}
}
impl<const M: u64> Add<&U64Field<M>> for &U64Field<M> {
type Output = U64Field<M>;
fn add(self, rhs: &U64Field<M>) -> Self::Output {
U64Field((self.0 + rhs.0) % M)
}
}
impl<const M: u64> AddAssign<&U64Field<M>> for U64Field<M> {
fn add_assign(&mut self, rhs: &U64Field<M>) {
self.0 = (self.0 + rhs.0) % M;
}
}
impl<const M: u64> AddAssign<U64Field<M>> for U64Field<M> {
fn add_assign(&mut self, rhs: U64Field<M>) {
self.0 = (self.0 + rhs.0) % M;
}
}
impl<const M: u64> Sub for U64Field<M> {
type Output = U64Field<M>;
fn sub(self, rhs: Self) -> Self::Output {
self + -rhs
}
}
impl<const M: u64> SubAssign<&U64Field<M>> for U64Field<M> {
fn sub_assign(&mut self, rhs: &U64Field<M>) {
*self += -rhs;
}
}
impl<const M: u64> Neg for U64Field<M> {
type Output = U64Field<M>;
fn neg(self) -> Self::Output {
U64Field((M - self.0) % M)
}
}
impl<const M: u64> Neg for &U64Field<M> {
type Output = U64Field<M>;
fn neg(self) -> Self::Output {
U64Field((M - self.0) % M)
}
}
impl<const M: u64> Mul for U64Field<M> {
type Output = U64Field<M>;
fn mul(self, rhs: Self) -> Self::Output {
U64Field((self.0 * rhs.0) % M)
}
}
impl<const M: u64> Mul<&U64Field<M>> for &U64Field<M> {
type Output = U64Field<M>;
fn mul(self, rhs: &U64Field<M>) -> Self::Output {
U64Field((self.0 * rhs.0) % M)
}
}
impl<const M: u64> Mul<&U64Field<M>> for U64Field<M> {
type Output = U64Field<M>;
fn mul(self, rhs: &U64Field<M>) -> Self::Output {
U64Field((self.0 * rhs.0) % M)
}
}
impl<const M: u64> Mul<U64Field<M>> for &U64Field<M> {
type Output = U64Field<M>;
fn mul(self, rhs: U64Field<M>) -> Self::Output {
U64Field((self.0 * rhs.0) % M)
}
}
impl<const M: u64> Mul<Poly<U64Field<M>>> for &U64Field<M> {
type Output = Poly<U64Field<M>>;
fn mul(self, rhs: Poly<U64Field<M>>) -> Self::Output {
rhs * self
}
}
impl<const M: u64> Mul<Poly<U64Field<M>>> for U64Field<M> {
type Output = Poly<U64Field<M>>;
fn mul(self, rhs: Poly<U64Field<M>>) -> Self::Output {
rhs * self
}
}
impl<const M: u64> MulAssign<&U64Field<M>> for U64Field<M> {
fn mul_assign(&mut self, rhs: &U64Field<M>) {
self.0 = (self.0 * rhs.0) % M;
}
}
impl<const M: u64> Div for U64Field<M> {
type Output = Option<U64Field<M>>;
fn div(self, rhs: Self) -> Self::Output {
rhs.inv().map(|v| v * self)
}
}
#[cfg(test)]
mod tests {
use super::*;
fn f101(n: u64) -> U64Field<101> {
U64Field::from(n)
}
#[test]
fn test_base() {
assert_eq!(f101(200), f101(100) + f101(100));
assert_eq!(f101(100), f101(0) - f101(1));
assert_eq!(f101(100), f101(0) - f101(1));
assert_eq!(None, f101(1) / f101(0));
assert_eq!(f101(4), f101(12) * (f101(4) / f101(12)).unwrap());
}
#[test]
fn test_vectors() {
assert_eq!(f101(100), -f101(1));
assert_eq!(f101(50), -(f101(1) / f101(2)).unwrap());
assert_eq!(f101(20), -(f101(1) / f101(5)).unwrap());
assert_eq!(f101(1), f101(100).pow(0));
assert_eq!(f101(100) * f101(100), f101(100).pow(2));
assert_eq!(f101(100) * f101(100) * f101(100), f101(100).pow(3));
}
}