Moved everything over to f_inner

vub
2022-09-29 12:22:27 +02:00
parent c2c59d20cb
commit e424d2c86b
5 changed files with 223 additions and 309 deletions
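For orientation, here is a minimal sketch (my own illustration, not part of the commit) of the pattern applied throughout this diff: arithmetic that previously worked on raw integers reduced mod b.curve_order by hand now works on f_inner field elements, so modular reduction, inversion and equality checks are handled by the Field class.

import py_ecc.bn128 as b
from py_ecc.fields.field_elements import FQ as Field

# f_inner wraps integers mod the BN128 curve order, exactly as defined in this diff
class f_inner(Field):
    field_modulus = b.curve_order

# Before: manual reduction on plain ints
old_val = (7 * 11) % b.curve_order
# After: reduction is implicit; field elements still compare equal to plain ints
new_val = f_inner(7) * f_inner(11)
assert new_val == old_val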

View File

@@ -1,138 +1,13 @@
# A verification key generator for a simple zk language, reverse-engineered
# to match https://zkrepl.dev/ output
import py_ecc.bn128 as b
from py_ecc.fields.field_elements import FQ as Field
from fft import fft
from Crypto.Hash import keccak
from functools import cache
f = b.FQ
f2 = b.FQ2
class f_inner(Field):
field_modulus = b.curve_order
primitive_root = 5
@cache
def get_root_of_unity(group_order):
return pow(primitive_root, (b.curve_order - 1) // group_order, b.curve_order)
@cache
def get_roots_of_unity(group_order):
o = [1, get_root_of_unity(group_order)]
while len(o) < group_order:
o.append(o[-1] * o[1] % b.curve_order)
return o
def keccak256(x):
return keccak.new(digest_bits=256).update(x).digest()
def serialize_point(pt):
return pt[0].n.to_bytes(32, 'big') + pt[1].n.to_bytes(32, 'big')
def binhash_to_f_inner(h):
return f_inner(int.from_bytes(h, 'big'))
def ec_lincomb(pairs):
o = b.Z1
for pt, coeff in pairs:
if hasattr(coeff, 'n'):
coeff = coeff.n
o = b.add(o, b.multiply(pt, coeff % b.curve_order))
return o
SETUP_FILE_G1_STARTPOS = 80
SETUP_FILE_POWERS_POS = 60
class Setup(object):
def __init__(self, G1_side, X2):
self.G1_side = G1_side
self.X2 = X2
@classmethod
def from_file(cls, filename):
contents = open(filename, 'rb').read()
# Byte 60 gives you the base-2 log of how many powers there are
powers = 2**contents[SETUP_FILE_POWERS_POS]
# Extract G1 points, which start at byte 80
values = [
int.from_bytes(contents[i: i+32], 'little')
for i in range(SETUP_FILE_G1_STARTPOS,
SETUP_FILE_G1_STARTPOS + 32 * powers * 2, 32)
]
assert max(values) < b.field_modulus
# The points are encoded in a weird encoding, where all x and y coordinates
# are multiplied by a factor (for Montgomery-form optimization?). We can extract
# the factor because we know that the first point is the generator.
factor = f(values[0]) / b.G1[0]
values = [f(x) / factor for x in values]
G1_side = [(values[i*2], values[i*2+1]) for i in range(powers)]
print("Extracted G1 side, X^1 point: {}".format(G1_side[1]))
# Search for start of G2 points. We again know that the first point is
# the generator.
pos = SETUP_FILE_G1_STARTPOS + 32 * powers * 2
target = (factor * b.G2[0].coeffs[0]).n
while pos < len(contents):
v = int.from_bytes(contents[pos: pos+32], 'little')
if v == target:
break
pos += 1
print("Detected start of G2 side at byte {}".format(pos))
X2_encoding = contents[pos + 32 * 4: pos + 32 * 8]
X2_values = [
f(int.from_bytes(X2_encoding[i: i + 32], 'little')) / factor
for i in range(0, 128, 32)
]
X2 = (f2(X2_values[:2]), f2(X2_values[2:]))
assert b.is_on_curve(X2, b.b2)
print("Extracted G2 side, X^1 point: {}".format(X2))
# assert b.pairing(b.G2, G1_side[1]) == b.pairing(X2, b.G1)
# print("X^1 points checked consistent")
return cls(G1_side, X2)
# Encodes the KZG commitment to the given polynomial coeffs
def powers_to_point(setup, powers):
if len(powers) > len(setup.G1_side):
raise Exception("Not enough powers in setup")
o = b.Z1
for x, y in zip(powers, setup.G1_side):
if hasattr(x, 'n'):
x = x.n
o = b.add(o, b.multiply(y, x % b.curve_order))
return o
# Encodes the KZG commitment that evaluates to the given values in the group
def evaluations_to_point(setup, group_order, vals):
if hasattr(vals[0], 'n'):
vals = [x.n for x in vals]
powers = fft(vals, b.curve_order, get_root_of_unity(group_order), inv=True)
return powers_to_point(setup, powers)
# Extracts a point from JSON in circom format
def interpret_json_point(p):
if len(p) == 3 and isinstance(p[0], str) and p[2] == "1":
return (f(int(p[0])), f(int(p[1])))
elif len(p) == 3 and p == ["0", "1", "0"]:
return b.Z1
elif len(p) == 3 and isinstance(p[0], list) and p[2] == ["1", "0"]:
return (
f2([int(p[0][0]), int(p[0][1])]),
f2([int(p[1][0]), int(p[1][1])]),
)
elif len(p) == 3 and p == [["0", "0"], ["1", "0"], ["0", "0"]]:
return b.Z2
raise Exception("cannot interpret that point: {}".format(p))
from utils import *
# Creates the inner-field representation of a given (section, index)
# Expects section = 1 for left, 2 for right, 3 for output
def S_position_to_field(group_order, index, section):
def S_position_to_f_inner(group_order, index, section):
assert section in (1, 2, 3) and index < group_order
return (
get_roots_of_unity(group_order)[index] * section
) % b.curve_order
return get_roots_of_unity(group_order)[index] * section
# Expects input in the form: [['a', 'b', 'c'], ...]
def make_s_polynomials(group_order, wires):
@@ -165,7 +40,7 @@ def make_s_polynomials(group_order, wires):
uses = sorted(uses)
for i in range(len(uses)):
next_i = (i+1) % len(uses)
S[uses[next_i][1]][uses[next_i][0]] = S_position_to_field(
S[uses[next_i][1]][uses[next_i][0]] = S_position_to_f_inner(
group_order, uses[i][0], uses[i][1]
)
return (S[1], S[2], S[3])
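As a toy illustration (my own example, assuming the utils module introduced later in this diff is importable) of the position encoding used above: each cell (index, section) maps to section * w^index, and every use of a variable stores the encoding of the previous use, so with two uses each cell points at the other and the copy cycle closes.

from utils import get_roots_of_unity

group_order = 8
w = get_roots_of_unity(group_order)

# position (index, section) -> w^index * section, with section 1/2/3 = left/right/output
def position_to_field(index, section):
    return w[index] * section

# Suppose variable 'x' is the left input of row 2 and the right input of row 5.
# With only two uses, each one stores the other's position, closing the copy cycle:
sigma = {(2, 1): position_to_field(5, 2), (5, 2): position_to_field(2, 1)}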
@@ -281,18 +156,19 @@ def eq_to_coeffs(eq):
# Right: coeffs, {'': constant term, in_L: L term, in_R: R term,
# in_L*in_R: product term, '$flip_output': flip output to neg?}
def make_gate_polynomials(group_order, eqs):
L = [0] * group_order
R = [0] * group_order
M = [0] * group_order
O = [0] * group_order
C = [0] * group_order
L = [f_inner(0) for _ in range(group_order)]
R = [f_inner(0) for _ in range(group_order)]
M = [f_inner(0) for _ in range(group_order)]
O = [f_inner(0) for _ in range(group_order)]
C = [f_inner(0) for _ in range(group_order)]
for i, (variables, coeffs) in enumerate(eqs):
L[i] = -coeffs.get(variables[0], 0)
R[i] = -coeffs.get(variables[1], 0)
C[i] = -coeffs.get('', 0)
O[i] = (-1 if '$flip_output' in coeffs else 1)
L[i] = f_inner(-coeffs.get(variables[0], 0))
R[i] = f_inner(-coeffs.get(variables[1], 0))
C[i] = f_inner(-coeffs.get('', 0))
O[i] = f_inner(-1 if '$flip_output' in coeffs else 1)
if None not in variables:
M[i] = -coeffs.get(min(variables[:2])+'*'+max(variables[:2]), 0)
merged_key = min(variables[:2])+'*'+max(variables[:2])
M[i] = f_inner(-coeffs.get(merged_key, 0))
return L, R, M, O, C
# Generate the verification key with the given setup, group order and equations
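A worked instance (hypothetical numbers, not from the repo's test circuits) of the coeffs layout described in the comment above, and of the sign conventions make_gate_polynomials applies:

# A gate meaning  c = 3*a + 2*b + a*b + 5  in the (variables, coeffs) form
variables = ['a', 'b', 'c']
coeffs = {'a': 3, 'b': 2, 'a*b': 1, '': 5}

# make_gate_polynomials negates every column except the output, so this row gets
# L = -3, R = -2, M = -1, O = 1, C = -5, and the gate equation
# L*a + R*b + M*a*b + O*c + C == 0 holds exactly when c takes the stated value.
a_val, b_val = 4, 6
c_val = coeffs['a']*a_val + coeffs['b']*b_val + coeffs['a*b']*a_val*b_val + coeffs['']
assert (-coeffs['a'])*a_val + (-coeffs['b'])*b_val + (-coeffs['a*b'])*a_val*b_val + 1*c_val + (-coeffs['']) == 0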

View File

@@ -1,108 +0,0 @@
def _simple_ft(vals, modulus, roots_of_unity):
L = len(roots_of_unity)
o = []
for i in range(L):
last = 0
for j in range(L):
last += vals[j] * roots_of_unity[(i*j)%L]
o.append(last % modulus)
return o
def _fft(vals, modulus, roots_of_unity):
if len(vals) <= 4:
#return vals
return _simple_ft(vals, modulus, roots_of_unity)
L = _fft(vals[::2], modulus, roots_of_unity[::2])
R = _fft(vals[1::2], modulus, roots_of_unity[::2])
o = [0 for i in vals]
for i, (x, y) in enumerate(zip(L, R)):
y_times_root = y*roots_of_unity[i]
o[i] = (x+y_times_root) % modulus
o[i+len(L)] = (x-y_times_root) % modulus
return o
def expand_root_of_unity(root_of_unity, modulus):
# Build up roots of unity
rootz = [1, root_of_unity]
while rootz[-1] != 1:
rootz.append((rootz[-1] * root_of_unity) % modulus)
return rootz
def fft(vals, modulus, root_of_unity, inv=False):
rootz = expand_root_of_unity(root_of_unity, modulus)
# Fill in vals with zeroes if needed
if len(rootz) > len(vals) + 1:
vals = vals + [0] * (len(rootz) - len(vals) - 1)
if inv:
# Inverse FFT
invlen = pow(len(vals), modulus-2, modulus)
return [(x*invlen) % modulus for x in
_fft(vals, modulus, rootz[:0:-1])]
else:
# Regular FFT
return _fft(vals, modulus, rootz[:-1])
# Evaluates f(x) for f in evaluation form
def inv_fft_at_point(vals, modulus, root_of_unity, x):
if len(vals) == 1:
return vals[0]
# 1/2 in the field
half = (modulus + 1)//2
# 1/w
inv_root = pow(root_of_unity, len(vals)-1, modulus)
# f(-x) in evaluation form
f_of_minus_x_vals = vals[len(vals)//2:] + vals[:len(vals)//2]
# e(x) = (f(x) + f(-x)) / 2 in evaluation form
evens = [(f+g) * half % modulus for f,g in zip(vals, f_of_minus_x_vals)]
# o(x) = (f(x) - f(-x)) / 2 in evaluation form
odds = [(f-g) * half % modulus for f,g in zip(vals, f_of_minus_x_vals)]
# e(x^2) + coordinate * x * o(x^2) in evaluation form
comb = [(o * x * inv_root**i + e) % modulus for i, (o, e) in enumerate(zip(odds, evens))]
return inv_fft_at_point(comb[:len(comb)//2], modulus, root_of_unity ** 2 % modulus, x**2 % modulus)
def shift_domain(vals, modulus, root_of_unity, factor):
if len(vals) == 1:
return vals
# 1/2 in the field
half = (modulus + 1)//2
# 1/w
inv_factor = pow(factor, modulus - 2, modulus)
half_length = len(vals)//2
# f(-x) in evaluation form
f_of_minus_x_vals = vals[half_length:] + vals[:half_length]
# e(x) = (f(x) + f(-x)) / 2 in evaluation form
evens = [(f+g) * half % modulus for f,g in zip(vals, f_of_minus_x_vals)]
print('e', evens)
# o(x) = (f(x) - f(-x)) / 2 in evaluation form
odds = [(f-g) * half % modulus for f,g in zip(vals, f_of_minus_x_vals)]
print('o', odds)
shifted_evens = shift_domain(evens[:half_length], modulus, root_of_unity ** 2 % modulus, factor ** 2 % modulus)
print('se', shifted_evens)
shifted_odds = shift_domain(odds[:half_length], modulus, root_of_unity ** 2 % modulus, factor ** 2 % modulus)
print('so', shifted_odds)
return (
[(e + inv_factor * o) % modulus for e, o in zip(shifted_evens, shifted_odds)] +
[(e - inv_factor * o) % modulus for e, o in zip(shifted_evens, shifted_odds)]
)
def shift_poly(poly, modulus, factor):
factor_power = 1
inv_factor = pow(factor, modulus - 2, modulus)
o = []
for p in poly:
o.append(p * factor_power % modulus)
factor_power = factor_power * inv_factor % modulus
return o
def mul_polys(a, b, modulus, root_of_unity):
rootz = [1, root_of_unity]
while rootz[-1] != 1:
rootz.append((rootz[-1] * root_of_unity) % modulus)
if len(rootz) > len(a) + 1:
a = a + [0] * (len(rootz) - len(a) - 1)
if len(rootz) > len(b) + 1:
b = b + [0] * (len(rootz) - len(b) - 1)
x1 = _fft(a, modulus, rootz[:-1])
x2 = _fft(b, modulus, rootz[:-1])
return _fft([(v1*v2)%modulus for v1,v2 in zip(x1,x2)],
modulus, rootz[:0:-1])
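For reference, the recursive _fft above (and the copy kept in the new utils.py later in this diff) relies on the standard radix-2 butterfly: if E and O are the half-size transforms of the even- and odd-indexed inputs over the squared roots, then out[i] = E[i] + w^i * O[i] and out[i + n/2] = E[i] - w^i * O[i]. A small self-contained check of that identity over a toy prime (337, where 85 is an 8th root of unity) rather than b.curve_order; the values are my own:

MOD, W, N = 337, 85, 8
roots = [pow(W, i, MOD) for i in range(N)]
vals = [3, 1, 4, 1, 5, 9, 2, 6]

# Naive O(n^2) DFT: out[i] = sum_j vals[j] * w^(i*j)
def naive_dft(v):
    return [sum(v[j] * roots[(i * j) % N] for j in range(N)) % MOD for i in range(N)]

out = naive_dft(vals)
# Half-size transforms of the even- and odd-indexed inputs over the squared roots
E = [sum(vals[2*j] * roots[(2*i*j) % N] for j in range(N//2)) % MOD for i in range(N//2)]
O = [sum(vals[2*j+1] * roots[(2*i*j) % N] for j in range(N//2)) % MOD for i in range(N//2)]
for i in range(N//2):
    assert out[i] == (E[i] + roots[i] * O[i]) % MOD
    assert out[i + N//2] == (E[i] - roots[i] * O[i]) % MOD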

View File

@@ -7,13 +7,13 @@ def prove_from_witness(setup, group_order, eqs, var_assignments):
var_assignments[None] = 0
variables = [v for (v, c) in eqs]
# Compute wire assignments
A = [0] * group_order
B = [0] * group_order
C = [0] * group_order
A = [f_inner(0) for _ in range(group_order)]
B = [f_inner(0) for _ in range(group_order)]
C = [f_inner(0) for _ in range(group_order)]
for i, (in_L, in_R, out) in enumerate(variables):
A[i] = var_assignments[in_L]
B[i] = var_assignments[in_R]
C[i] = var_assignments[out]
A[i] = f_inner(var_assignments[in_L])
B[i] = f_inner(var_assignments[in_R])
C[i] = f_inner(var_assignments[out])
A_pt = evaluations_to_point(setup, group_order, A)
B_pt = evaluations_to_point(setup, group_order, B)
C_pt = evaluations_to_point(setup, group_order, C)
@@ -38,7 +38,7 @@ def prove_from_witness(setup, group_order, eqs, var_assignments):
(B[i] + beta * S2[i] + gamma) /
(C[i] + beta * S3[i] + gamma)
)
assert Z.pop().n == 1
assert Z.pop() == 1
Z_pt = evaluations_to_point(setup, group_order, Z)
alpha = binhash_to_f_inner(keccak256(serialize_point(Z_pt)))
print("Permutation accumulator polynomial successfully generated")
@@ -53,27 +53,17 @@ def prove_from_witness(setup, group_order, eqs, var_assignments):
# divide polys without the 0/0 issue
fft_offset = binhash_to_f_inner(keccak256(keccak256(serialize_point(Z_pt))))
def fft_expand(values):
if hasattr(values[0], 'n'):
values = [x.n for x in values]
x_powers = fft(values, b.curve_order, roots_of_unity[1], inv=True)
def fft_expand(vals):
x_powers = f_inner_fft(vals, inv=True)
x_powers = [
(fft_offset**i * x).n for i, x in enumerate(x_powers)
] + [0] * (group_order * 3)
return [
f_inner(x) for x in fft(x_powers, b.curve_order, quarter_roots[1])
]
(fft_offset**i * x) for i, x in enumerate(x_powers)
] + [f_inner(0)] * (group_order * 3)
return f_inner_fft(x_powers)
def expanded_evaluations_to_coeffs(evals):
shifted_coeffs = fft(
[i.n for i in evals], b.curve_order, quarter_roots[1], inv=True
)
inv_offset = (1 / fft_offset).n
return [
(pow(inv_offset, i, b.curve_order) * v) % b.curve_order for
(i, v) in enumerate(shifted_coeffs)
]
shifted_coeffs = f_inner_fft(evals, inv=True)
inv_offset = (1 / fft_offset)
return [v * inv_offset ** i for (i, v) in enumerate(shifted_coeffs)]
A_big = fft_expand(A)
B_big = fft_expand(B)
@@ -90,7 +80,10 @@ def prove_from_witness(setup, group_order, eqs, var_assignments):
(fft_expand(x) for x in (QL, QR, QM, QO, QC))
for i in range(group_order):
assert (A[i] * QL[i] + B[i] * QR[i] + A[i] * B[i] * QM[i] + C[i] * QO[i] + QC[i]) % b.curve_order == 0
assert (
A[i] * QL[i] + B[i] * QR[i] + A[i] * B[i] * QM[i] +
C[i] * QO[i] + QC[i] == 0
)
QUOT_part_1_big = [(
A_big[i] * QL_big[i] +
@@ -141,7 +134,7 @@ def prove_from_witness(setup, group_order, eqs, var_assignments):
)
print("Generated part 2 of the quotient polynomial")
L1_big = fft_expand([1] + [0] * (group_order - 1))
L1_big = fft_expand([f_inner(1)] + [f_inner(0)] * (group_order - 1))
QUOT_part_3_big = [(
(Z_big[i] - 1) * L1_big[i] * alpha**2
@@ -158,11 +151,9 @@ def prove_from_witness(setup, group_order, eqs, var_assignments):
for i in range(4 * group_order)
])
(T1, T2, T3) = (
fft(all_coeffs[group_order*i : group_order*(i+1)],
b.curve_order, roots_of_unity[1])
for i in range(3)
)
T1 = f_inner_fft(all_coeffs[:group_order])
T2 = f_inner_fft(all_coeffs[group_order: group_order*2])
T3 = f_inner_fft(all_coeffs[group_order*2: group_order*3])
T1_pt = evaluations_to_point(setup, group_order, T1)
T2_pt = evaluations_to_point(setup, group_order, T2)
@@ -231,7 +222,7 @@ def prove_from_witness(setup, group_order, eqs, var_assignments):
R_coeffs = expanded_evaluations_to_coeffs(R_big)
assert R_coeffs[group_order:] == [0] * (group_order * 3)
R = fft(R_coeffs[:group_order], b.curve_order, roots_of_unity[1])
R = f_inner_fft(R_coeffs[:group_order])
print('R_pt', evaluations_to_point(setup, group_order, R))
@@ -240,7 +231,7 @@ def prove_from_witness(setup, group_order, eqs, var_assignments):
print("Generated linearization polynomial R")
buf3 = b''.join([
x.n.to_bytes(32, 'big') for x in
serialize_int(x) for x in
(A_ev, B_ev, C_ev, S1_ev, S2_ev, Z_shifted_ev)
])
v = binhash_to_f_inner(keccak256(buf3))
@@ -256,7 +247,7 @@ def prove_from_witness(setup, group_order, eqs, var_assignments):
W_z_coeffs = expanded_evaluations_to_coeffs(W_z_big)
assert W_z_coeffs[group_order:] == [0] * (group_order * 3)
W_z = fft(W_z_coeffs[:group_order], b.curve_order, roots_of_unity[1])
W_z = f_inner_fft(W_z_coeffs[:group_order])
W_z_pt = evaluations_to_point(setup, group_order, W_z)
W_zw_big = [
@@ -266,7 +257,7 @@ def prove_from_witness(setup, group_order, eqs, var_assignments):
W_zw_coeffs = expanded_evaluations_to_coeffs(W_zw_big)
assert W_zw_coeffs[group_order:] == [0] * (group_order * 3)
W_zw = fft(W_zw_coeffs[:group_order], b.curve_order, roots_of_unity[1])
W_zw = f_inner_fft(W_zw_coeffs[:group_order])
W_zw_pt = evaluations_to_point(setup, group_order, W_zw)
print("Generated final quotient witness polynomials")

View File

@@ -0,0 +1,150 @@
import py_ecc.bn128 as b
from py_ecc.fields.field_elements import FQ as Field
from functools import cache
from Crypto.Hash import keccak
f = b.FQ
f2 = b.FQ2
class f_inner(Field):
field_modulus = b.curve_order
primitive_root = 5
@cache
def get_root_of_unity(group_order):
return f_inner(5) ** ((b.curve_order - 1) // group_order)
@cache
def get_roots_of_unity(group_order):
o = [f_inner(1), get_root_of_unity(group_order)]
while len(o) < group_order:
o.append(o[-1] * o[1])
return o
def keccak256(x):
return keccak.new(digest_bits=256).update(x).digest()
def serialize_int(x):
return x.n.to_bytes(32, 'big')
def serialize_point(pt):
return pt[0].n.to_bytes(32, 'big') + pt[1].n.to_bytes(32, 'big')
def binhash_to_f_inner(h):
return f_inner(int.from_bytes(h, 'big'))
def ec_mul(pt, coeff):
if hasattr(coeff, 'n'):
coeff = coeff.n
return b.multiply(pt, coeff % b.curve_order)
def ec_lincomb(pairs):
o = b.Z1
for pt, coeff in pairs:
o = b.add(o, ec_mul(pt, coeff))
return o
# Encodes the KZG commitment to the given polynomial coeffs
def powers_to_point(setup, powers):
if len(powers) > len(setup.G1_side):
raise Exception("Not enough powers in setup")
o = b.Z1
for x, y in zip(powers, setup.G1_side):
o = b.add(o, b.multiply(y, x.n))
return o
# Encodes the KZG commitment that evaluates to the given values in the group
def evaluations_to_point(setup, group_order, evals):
return powers_to_point(setup, f_inner_fft(evals, inv=True))
SETUP_FILE_G1_STARTPOS = 80
SETUP_FILE_POWERS_POS = 60
class Setup(object):
def __init__(self, G1_side, X2):
self.G1_side = G1_side
self.X2 = X2
@classmethod
def from_file(cls, filename):
contents = open(filename, 'rb').read()
# Byte 60 gives you the base-2 log of how many powers there are
powers = 2**contents[SETUP_FILE_POWERS_POS]
# Extract G1 points, which start at byte 80
values = [
int.from_bytes(contents[i: i+32], 'little')
for i in range(SETUP_FILE_G1_STARTPOS,
SETUP_FILE_G1_STARTPOS + 32 * powers * 2, 32)
]
assert max(values) < b.field_modulus
# The points are encoded in a weird encoding, where all x and y coordinates
# are multiplied by a factor (for Montgomery-form optimization?). We can extract
# the factor because we know that the first point is the generator.
factor = f(values[0]) / b.G1[0]
values = [f(x) / factor for x in values]
G1_side = [(values[i*2], values[i*2+1]) for i in range(powers)]
print("Extracted G1 side, X^1 point: {}".format(G1_side[1]))
# Search for start of G2 points. We again know that the first point is
# the generator.
pos = SETUP_FILE_G1_STARTPOS + 32 * powers * 2
target = (factor * b.G2[0].coeffs[0]).n
while pos < len(contents):
v = int.from_bytes(contents[pos: pos+32], 'little')
if v == target:
break
pos += 1
print("Detected start of G2 side at byte {}".format(pos))
X2_encoding = contents[pos + 32 * 4: pos + 32 * 8]
X2_values = [
f(int.from_bytes(X2_encoding[i: i + 32], 'little')) / factor
for i in range(0, 128, 32)
]
X2 = (f2(X2_values[:2]), f2(X2_values[2:]))
assert b.is_on_curve(X2, b.b2)
print("Extracted G2 side, X^1 point: {}".format(X2))
# assert b.pairing(b.G2, G1_side[1]) == b.pairing(X2, b.G1)
# print("X^1 points checked consistent")
return cls(G1_side, X2)
# Extracts a point from JSON in circom format
def interpret_json_point(p):
if len(p) == 3 and isinstance(p[0], str) and p[2] == "1":
return (f(int(p[0])), f(int(p[1])))
elif len(p) == 3 and p == ["0", "1", "0"]:
return b.Z1
elif len(p) == 3 and isinstance(p[0], list) and p[2] == ["1", "0"]:
return (
f2([int(p[0][0]), int(p[0][1])]),
f2([int(p[1][0]), int(p[1][1])]),
)
elif len(p) == 3 and p == [["0", "0"], ["1", "0"], ["0", "0"]]:
return b.Z2
raise Exception("cannot interpret that point: {}".format(p))
def _fft(vals, modulus, roots_of_unity):
if len(vals) == 1:
return vals
L = _fft(vals[::2], modulus, roots_of_unity[::2])
R = _fft(vals[1::2], modulus, roots_of_unity[::2])
o = [0 for i in vals]
for i, (x, y) in enumerate(zip(L, R)):
y_times_root = y*roots_of_unity[i]
o[i] = (x+y_times_root) % modulus
o[i+len(L)] = (x-y_times_root) % modulus
return o
def f_inner_fft(vals, inv=False):
roots = [x.n for x in get_roots_of_unity(len(vals))]
o, nvals = b.curve_order, [x.n for x in vals]
if inv:
# Inverse FFT
invlen = f_inner(1) / len(vals)
reversed_roots = [roots[0]] + roots[1:][::-1]
return [f_inner(x) * invlen for x in _fft(nvals, o, reversed_roots)]
else:
# Regular FFT
return [f_inner(x) for x in _fft(nvals, o, roots)]
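A quick usage sketch of the new helpers (my own check, not part of the commit; it assumes this utils.py is importable): f_inner_fft maps coefficients to evaluations over the roots of unity and inverts with inv=True, so the two directions should round-trip, and a direct evaluation at one root should agree.

from utils import f_inner, f_inner_fft, get_roots_of_unity

group_order = 8
evals = [f_inner(i * i + 3) for i in range(group_order)]

coeffs = f_inner_fft(evals, inv=True)      # evaluations -> coefficients
assert f_inner_fft(coeffs) == evals        # coefficients -> evaluations

# Spot-check one point directly: f(w^i) = sum_j coeffs[j] * w^(i*j)
w = get_roots_of_unity(group_order)
i = 3
assert sum([coeffs[j] * w[(i * j) % group_order] for j in range(group_order)], f_inner(0)) == evals[i]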

View File

@@ -25,7 +25,7 @@ def verify_proof(setup, group_order, vk, proof):
zed = binhash_to_f_inner(keccak256(buf))
buf3 = b''.join([
x.n.to_bytes(32, 'big') for x in
serialize_int(x) for x in
(A_ev, B_ev, C_ev, S1_ev, S2_ev, Z_shifted_ev)
])
v = binhash_to_f_inner(keccak256(buf3))
@@ -48,26 +48,29 @@ def verify_proof(setup, group_order, vk, proof):
)
R_pt = ec_lincomb([
(Qm_pt, (A_ev * B_ev).n),
(Ql_pt, A_ev.n),
(Qr_pt, B_ev.n),
(Qo_pt, C_ev.n),
(Qm_pt, A_ev * B_ev),
(Ql_pt, A_ev),
(Qr_pt, B_ev),
(Qo_pt, C_ev),
(Qc_pt, 1),
(Z_pt, ((
(Z_pt, (
(A_ev + beta * zed + gamma) *
(B_ev + beta * 2 * zed + gamma) *
(C_ev + beta * 3 * zed + gamma)
) * alpha).n),
(S3_pt, ((
(C_ev + beta * 3 * zed + gamma) *
alpha
)),
(S3_pt, (
-(A_ev + beta * S1_ev + gamma) *
(B_ev + beta * S2_ev + gamma) *
beta
) * alpha * Z_shifted_ev).n),
(b.G1, ((
beta *
alpha * Z_shifted_ev
)),
(b.G1, (
-(A_ev + beta * S1_ev + gamma) *
(B_ev + beta * S2_ev + gamma) *
(C_ev + gamma)
) * alpha * Z_shifted_ev).n),
(C_ev + gamma) *
alpha * Z_shifted_ev
)),
(Z_pt, L1_ev * alpha ** 2),
(b.G1, -L1_ev * alpha ** 2),
(T1_pt, -ZH_ev),
@@ -89,21 +92,23 @@ def verify_proof(setup, group_order, vk, proof):
)
D_pt = ec_lincomb([
(Qm_pt, (A_ev * B_ev).n),
(Ql_pt, A_ev.n),
(Qr_pt, B_ev.n),
(Qo_pt, C_ev.n),
(Qm_pt, A_ev * B_ev),
(Ql_pt, A_ev),
(Qr_pt, B_ev),
(Qo_pt, C_ev),
(Qc_pt, 1),
(Z_pt, ((
(Z_pt, (
(A_ev + beta * zed + gamma) *
(B_ev + beta * 2 * zed + gamma) *
(C_ev + beta * 3 * zed + gamma)
) * alpha + L1_ev * alpha**2 + u).n),
(C_ev + beta * 3 * zed + gamma) * alpha +
L1_ev * alpha ** 2 +
u
)),
(S3_pt, (
-(A_ev + beta * S1_ev + gamma) *
(B_ev + beta * S2_ev + gamma) *
alpha * beta * Z_shifted_ev
).n),
)),
(T1_pt, -ZH_ev),
(T2_pt, -ZH_ev * zed**group_order),
(T3_pt, -ZH_ev * zed**(group_order*2)),
@@ -119,10 +124,10 @@ def verify_proof(setup, group_order, vk, proof):
(S1_pt, v**4),
(S2_pt, v**5),
])
E_pt = b.multiply(b.G1, (
E_pt = ec_mul(b.G1, (
-r0 + v * A_ev + v**2 * B_ev + v**3 * C_ev +
v**4 * S1_ev + v**5 * S2_ev + u * Z_shifted_ev
).n)
))
assert b.pairing(
b.G2,
@@ -140,7 +145,7 @@ def verify_proof(setup, group_order, vk, proof):
(b.G1, -v**5 * S2_ev),
])
) == b.pairing(
b.add(X2, b.multiply(b.G2, (-zed).n)),
b.add(X2, ec_mul(b.G2, -zed)),
W_z_pt
)
print("done check 1")
@@ -152,7 +157,7 @@ def verify_proof(setup, group_order, vk, proof):
(b.G1, -Z_shifted_ev)
])
) == b.pairing(
b.add(X2, b.multiply(b.G2, (-zed*root_of_unity).n)),
b.add(X2, ec_mul(b.G2, -zed * root_of_unity)),
W_zw_pt
)
print("done check 2")
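Both pairing checks above are batched instances of the KZG opening identity e(C - [y]_1, G2) == e(W, [x - z]_2). A self-contained toy sketch of that identity (my own example with a locally chosen secret instead of the setup file; all names here are hypothetical):

import py_ecc.bn128 as b

s = 12345                         # toy secret "x"; a real setup never exposes this
f_coeffs = [3, 1, 4, 1]           # f(X) = 3 + X + 4*X^2 + X^3
z = 7
y = sum(c * z**i for i, c in enumerate(f_coeffs)) % b.curve_order
f_at_s = sum(c * s**i for i, c in enumerate(f_coeffs)) % b.curve_order

C = b.multiply(b.G1, f_at_s)                                      # commitment [f(s)]_1
q_at_s = (f_at_s - y) * pow(s - z, -1, b.curve_order) % b.curve_order
W = b.multiply(b.G1, q_at_s)                                      # proof [(f(s)-y)/(s-z)]_1
X2 = b.multiply(b.G2, s)                                          # [s]_2 from the setup

lhs = b.pairing(b.G2, b.add(C, b.multiply(b.G1, (-y) % b.curve_order)))
rhs = b.pairing(b.add(X2, b.multiply(b.G2, (-z) % b.curve_order)), W)
assert lhs == rhs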