Merge pull request #1 from tlsnotary/rust_impl

Rust impl
This commit is contained in:
Dan
2022-10-12 15:42:50 +00:00
committed by GitHub
41 changed files with 16305 additions and 199 deletions

Cargo.toml Normal file

@@ -0,0 +1,59 @@
[package]
name = "authdecode"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
name = "authdecode"
[package.metadata.wasm-pack.profile.release]
wasm-opt = false
[profile.release]
lto = true
[dependencies]
thiserror = "1.0"
num = { version = "0.4"}
rand = "0.8.5"
json = "0.12.4"
aes = { version = "0.7.5"}
cipher = "0.3"
uuid = { version = "0.8.1", features = ["serde", "v4"] }
rand_chacha = "0.3"
sha2 = { version = "0.10.1", features = ["compress"] }
lazy_static = "1.4.0"
regex = "1.6.0"
ark-bn254 = { git = "https://github.com/arkworks-rs/curves" }
ark-ff = { git = "https://github.com/arkworks-rs/algebra" }
# using arbitrary Poseidon rate is not possible in crates.io's version "0.3.0" of ark-sponge.
# temporarily pulling from github
ark-sponge = { git = "https://github.com/arkworks-rs/sponge", default-features = false }
# dependencies for halo2
halo2_proofs = { git = "https://github.com/zcash/halo2"}
halo2_gadgets = { git = "https://github.com/zcash/halo2"}
ff = "0.12"
group = "0.12"
pasta_curves = "0.4"
getrandom = { version = "0.2", features = ["js"] }
wasm-bindgen = { version = "0.2", features = ["serde-serialize"] }
instant = { version = "0.1", features = [ "wasm-bindgen", "inaccurate" ] }
rayon = "1.5"
web-sys = { version = "0.3.56", features = ['console'] }
wasm-bindgen-futures = "0.4"
js-sys = "0.3"
# force all our dependencies to also use arkworks from github
[patch.crates-io]
ark-std = { git = "https://github.com/arkworks-rs/std" }
ark-ec = { git = "https://github.com/arkworks-rs/algebra" }
ark-ff = { git = "https://github.com/arkworks-rs/algebra" }
ark-serialize = { git = "https://github.com/arkworks-rs/algebra" }
[dev-dependencies]
hex = "0.4"
num-bigint = { version = "0.4.3", features = ["rand"] }
criterion = "0.3"

README

@@ -1,20 +1,16 @@
This repo generates a circom circuit which is used to decode output labels from GC.
AuthDecode with the halo2 backend has been merged upstream into
https://github.com/tlsnotary/tlsn. This repo contains a snarkjs backend
which we may want to use in the future, so we are leaving this repo here for now.
Install snarkjs https://github.com/iden3/snarkjs
Download powers of tau^14 https://hermez.s3-eu-west-1.amazonaws.com/powersOfTau28_hez_final_14.ptau
Run:
python3 script.py 10
# 10 is how much plaintext (in Field elements of ~32 bytes) we want
# to decode inside the snark. (For tau^14 max is 21)
# if you need more than 21, you'll need to download another ptau file from
# https://github.com/iden3/snarkjs#7-prepare-phase-2
circom circuit.circom --r1cs --wasm --sym
snarkjs r1cs export json circuit.r1cs circuit.r1cs.json
cd circuit_js/ && node generate_witness.js circuit.wasm ../input.json ../witness.wtns && cd ..
snarkjs groth16 setup circuit.r1cs powersOfTau28_hez_final_14.ptau circuit_0000.zkey
snarkjs zkey contribute circuit_0000.zkey circuit_0001.zkey --name="1st" -v -e="kkk" # 10 sec one-time
snarkjs zkey contribute circuit_0001.zkey circuit_final.zkey --name="2nd" -v -e="Another"
snarkjs zkey export verificationkey circuit_final.zkey verification_key.json
snarkjs groth16 prove circuit_final.zkey witness.wtns proof.json public.json
snarkjs groth16 verify verification_key.json public.json proof.json
//! This module implements the protocol for authenticated decoding (aka AuthDecode)
//! of output labels from a garbled circuit (GC) evaluation.
//! The purpose of AuthDecode is to allow the GC evaluator to produce a zk-friendly
//! hash commitment to the GC output. Computing a zk-friendly hash directly inside
//! the GC is too expensive, hence the need for this protocol.
//!
//! Authdecode assumes a privacy-free setting for the garbler, i.e. the protocol
//! MUST ONLY start AFTER the garbler reveals all his secret GC inputs.
//! Specifically, in the context of the TLSNotary protocol, AuthDecode MUST ONLY
//! start AFTER the Notary (who is the garbler) has revealed all of his TLS session
//! keys' shares.
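//!
//! A minimal sketch of the message flow (names follow this crate's API; see
//! the benchmark and tests in this repo for runnable versions):
//! 1. The Prover sends a hash commitment to the plaintext.
//! 2. The Verifier replies with encrypted arithmetic labels.
//! 3. The Prover sends a hash commitment to the sum of her active labels.
//! 4. The Verifier reveals the seed of the arithmetic labels.
//! 5. The Prover checks the labels against the seed and sends the zk proof(s).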


@@ -0,0 +1,113 @@
use authdecode::halo2_backend::onetimesetup::OneTimeSetup;
use authdecode::halo2_backend::prover::Prover;
use authdecode::halo2_backend::verifier::Verifier;
use authdecode::halo2_backend::Curve;
use authdecode::prover::AuthDecodeProver;
use authdecode::verifier::AuthDecodeVerifier;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use rand::thread_rng;
use rand::Rng;
fn criterion_benchmark(c: &mut Criterion) {
c.bench_function("proof_generation", move |bench| {
// The Prover should have generated the proving key (before the authdecode
// protocol starts) like this:
let proving_key = OneTimeSetup::proving_key();
// The Verifier should have generated the verifying key (before the authdecode
// protocol starts) like this:
let verification_key = OneTimeSetup::verification_key();
let prover = Box::new(Prover::new(proving_key));
let verifier = Box::new(Verifier::new(verification_key, Curve::Pallas));
let mut rng = thread_rng();
// generate random plaintext of random size up to 400 bytes
let plaintext: Vec<u8> = core::iter::repeat_with(|| rng.gen::<u8>())
.take(thread_rng().gen_range(0..400))
.collect();
// Normally, the Prover is expected to obtain her binary labels by
// evaluating the garbled circuit.
// To keep this test simple, we don't evaluate the gc, but we generate
// all labels of the Verifier and give the Prover her active labels.
let bit_size = plaintext.len() * 8;
let mut all_binary_labels: Vec<[u128; 2]> = Vec::with_capacity(bit_size);
let mut delta: u128 = rng.gen();
// set the last bit so that delta is odd (and thus the two labels in each pair always differ)
delta |= 1;
for _ in 0..bit_size {
let label_zero: u128 = rng.gen();
all_binary_labels.push([label_zero, label_zero ^ delta]);
}
let prover_labels = choose(&all_binary_labels, &u8vec_to_boolvec(&plaintext));
let verifier = AuthDecodeVerifier::new(all_binary_labels.clone(), verifier);
let verifier = verifier.setup().unwrap();
let prover = AuthDecodeProver::new(plaintext, prover);
// Perform setup
let prover = prover.setup().unwrap();
// Commitment to the plaintext is sent to the Notary
let (plaintext_hash, prover) = prover.plaintext_commitment().unwrap();
// Notary sends back encrypted arithmetic labels.
let (ciphertexts, verifier) = verifier.receive_plaintext_hashes(plaintext_hash).unwrap();
// Hash commitment to the label_sum is sent to the Notary
let (label_sum_hashes, prover) = prover
.label_sum_commitment(ciphertexts, &prover_labels)
.unwrap();
// Notary sends the arithmetic label seed
let (seed, verifier) = verifier.receive_label_sum_hashes(label_sum_hashes).unwrap();
// At this point the following happens in the `committed GC` protocol:
// - the Notary reveals the GC seed
// - the User checks that the GC was created from that seed
// - the User checks that her active output labels correspond to the
// output labels derived from the seed
// - we are called with the result of the check and (if successful)
// with all the output labels
let prover = prover
.binary_labels_authenticated(true, Some(all_binary_labels))
.unwrap();
// Prover checks the integrity of the arithmetic labels and generates zero_sums and deltas
let prover = prover.authenticate_arithmetic_labels(seed).unwrap();
bench.iter(|| {
//Prover generates the proof
black_box(prover.create_zk_proofs());
});
});
}
/// Unzips a slice of pairs, returning items corresponding to choice
fn choose<T: Clone>(items: &[[T; 2]], choice: &[bool]) -> Vec<T> {
assert!(items.len() == choice.len(), "arrays are different length");
items
.iter()
.zip(choice)
.map(|(items, choice)| items[*choice as usize].clone())
.collect()
}
/// Converts bytes into bits in MSB-first order.
pub fn u8vec_to_boolvec(v: &[u8]) -> Vec<bool> {
let mut bv = Vec::with_capacity(v.len() * 8);
for byte in v.iter() {
for i in 0..8 {
bv.push(((byte >> (7 - i)) & 1) != 0);
}
}
bv
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
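// These criterion benchmarks are normally run with `cargo bench`
// (assuming a `[[bench]]` target with `harness = false` in Cargo.toml).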

circom/README Normal file

@@ -0,0 +1,24 @@
This folder contains files used by the snarkjs_backend.
To use that backend, make sure you have node installed (tested on Node v16.17.1)
Install dependencies with:
npm install
powersOfTau28_hez_final_14.ptau was downloaded from https://hermez.s3-eu-west-1.amazonaws.com/powersOfTau28_hez_final_14.ptau
Whenever circuit.circom is modified, delete circuit_0000.zkey and run:
circom circuit.circom --r1cs --wasm
All the commands below will be run by the prover/verifier from the .mjs files:
snarkjs groth16 setup circuit.r1cs powersOfTau28_hez_final_14.ptau circuit_0000.zkey
snarkjs zkey contribute circuit_0000.zkey circuit_final.zkey -v -e="Notary's entropy"
snarkjs zkey export verificationkey circuit_final.zkey verification_key.json
snarkjs groth16 fullprove input.json circuit_js/circuit.wasm circuit_final.zkey proof.json public.json
snarkjs groth16 verify verification_key.json public.json proof.json
We can generate circuit.wasm and circuit.r1cs deterministically with circom 2.0.5+
circom circuit.circom --r1cs --wasm
and then ship circuit.wasm on the User side and circuit.r1cs on the Notary side

circom/circuit.circom Normal file

@@ -0,0 +1,66 @@
pragma circom 2.0.0;
include "./poseidon.circom";
include "./utils.circom";
template Main() {
// Poseidon hash rate (how many field elements are permuted at a time)
var w = 16;
// How many of the last field element's high bits (in big-endian) to use for
// the plaintext. The rest of it will be used for the salt.
var last_fe_bits = 125;
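// Sanity check: 125 plaintext bits + 128 salt bits == 253 usable bits
// of the last field element.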
signal input plaintext_hash;
signal input label_sum_hash;
signal input plaintext[w];
signal input salt;
signal input delta[w-1][253];
signal input delta_last[last_fe_bits];
signal input sum_of_zero_labels;
// according to the Poseidon paper, the 2nd element of the Poseidon state
// is the hash digest
component hash = PoseidonEx(w, 2);
hash.initialState <== 0;
for (var i = 0; i < w-1; i++) {
hash.inputs[i] <== plaintext[i];
}
// add the salt to the last element of the plaintext, shifting the element left first
hash.inputs[w-1] <== plaintext[w-1] * (1 << 128) + salt;
log(1);
plaintext_hash === hash.out[1];
log(2);
// the last element of sum_of_deltas will contain the accumulated sum total
signal sum_of_deltas[w+1];
sum_of_deltas[0] <== 0;
// inner products of (deltas * plaintext bits) go here
component ip[w];
for (var i = 0; i<w; i++) {
// The last field element contains the salt. We make sure *not* to
// include the salt in the inner product.
var useful_bits = i < w-1 ? 253 : last_fe_bits;
ip[i] = InnerProd(useful_bits);
ip[i].plaintext <== plaintext[i];
for (var j=0; j < useful_bits; j++) {
if (i < w-1){
ip[i].deltas[j] <== delta[i][j];
}
else {
ip[i].deltas[j] <== delta_last[j];
}
}
sum_of_deltas[i+1] <== sum_of_deltas[i] + ip[i].out;
}
// according to the Poseidon paper, the 2nd element of the Poseidon state
// is the hash digest
component ls_hash = PoseidonEx(1, 2);
ls_hash.initialState <== 0;
// shift the sum to the left and put the salt into the last 128 bits
ls_hash.inputs[0] <== (sum_of_zero_labels + sum_of_deltas[w]) * (1 << 128) + salt;
log(3);
label_sum_hash === ls_hash.out[1];
log(4);
}
component main {public [sum_of_zero_labels, plaintext_hash, label_sum_hash, delta, delta_last]} = Main();
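// A note on the core relation constrained above (a summary, not extra logic):
// with arithmetic labels (W0_i, W1_i) for each plaintext bit b_i and
// delta_i = W1_i - W0_i, the sum of the Prover's active labels equals
//   sum_of_zero_labels + sum_i(delta_i * b_i),
// which is the value that gets salted and hashed into label_sum_hash.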

circom/circuit.r1cs Normal file

Binary file not shown.

Binary file not shown.

circom/circuit_js/generate_witness.js Normal file

@@ -0,0 +1,20 @@
const wc = require("./witness_calculator.js");
const { readFileSync, writeFile } = require("fs");
if (process.argv.length != 5) {
console.log("Usage: node generate_witness.js <file.wasm> <input.json> <output.wtns>");
} else {
const input = JSON.parse(readFileSync(process.argv[3], "utf8"));
const buffer = readFileSync(process.argv[2]);
wc(buffer).then(async witnessCalculator => {
// const w= await witnessCalculator.calculateWitness(input,0);
// for (let i=0; i< w.length; i++){
// console.log(w[i]);
// }
const buff= await witnessCalculator.calculateWTNSBin(input,0);
writeFile(process.argv[4], buff, function(err) {
if (err) throw err;
});
});
}

circom/circuit_js/witness_calculator.js Normal file

@@ -0,0 +1,306 @@
module.exports = async function builder(code, options) {
options = options || {};
let wasmModule;
try {
wasmModule = await WebAssembly.compile(code);
} catch (err) {
console.log(err);
console.log("\nTry to run circom --c in order to generate c++ code instead\n");
throw new Error(err);
}
let wc;
const instance = await WebAssembly.instantiate(wasmModule, {
runtime: {
exceptionHandler : function(code) {
let errStr;
if (code == 1) {
errStr= "Signal not found. ";
} else if (code == 2) {
errStr= "Too many signals set. ";
} else if (code == 3) {
errStr= "Signal already set. ";
} else if (code == 4) {
errStr= "Assert Failed. ";
} else if (code == 5) {
errStr= "Not enough memory. ";
} else if (code == 6) {
errStr= "Input signal array access exceeds the size";
} else {
errStr= "Unknown error\n";
}
// get error message from wasm
errStr += getMessage();
throw new Error(errStr);
},
showSharedRWMemory: function() {
printSharedRWMemory ();
}
}
});
const sanityCheck =
options
// options &&
// (
// options.sanityCheck ||
// options.logGetSignal ||
// options.logSetSignal ||
// options.logStartComponent ||
// options.logFinishComponent
// );
wc = new WitnessCalculator(instance, sanityCheck);
return wc;
function getMessage() {
var message = "";
var c = instance.exports.getMessageChar();
while ( c != 0 ) {
message += String.fromCharCode(c);
c = instance.exports.getMessageChar();
}
return message;
}
function printSharedRWMemory () {
const shared_rw_memory_size = instance.exports.getFieldNumLen32();
const arr = new Uint32Array(shared_rw_memory_size);
for (let j=0; j<shared_rw_memory_size; j++) {
arr[shared_rw_memory_size-1-j] = instance.exports.readSharedRWMemory(j);
}
console.log(fromArray32(arr));
}
};
class WitnessCalculator {
constructor(instance, sanityCheck) {
this.instance = instance;
this.version = this.instance.exports.getVersion();
this.n32 = this.instance.exports.getFieldNumLen32();
this.instance.exports.getRawPrime();
const arr = new Uint32Array(this.n32);
for (let i=0; i<this.n32; i++) {
arr[this.n32-1-i] = this.instance.exports.readSharedRWMemory(i);
}
this.prime = fromArray32(arr);
this.witnessSize = this.instance.exports.getWitnessSize();
this.sanityCheck = sanityCheck;
}
circom_version() {
return this.instance.exports.getVersion();
}
async _doCalculateWitness(input, sanityCheck) {
//input is assumed to be a map from signals to arrays of bigints
this.instance.exports.init((this.sanityCheck || sanityCheck) ? 1 : 0);
const keys = Object.keys(input);
var input_counter = 0;
keys.forEach( (k) => {
const h = fnvHash(k);
const hMSB = parseInt(h.slice(0,8), 16);
const hLSB = parseInt(h.slice(8,16), 16);
const fArr = flatArray(input[k]);
let signalSize = this.instance.exports.getInputSignalSize(hMSB, hLSB);
if (signalSize < 0){
throw new Error(`Signal ${k} not found\n`);
}
if (fArr.length < signalSize) {
throw new Error(`Not enough values for input signal ${k}\n`);
}
if (fArr.length > signalSize) {
throw new Error(`Too many values for input signal ${k}\n`);
}
for (let i=0; i<fArr.length; i++) {
const arrFr = toArray32(BigInt(fArr[i])%this.prime,this.n32)
for (let j=0; j<this.n32; j++) {
this.instance.exports.writeSharedRWMemory(j,arrFr[this.n32-1-j]);
}
try {
this.instance.exports.setInputSignal(hMSB, hLSB,i);
input_counter++;
} catch (err) {
// console.log(`After adding signal ${i} of ${k}`)
throw new Error(err);
}
}
});
if (input_counter < this.instance.exports.getInputSize()) {
throw new Error(`Not all inputs have been set. Only ${input_counter} out of ${this.instance.exports.getInputSize()}`);
}
}
async calculateWitness(input, sanityCheck) {
const w = [];
await this._doCalculateWitness(input, sanityCheck);
for (let i=0; i<this.witnessSize; i++) {
this.instance.exports.getWitness(i);
const arr = new Uint32Array(this.n32);
for (let j=0; j<this.n32; j++) {
arr[this.n32-1-j] = this.instance.exports.readSharedRWMemory(j);
}
w.push(fromArray32(arr));
}
return w;
}
async calculateBinWitness(input, sanityCheck) {
const buff32 = new Uint32Array(this.witnessSize*this.n32);
const buff = new Uint8Array( buff32.buffer);
await this._doCalculateWitness(input, sanityCheck);
for (let i=0; i<this.witnessSize; i++) {
this.instance.exports.getWitness(i);
const pos = i*this.n32;
for (let j=0; j<this.n32; j++) {
buff32[pos+j] = this.instance.exports.readSharedRWMemory(j);
}
}
return buff;
}
async calculateWTNSBin(input, sanityCheck) {
const buff32 = new Uint32Array(this.witnessSize*this.n32+this.n32+11);
const buff = new Uint8Array( buff32.buffer);
await this._doCalculateWitness(input, sanityCheck);
//"wtns"
buff[0] = "w".charCodeAt(0)
buff[1] = "t".charCodeAt(0)
buff[2] = "n".charCodeAt(0)
buff[3] = "s".charCodeAt(0)
//version 2
buff32[1] = 2;
//number of sections: 2
buff32[2] = 2;
//id section 1
buff32[3] = 1;
const n8 = this.n32*4;
//id section 1 length, stored as a 64-bit value (8 + n8 bytes)
const idSection1length = 8 + n8;
const idSection1lengthHex = idSection1length.toString(16);
buff32[4] = parseInt(idSection1lengthHex.slice(0,8), 16);
buff32[5] = parseInt(idSection1lengthHex.slice(8,16), 16);
//field size in bytes (n8 = this.n32 * 4)
buff32[6] = n8;
//prime number
this.instance.exports.getRawPrime();
var pos = 7;
for (let j=0; j<this.n32; j++) {
buff32[pos+j] = this.instance.exports.readSharedRWMemory(j);
}
pos += this.n32;
// witness size
buff32[pos] = this.witnessSize;
pos++;
//id section 2
buff32[pos] = 2;
pos++;
// section 2 length
const idSection2length = n8*this.witnessSize;
const idSection2lengthHex = idSection2length.toString(16);
buff32[pos] = parseInt(idSection2lengthHex.slice(0,8), 16);
buff32[pos+1] = parseInt(idSection2lengthHex.slice(8,16), 16);
pos += 2;
for (let i=0; i<this.witnessSize; i++) {
this.instance.exports.getWitness(i);
for (let j=0; j<this.n32; j++) {
buff32[pos+j] = this.instance.exports.readSharedRWMemory(j);
}
pos += this.n32;
}
return buff;
}
}
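// Layout of the .wtns buffer produced by calculateWTNSBin() above (a summary
// derived from the code): 4-byte magic "wtns"; u32 version (2); u32 section
// count (2); section 1: u32 id (1), u64 length, u32 field size in bytes (n8),
// the prime as n32 32-bit words, u32 witness size; section 2: u32 id (2),
// u64 length, then witnessSize field elements of n8 bytes each.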
function toArray32(rem,size) {
const res = []; //new Uint32Array(size); //has no unshift
const radix = BigInt(0x100000000);
while (rem) {
res.unshift( Number(rem % radix));
rem = rem / radix;
}
if (size) {
var i = size - res.length;
while (i>0) {
res.unshift(0);
i--;
}
}
return res;
}
function fromArray32(arr) { //returns a BigInt
var res = BigInt(0);
const radix = BigInt(0x100000000);
for (let i = 0; i<arr.length; i++) {
res = res*radix + BigInt(arr[i]);
}
return res;
}
function flatArray(a) {
var res = [];
fillArray(res, a);
return res;
function fillArray(res, a) {
if (Array.isArray(a)) {
for (let i=0; i<a.length; i++) {
fillArray(res, a[i]);
}
} else {
res.push(a);
}
}
}
function fnvHash(str) {
const uint64_max = BigInt(2) ** BigInt(64);
let hash = BigInt("0xCBF29CE484222325");
for (var i = 0; i < str.length; i++) {
hash ^= BigInt(str[i].charCodeAt());
hash *= BigInt(0x100000001B3);
hash %= uint64_max;
}
let shash = hash.toString(16);
let n = 16 - shash.length;
shash = '0'.repeat(n).concat(shash);
return shash;
}

circom/onetimesetup.mjs Normal file

@@ -0,0 +1,44 @@
import * as snarkjs from "snarkjs";
import {createOverride} from "fastfile";
import bfj from "bfj";
import { utils } from "ffjavascript";
const {stringifyBigInts} = utils;
// this workaround allows require() to be used from within ES6 modules
// (which is not allowed by default in Node.js).
import { createRequire } from 'module'
const require = createRequire(import.meta.url)
const fs = require('fs');
async function main(){
const argv = process.argv;
let entropy = argv[2];
// the Notary is expected to supply exactly 500 characters of entropy
if (entropy.length != 500){
process.exit(1);
}
const r1cs = fs.readFileSync("circom/circuit.r1cs");
const ptau = fs.readFileSync("circom/powersOfTau28_hez_final_14.ptau");
// snarkjs groth16 setup circuit.r1cs powersOfTau28_hez_final_14.ptau circuit_0000.zkey
const zkey_0 = {type: "file", fileName: "circom/circuit_0000.zkey"};
await createOverride(zkey_0);
console.log("groth16 setup...");
await snarkjs.zKey.newZKey(r1cs, ptau, zkey_0);
// snarkjs zkey contribute circuit_0000.zkey circuit_final.zkey -e="<Notary's entropy>"
const zkey_final = {type: "file", fileName: "circom/circuit_final.zkey.notary"};
await createOverride(zkey_final);
console.log("zkey contribute...");
await snarkjs.zKey.contribute(zkey_0, zkey_final, "", entropy);
// snarkjs zkey export verificationkey circuit_final.zkey verification_key.json
console.log("zkey export...");
const vKey = await snarkjs.zKey.exportVerificationKey(zkey_final);
// copied from snarkjs/cli.js zkeyExportVKey()
await bfj.write("circom/verification_key.json", stringifyBigInts(vKey), { space: 1 });
}
main().then(() => {
process.exit(0);
});

circom/package.json Normal file

@@ -0,0 +1,8 @@
{
"dependencies": {
"circom": "^0.5.46",
"circom2": "^0.2.5",
"circomlibjs": "^0.1.7",
"snarkjs": "^0.4.24"
}
}

circom/poseidon.circom

@@ -38,13 +38,13 @@ template Mix(t, M) {
}
}
template MixLast(t, M) {
template MixLast(t, M, s) {
signal input in[t];
signal output out;
var lc = 0;
for (var j=0; j<t; j++) {
lc += M[j][0]*in[j];
lc += M[j][s]*in[j];
}
out <== lc;
}
@@ -53,7 +53,7 @@ template MixS(t, S, r) {
signal input in[t];
signal output out[t];
var lc = 0;
for (var i=0; i<t; i++) {
lc += S[(t*2-1)*r+i]*in[i];
@@ -64,9 +64,10 @@ template MixS(t, S, r) {
}
}
template Poseidon(nInputs) {
template PoseidonEx(nInputs, nOuts) {
signal input inputs[nInputs];
signal output out;
signal input initialState;
signal output out[nOuts];
// Using recommended parameters from whitepaper https://eprint.iacr.org/2019/458.pdf (table 2, table 8)
// Generated by https://extgit.iaik.tugraz.at/krypto/hadeshash/-/blob/master/code/calc_round_numbers.py
@@ -85,7 +86,7 @@ template Poseidon(nInputs) {
component sigmaP[nRoundsP];
component mix[nRoundsF-1];
component mixS[nRoundsP];
component mixLast;
component mixLast[nOuts];
ark[0] = Ark(t, C, 0);
@@ -93,7 +94,7 @@ template Poseidon(nInputs) {
if (j>0) {
ark[0].in[j] <== inputs[j-1];
} else {
ark[0].in[j] <== 0;
ark[0].in[j] <== initialState;
}
}
@@ -184,10 +185,24 @@ template Poseidon(nInputs) {
sigmaF[nRoundsF-1][j].in <== mix[nRoundsF-2].out[j];
}
mixLast = MixLast(t,M);
for (var j=0; j<t; j++) {
mixLast.in[j] <== sigmaF[nRoundsF-1][j].out;
for (var i=0; i<nOuts; i++) {
mixLast[i] = MixLast(t,M,i);
for (var j=0; j<t; j++) {
mixLast[i].in[j] <== sigmaF[nRoundsF-1][j].out;
}
out[i] <== mixLast[i].out;
}
out <== mixLast.out;
}
template Poseidon(nInputs) {
signal input inputs[nInputs];
signal output out;
component pEx = PoseidonEx(nInputs, 1);
pEx.initialState <== 0;
for (var i=0; i<nInputs; i++) {
pEx.inputs[i] <== inputs[i];
}
out <== pEx.out[0];
}

File diff suppressed because one or more lines are too long

Binary file not shown.

circom/prove.mjs Normal file

@@ -0,0 +1,32 @@
import * as snarkjs from "snarkjs";
import path from "path";
// this workaround allows require() to be used from within ES6 modules
// (which is not allowed by default in Node.js).
import { createRequire } from 'module'
const require = createRequire(import.meta.url)
const fs = require('fs');
async function main(){
const input_path = process.argv[2];
const proving_key_path = process.argv[3];
const proof_path = process.argv[4];
const input = fs.readFileSync(input_path);
const wasm = fs.readFileSync(path.join("circom", "circuit_js", "circuit.wasm"));
const zkey_final = fs.readFileSync(proving_key_path);
const in_json = JSON.parse(input);
const res = await snarkjs.groth16.fullProve(in_json, wasm, zkey_final);
// the Notary will generate the publicSignals themselves; we only need to
// send the proof
fs.writeFileSync(proof_path, JSON.stringify(res.proof));
// Only for debugging
// fs.writeFileSync(proof_path + ".publicSignals", JSON.stringify(res.publicSignals));
}
main().then(() => {
process.exit(0);
});

circom/utils.circom Normal file

@@ -0,0 +1,41 @@
pragma circom 2.0.0;
// copied from circomlib/circuits/bitify.circom
template Num2Bits(n) {
signal input in;
signal output out[n];
var lc1=0;
var e2=1;
for (var i = 0; i<n; i++) {
out[i] <-- (in >> i) & 1;
out[i] * (out[i] -1 ) === 0;
lc1 += out[i] * e2;
e2 = e2+e2;
}
lc1 === in;
}
// Compute inner product on the high "count" bits of the plaintext.
template InnerProd(count){
signal input plaintext;
signal input deltas[count];
signal output out;
component n2b = Num2Bits(253);
plaintext ==> n2b.in;
// the last element of sum will contain the accumulated sum total
signal sum[count+1];
sum[0] <== 0;
for (var i=0; i<count; i++) {
// Num2Bits returns bits in "least bit first" order
// but deltas are in the opposite bit order.
// So, we reverse the bits.
sum[i+1] <== sum[i] + n2b.out[253-1-i] * deltas[i];
}
out <== sum[count];
}
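// Example: for count == 2,
//   out == n2b.out[252]*deltas[0] + n2b.out[251]*deltas[1],
// i.e. the two highest (big-endian) plaintext bits paired with the first two deltas.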

circom/verify.mjs Normal file

@@ -0,0 +1,32 @@
import * as snarkjs from "snarkjs";
// this workaround allows require() to be used from within ES6 modules
// (which is not allowed by default in Node.js).
import { createRequire } from 'module'
const require = createRequire(import.meta.url)
const fs = require('fs');
async function main(retval){
const pub_path = process.argv[2];
const proof_path = process.argv[3];
const vk = JSON.parse(fs.readFileSync("circom/verification_key.json", "utf8"));
const pub = JSON.parse(fs.readFileSync(pub_path, "utf8"));
const proof = JSON.parse(fs.readFileSync(proof_path, "utf8"));
const res = await snarkjs.groth16.verify(vk, pub, proof);
if (res == true) {
// exit code 0 means "the process exited successfully"
retval = 0;
}
else {
// any other exit code means "exited unsuccessfully"
retval = 99;
}
return retval;
}
main().then((retval) => {
console.log('verify.mjs exiting with ', retval);
process.exit(retval);
});

script.py

@@ -1,138 +0,0 @@
import sys
import random
import os
def padded_hex(s):
h = hex(s)
l = len(h)
if l % 2 == 0:
return h
else:
return '0x{0:0{1}x}'.format(s,l-1)
# This script will generate the circom circuit and inputs
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Expected 1 argument: amount of plaintext to process (in Field elements)')
exit(1)
count = int(sys.argv[1])
input = '{\n'
input += '"sum_of_labels": "'+str(random.randint(0, 2**140))+'",\n'
input += '"sum_of_zero_labels": "'+str(random.randint(0, 2**140))+'",\n'
input += '"plaintext": [\n'
for c in range(0, count):
input += ' "'+str(random.randint(0, 2**253))+'"'
if c < count-1:
input += ',\n'
input += "],\n"
input += '"delta": [\n'
for c in range(0, count):
input += ' [\n'
for x in range(0, 254):
input += ' "'+str(random.randint(0, 2**253))+'"'
if x < 253:
input += ',\n'
input += ' ]\n'
if c < count-1:
input += ',\n'
input += ']\n'
input += '}\n'
with open('input.json', 'w') as f:
f.write(input)
main = 'pragma circom 2.0.0;\n'
main += 'include "./poseidon.circom";\n'
main += 'include "./utils.circom";\n'
main += 'template Main() {\n'
main += ' signal output out;\n'
main += ' signal output prover_hash;\n'
main += ' signal input sum_of_labels;\n'
main += ' signal input plaintext['+str(count)+'];\n'
main += ' signal input delta['+str(count)+'][254];\n'
main += ' signal input sum_of_zero_labels;\n'
main += ' signal sums['+str(count)+'];\n'
# check that the Prover's hash is correct. Hashing 16 field elements at a time since
# I don't know how to chain hashes with circomlib.
# Using the prev. digest as the first input to the next hash
# if is_final is true then count includes the sum_of_labels
def hash(no, start, count, is_final=False):
out = ' component hash_'+str(no)+' = Poseidon('+str(count)+');\n'
if no > 0:
#first element is prev. hash digest
out += ' hash_'+str(no)+'.inputs[0] <== hash_'+str(no-1)+'.out;\n'
else:
if is_final and count == 1:
out += ' hash_'+str(no)+'.inputs[0] <== sum_of_labels;\n'
else:
out += ' hash_'+str(no)+'.inputs[0] <== plaintext['+str(start)+'];\n'
for x in range(1, count-1):
out += ' hash_'+str(no)+'.inputs['+str(x)+'] <== plaintext['+str(start+x)+'];\n'
if is_final:
# sum of labels is the last input
out += ' hash_'+str(no)+'.inputs['+str(count-1)+'] <== sum_of_labels;\n'
else:
out += ' hash_'+str(no)+'.inputs['+str(count-1)+'] <== plaintext['+str(start+count-1)+'];\n'
out += '\n'
return out
def hash_str():
out = ''
if count+1 <= 16:
out += hash(0, 0, count+1, True)
out += ' prover_hash <== hash_0.out;\n'
return out
else:
out += hash(0, 0, 16, False)
if count+1 <= 32:
out += hash(1, 16, count+1-16, True)
out += ' prover_hash <== hash_1.out;\n'
return out
else:
out += hash(1, 16, 16, False)
if count+1 <= 48:
out += hash(2, 16, count+1-32, True)
out += ' prover_hash <== hash_2.out;\n'
return out
else:
out += hash(2, 16, 16, False)
main += '\n'
main += hash_str()
main += '\n'
for c in range(0, count):
main += ' component ip'+str(c)+' = InnerProd();\n'
main += ' ip'+str(c)+'.plaintext <== plaintext['+str(c)+'];\n'
main += ' for (var i=0; i<254; i++) {\n'
main += ' ip'+str(c)+'.deltas[i] <== delta['+str(c)+'][i];\n'
main += ' }\n'
main += ' sums['+str(c)+'] <== ip'+str(c)+'.out;\n\n'
main += ' signal sum_of_deltas <== '
for c in range(0, count):
main += 'sums['+str(c)+']'
if c < count-1:
main += ' + '
else:
main += ';\n'
main += '\n';
main += " // TODO to pass this assert we'd have to\n"
main += ' // use actual values instead of random ones, so commenting out for now\n'
main += ' // sum_of_labels === sum_of_zero_labels + sum_of_deltas;\n'
main += '}\n'
main += 'component main {public [delta, sum_of_zero_labels]} = Main();'
with open('circuit.circom', 'w') as f:
f.write(main)

src/halo2_backend/circuit.rs Normal file

@@ -0,0 +1,709 @@
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region, SimpleFloorPlanner, Value},
plonk::{
Advice, Circuit, Column, ConstraintSystem, Constraints, Error, Expression, Instance,
Selector,
},
poly::Rotation,
};
use pasta_curves::pallas;
use pasta_curves::Fp;
use std::convert::TryInto;
use super::poseidon::circuit_config::{configure_poseidon_rate_1, configure_poseidon_rate_15};
use super::poseidon::spec::{Spec1, Spec15};
use halo2_gadgets::poseidon::{primitives::ConstantLength, Hash, Pow5Chip, Pow5Config};
use num::BigUint;
use super::utils::{bigint_to_256bits, bigint_to_f, bits_to_limbs, f_to_bigint};
// The AuthDecode protocol decodes a chunk of X bits at a time.
// Each bit requires 1 corresponding public input - a delta.
// We want the deltas to use up as few instance columns as possible,
// because more instance columns means more prover time. We also want
// K to stay as low as possible, since low K also improves prover time.
// The best ratio is achieved with K==6 and 68 instance columns.
// However, 68-bit limbs are awkward to work with, so we choose to have
// 64 columns and 4 rows to place all of a field element's bits into.
// Our circuit's K is 6, which gives us 2^6-6=58 useful rows
// (halo2 reserves 6 rows for internal purposes).
// It takes 4 64-cell rows to hold all the bits of one field element.
// The total number of field elements we can decode is 58/4 = 14 1/2,
// which equals 14 253-bit field elements plus 1 field element of 128 bits.
// The remaining 125 bits of the 15th element will be used for the salt.
// We could have much simpler logic if we just used 253 instance columns,
// but compared to 64 columns that would increase the prover time 2x.
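// A quick sketch of the layout arithmetic described above (assuming K == 6):
//
//     let useful_rows = (1usize << 6) - 6;       // 58 rows available to the circuit
//     let rows_per_fe = 256 / CELLS_PER_ROW;     // 4 rows of 64 cells per field element
//     assert_eq!(useful_rows / rows_per_fe, 14); // 14 full field elements
//     assert_eq!(useful_rows % rows_per_fe, 2);  // 2 rows left = the 15th, partial element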
/// The total number of field elements that will be decoded and hashed.
pub const TOTAL_FIELD_ELEMENTS: usize = 15;
/// The number of "full" field elements. We fully pack the plaintext bits
/// into these field elements.
/// The last field element is not "full" since it contains only two 64-bit
/// limbs of plaintext.
pub const FULL_FIELD_ELEMENTS: usize = 14;
/// The parameter informing halo2 of the upper bound on the number of rows our
/// circuit uses: the circuit may use at most 2^K rows.
///
/// TODO: Initially K was set to 6 and the whole circuit was built around
/// 58 rows. After the circuit was built, I discovered that setting
/// rate-15 Poseidon partial rounds count to 64 produces a `NotEnoughRowsAvailable`
/// error.
///
/// One solution to decrease the number of used rows is to set the Poseidon rate-15 partial
/// rounds count to 62 (which is acceptable according to
/// https://github.com/filecoin-project/neptune/blob/master/spec/poseidon_spec.pdf,
/// p.5 Security Inequalities (3), which says that the total round count must be
/// > 58), but that is not in keeping with the arbitrary security margin suggested in the
/// Poseidon paper, which brings the total round count to 64.
///
/// The other solution is to modify the circuit to hash 14 elements instead of 15, which
/// would decrease the required partial rounds to 60. But that means modifying the circuit.
///
/// The simplest solution was to increase the number of available rows by increasing
/// K. However, this increases the proof generation time by 30%.
pub const K: u32 = 7;
/// For one row of the circuit, this is the number of advice cells that hold
/// plaintext bits and also the number of instance cells that hold
/// deltas.
pub const CELLS_PER_ROW: usize = 64;
/// The number of rows that can be used by the circuit.
///
/// When K == 6, halo2 reserves 6 rows internally, so the actual number of rows
/// that the circuit can use is 2^K - 6 = 58.
/// If we ever change K, we should re-compute the number of reserved rows with
/// (cs.blinding_factors() + 1)
pub const USEFUL_ROWS: usize = 58;
/// The size of the salt of the hash in bits.
///
/// We don't use the usual 128 bits, because it is convenient to put two 64-bit
/// limbs of plaintext into the field element (which has 253 useful bits, see
/// [super::USEFUL_BITS]) and use the remaining 125 bits of the field element
/// for the salt (see [crate::Salt]).
pub const SALT_SIZE: usize = 125;
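// Note on the packing described above: the last field element holds
// 2 limbs * 64 bits + SALT_SIZE == 64 + 64 + 125 == 253 bits,
// i.e. exactly the useful bits of one field element.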
type F = pallas::Base;
#[derive(Clone, Debug)]
pub struct TopLevelConfig {
/// Each plaintext field element is decomposed into 256 bits
/// and each 64-bit limb is placed on a row
bits: [Column<Advice>; CELLS_PER_ROW],
/// Space to calculate intermediate sums
scratch_space: [Column<Advice>; 5],
/// Expected dot product for each 64-bit limb
dot_product: Column<Advice>,
/// Expected 64-bit limb composed into an integer
expected_limbs: Column<Advice>,
/// The salt will be placed into the 1st row of this column
salt: Column<Advice>,
/// Each row of deltas corresponds to one limb of plaintext
deltas: [Column<Instance>; CELLS_PER_ROW],
// When calling assign_advice_from_instance() we store the resulting cell
// in this column. TODO: we don't need a new column but could store it in
// any free cell of the above advice columns, e.g. in `scratch_space`, but
// when I tried to use them, assign_advice_from_instance() was giving me an error
// NotEnoughRowsAvailable { current_k: 6 }, even though the offset
// was < USEFUL_ROWS
poseidon_misc: Column<Advice>,
// SELECTORS. Below is a description of what happens when a selector
// is activated for a given row.
/// Computes a dot product
selector_dot_product: Selector,
/// Composes a given limb from bits into an integer.
/// The highest limb corresponds to the selector with index 0.
selector_compose: [Selector; 4],
/// Checks binariness of decomposed bits
selector_binary_check: Selector,
/// Sums 4 cells
selector_sum4: Selector,
/// Sums 2 cells
selector_sum2: Selector,
/// Left-shifts the first cell by the size of salt and adds the salt
selector_add_salt: Selector,
/// config for Poseidon with rate 15
poseidon_config_rate15: Pow5Config<Fp, 16, 15>,
/// config for Poseidon with rate 1
poseidon_config_rate1: Pow5Config<Fp, 2, 1>,
/// Contains 3 public inputs in this order:
/// [plaintext hash, label sum hash, zero sum].
/// Does **NOT** contain deltas.
public_inputs: Column<Instance>,
}
pub struct AuthDecodeCircuit {
/// plaintext is private input
plaintext: [F; TOTAL_FIELD_ELEMENTS],
/// salt is private input
salt: F,
/// deltas is a public input.
/// Since halo2 doesn't allow accessing the deltas passed in [crate::prover::Prove::prove],
/// we pass them here again to be able to compute the in-circuit expected values.
/// To make handling simpler, this is a matrix of rows, where each row corresponds
/// to a 64-bit limb of the plaintext.
deltas: [[F; CELLS_PER_ROW]; USEFUL_ROWS],
}
impl Circuit<F> for AuthDecodeCircuit {
type Config = TopLevelConfig;
type FloorPlanner = SimpleFloorPlanner;
// halo2 requires this function
fn without_witnesses(&self) -> Self {
Self {
plaintext: Default::default(),
salt: Default::default(),
deltas: [[Default::default(); CELLS_PER_ROW]; USEFUL_ROWS],
}
}
/// Creates the circuit's columns, selectors and defines the gates.
fn configure(meta: &mut ConstraintSystem<F>) -> Self::Config {
// keep this in case we modify the circuit and change K but forget
// to update USEFUL_ROWS.
// UPDATE: since we temporarily changed [K] from 6 to 7, this assert is
// commented out. Uncomment it when K is changed back:
// assert!(((1 << K) as usize) - (meta.blinding_factors() + 1) == USEFUL_ROWS);
// ADVICE COLUMNS
let bits: [Column<Advice>; CELLS_PER_ROW] = (0..CELLS_PER_ROW)
.map(|_| meta.advice_column())
.collect::<Vec<_>>()
.try_into()
.unwrap();
let dot_product = meta.advice_column();
meta.enable_equality(dot_product);
let expected_limbs = meta.advice_column();
meta.enable_equality(expected_limbs);
let salt = meta.advice_column();
meta.enable_equality(salt);
let scratch_space: [Column<Advice>; 5] = (0..5)
.map(|_| {
let c = meta.advice_column();
meta.enable_equality(c);
c
})
.collect::<Vec<_>>()
.try_into()
.unwrap();
let poseidon_misc = meta.advice_column();
meta.enable_equality(poseidon_misc);
// INSTANCE COLUMNS
let deltas: [Column<Instance>; CELLS_PER_ROW] = (0..CELLS_PER_ROW)
.map(|_| meta.instance_column())
.collect::<Vec<_>>()
.try_into()
.unwrap();
let public_inputs = meta.instance_column();
meta.enable_equality(public_inputs);
// SELECTORS
let selector_dot_product = meta.selector();
let selector_binary_check = meta.selector();
let selector_compose: [Selector; 4] = (0..4)
.map(|_| meta.selector())
.collect::<Vec<_>>()
.try_into()
.unwrap();
let selector_sum4 = meta.selector();
let selector_sum2 = meta.selector();
let selector_add_salt = meta.selector();
// POSEIDON
let poseidon_config_rate15 = configure_poseidon_rate_15::<Spec15>(15, meta);
let poseidon_config_rate1 = configure_poseidon_rate_1::<Spec1>(1, meta);
// CONFIG
// Put everything initialized above into a config
let cfg = TopLevelConfig {
bits,
scratch_space,
dot_product,
expected_limbs,
salt,
poseidon_misc,
deltas,
selector_dot_product,
selector_compose,
selector_binary_check,
selector_sum4,
selector_sum2,
selector_add_salt,
poseidon_config_rate15,
poseidon_config_rate1,
public_inputs,
};
// MISC
// build Expressions containing powers of 2, to be used in some gates
let two = BigUint::from(2u8);
let pow_2_x: Vec<_> = (0..256)
.map(|i| Expression::Constant(bigint_to_f(&two.pow(i as u32))))
.collect();
// GATES
// Computes the dot product of 2 sets of cells
meta.create_gate("dot product", |meta| {
let mut product = Expression::Constant(F::from(0));
for i in 0..CELLS_PER_ROW {
let delta = meta.query_instance(cfg.deltas[i], Rotation::cur());
let bit = meta.query_advice(cfg.bits[i], Rotation::cur());
product = product + delta * bit;
}
// constrain to match the expected value
let expected = meta.query_advice(cfg.dot_product, Rotation::cur());
let sel = meta.query_selector(cfg.selector_dot_product);
vec![sel * (product - expected)]
});
// Batch-checks binariness of multiple bits
meta.create_gate("binary check", |meta| {
// create one Expression for each cell to be checked
let expressions: [Expression<F>; CELLS_PER_ROW] = (0..CELLS_PER_ROW)
.map(|i| {
let bit = meta.query_advice(cfg.bits[i], Rotation::cur());
bit.clone() * bit.clone() - bit
})
.collect::<Vec<_>>()
.try_into()
.unwrap();
let sel = meta.query_selector(cfg.selector_binary_check);
// constrain all expressions to be equal to 0
Constraints::with_selector(sel, expressions)
});
// create 4 gates, each processing a different limb
for idx in 0..4 {
// compose the bits of a 64-bit limb into a field element and shift the
// limb to the left depending on the limb's index `idx`
meta.create_gate("compose limb", |meta| {
let mut sum_total = Expression::Constant(F::from(0));
for i in 0..CELLS_PER_ROW {
// the first bit is the highest bit. It is multiplied by the
// highest power of 2 for that limb.
let bit = meta.query_advice(cfg.bits[i], Rotation::cur());
sum_total = sum_total + bit * pow_2_x[255 - (CELLS_PER_ROW * idx) - i].clone();
}
// constrain to match the expected value
let expected = meta.query_advice(cfg.expected_limbs, Rotation::cur());
let sel = meta.query_selector(cfg.selector_compose[idx]);
vec![sel * (sum_total - expected)]
});
}
// sums 4 cells
meta.create_gate("sum4", |meta| {
let mut sum = Expression::Constant(F::from(0));
for i in 0..4 {
let dot_product = meta.query_advice(cfg.scratch_space[i], Rotation::cur());
sum = sum + dot_product;
}
// constrain to match the expected value
let expected = meta.query_advice(cfg.scratch_space[4], Rotation::cur());
let sel = meta.query_selector(cfg.selector_sum4);
vec![sel * (sum - expected)]
});
// sums 2 cells
meta.create_gate("sum2", |meta| {
let mut sum = Expression::Constant(F::from(0));
for i in 0..2 {
let dot_product = meta.query_advice(cfg.scratch_space[i], Rotation::cur());
sum = sum + dot_product;
}
// constrain to match the expected value
let expected = meta.query_advice(cfg.scratch_space[4], Rotation::cur());
let sel = meta.query_selector(cfg.selector_sum2);
vec![sel * (sum - expected)]
});
// left-shifts the first cell by SALT_SIZE and adds the second cell (the salt)
meta.create_gate("add salt", |meta| {
let cell = meta.query_advice(cfg.scratch_space[0], Rotation::cur());
let salt = meta.query_advice(cfg.scratch_space[1], Rotation::cur());
let sum = cell * pow_2_x[SALT_SIZE].clone() + salt;
// constrain to match the expected value
let expected = meta.query_advice(cfg.scratch_space[4], Rotation::cur());
let sel = meta.query_selector(cfg.selector_add_salt);
vec![sel * (sum - expected)]
});
cfg
}
/// Creates the circuit
fn synthesize(&self, cfg: Self::Config, mut layouter: impl Layouter<F>) -> Result<(), Error> {
let (label_sum, plaintext) = layouter.assign_region(
|| "main",
|mut region| {
// dot products for each row
let mut assigned_dot_products = Vec::new();
// limb for each row
let mut assigned_limbs = Vec::new();
//salt
let assigned_salt =
region.assign_advice(|| "", cfg.salt, 0, || Value::known(self.salt))?;
for j in 0..FULL_FIELD_ELEMENTS + 1 {
// decompose the private field element into bits
let bits = bigint_to_256bits(f_to_bigint(&self.plaintext[j].clone()));
// The last field element consists of only 2 64-bit limbs,
// so we use 2 rows for its bits and we skip processing the
// 2 high limbs
let max_row = if j == FULL_FIELD_ELEMENTS { 2 } else { 4 };
let skip = if j == FULL_FIELD_ELEMENTS { 2 } else { 0 };
for row in 0..max_row {
// convert bits into field elements and put them on the same row
for i in 0..CELLS_PER_ROW {
region.assign_advice(
|| "",
cfg.bits[i],
j * 4 + row,
|| Value::known(F::from(bits[CELLS_PER_ROW * (row + skip) + i])),
)?;
}
// constrain the whole row of bits to be binary
cfg.selector_binary_check.enable(&mut region, j * 4 + row)?;
let limbs = bits_to_limbs(bits);
// place expected limbs for each row
assigned_limbs.push(region.assign_advice(
|| "",
cfg.expected_limbs,
j * 4 + row,
|| Value::known(bigint_to_f(&limbs[row + skip].clone())),
)?);
// constrain the expected limb to match what the gate
// composes from bits
cfg.selector_compose[row + skip].enable(&mut region, j * 4 + row)?;
// compute the expected dot product for this row
let mut dot_product = F::from(0);
for i in 0..CELLS_PER_ROW {
dot_product += self.deltas[j * 4 + row][i]
* F::from(bits[CELLS_PER_ROW * (row + skip) + i]);
}
// place it into a cell for the expected dot_product
assigned_dot_products.push(region.assign_advice(
|| "",
cfg.dot_product,
j * 4 + row,
|| Value::known(dot_product),
)?);
// constrain the expected dot product to match what the gate computes
cfg.selector_dot_product.enable(&mut region, j * 4 + row)?;
}
}
// the grand sum of all dot products
let (dot_product, mut offset) =
self.compute_58_cell_sum(&assigned_dot_products, &mut region, &cfg, 0)?;
// move `zero_sum` into `scratch_space` area to be used in computations
let zero_sum = region.assign_advice_from_instance(
|| "",
cfg.public_inputs,
2,
cfg.scratch_space[0],
offset,
)?;
offset += 1;
// add zero_sum to all dot_products to get label_sum
let label_sum =
self.fold_sum(&[vec![dot_product, zero_sum]], &mut region, &cfg, offset)?[0]
.clone();
offset += 1;
// add salt
let label_sum_salted = self.add_salt(
label_sum.clone(),
assigned_salt.clone(),
&mut region,
&cfg,
offset,
)?;
offset += 1;
// Constrains each chunk of 4 limbs to be equal to a cell and
// returns the constrained cells containing the original plaintext
// (the private input to the circuit).
let plaintext: Result<Vec<AssignedCell<Fp, Fp>>, Error> = assigned_limbs
.chunks(4)
.map(|c| {
let sum =
self.fold_sum(&[c.to_vec()], &mut region, &cfg, offset)?[0].clone();
offset += 1;
Ok(sum)
})
.collect();
let mut plaintext = plaintext?;
// add salt to the last field element of the plaintext
let pt_len = plaintext.len();
let last_with_salt = self.add_salt(
plaintext[pt_len - 1].clone(),
assigned_salt,
&mut region,
&cfg,
offset,
)?;
offset += 1;
// replace the last field element with the one with salt
plaintext[pt_len - 1] = last_with_salt;
println!("{:?} final `scratch_space` offset", offset);
//Ok((label_sum, plaintext))
Ok((label_sum_salted, plaintext))
},
)?;
// Hash the label sum and constrain the digest to match the public input
let chip = Pow5Chip::construct(cfg.poseidon_config_rate1.clone());
let hasher = Hash::<F, _, Spec1, ConstantLength<1>, 2, 1>::init(
chip,
layouter.namespace(|| "init"),
)?;
let output = hasher.hash(layouter.namespace(|| "hash"), [label_sum])?;
layouter.assign_region(
|| "constrain output",
|mut region| {
let expected = region.assign_advice_from_instance(
|| "",
cfg.public_inputs,
1,
cfg.poseidon_misc,
0,
)?;
region.constrain_equal(output.cell(), expected.cell())?;
Ok(())
},
)?;
// Hash the plaintext and constrain the digest to match the public input
let chip = Pow5Chip::construct(cfg.poseidon_config_rate15.clone());
let hasher = Hash::<F, _, Spec15, ConstantLength<15>, 16, 15>::init(
chip,
layouter.namespace(|| "init"),
)?;
// unwrap() is safe since we use exactly 15 field elements in plaintext
let output = hasher.hash(layouter.namespace(|| "hash"), plaintext.try_into().unwrap())?;
layouter.assign_region(
|| "constrain output",
|mut region| {
let expected = region.assign_advice_from_instance(
|| "",
cfg.public_inputs,
0,
cfg.poseidon_misc,
1,
)?;
region.constrain_equal(output.cell(), expected.cell())?;
Ok(())
},
)?;
Ok(())
}
}
impl AuthDecodeCircuit {
pub fn new(plaintext: [F; 15], salt: F, deltas: [[F; CELLS_PER_ROW]; USEFUL_ROWS]) -> Self {
Self {
plaintext,
salt,
deltas,
}
}
// Computes the sum of 58 `cells` and outputs the cell containing the sum
// and the number of rows used up during the computation.
// Computations are done in the `scratch_space` area starting at the `row_offset`
// row. Constrains all intermediate values as necessary, so that
// the resulting cell is a properly constrained sum.
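// A sketch of the fold tree for 58 input cells (derived from the code below):
//   level 1: 58 cells -> 14 chunks of 4, plus one leftover chunk of 2
//   level 2: 14 sums  -> 3 chunks of 4, plus one leftover chunk of 2
//   level 3: the two leftover chunks (2 + 2 cells) are summed into a 4th subsum
//   level 4: the 4 level-3 subsums are folded into the final sum
// In total this uses 14 + 3 + 1 + 1 = 19 `scratch_space` rows.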
fn compute_58_cell_sum(
&self,
cells: &Vec<AssignedCell<Fp, Fp>>,
region: &mut Region<F>,
config: &TopLevelConfig,
row_offset: usize,
) -> Result<(AssignedCell<F, F>, usize), Error> {
let original_offset = row_offset;
let mut offset = row_offset;
// copy chunks of 4 cells to `scratch_space` and compute their sums
let l1_chunks: Vec<Vec<AssignedCell<F, F>>> = cells.chunks(4).map(|c| c.to_vec()).collect();
// do not process the last chunk of level1 as it will be
// later combined with the last chunk of level2
let l2_sums = self.fold_sum(&l1_chunks[..l1_chunks.len() - 1], region, &config, offset)?;
offset += l1_chunks.len() - 1;
// we now have 14 level-2 subsums which need to be summed with each
// other in batches of 4. There are 2 subsums from level 1 which we
// will combine with level 2 subsums.
let l2_chunks: Vec<Vec<AssignedCell<F, F>>> =
l2_sums.chunks(4).map(|c| c.to_vec()).collect();
// do not process the last chunk as it will be combined with
// level1's last chunk's sums
let mut l3_sums =
self.fold_sum(&l2_chunks[..l2_chunks.len() - 1], region, &config, offset)?;
offset += l2_chunks.len() - 1;
// we need to find the sum of level1's last chunk's 2 elements and level2's
// last chunk's 2 elements
let chunk = [
l1_chunks[l1_chunks.len() - 1][0].clone(),
l1_chunks[l1_chunks.len() - 1][1].clone(),
l2_chunks[l2_chunks.len() - 1][0].clone(),
l2_chunks[l2_chunks.len() - 1][1].clone(),
];
let sum = self.fold_sum(&[chunk.to_vec()], region, &config, offset)?;
offset += 1;
l3_sums.push(sum[0].clone());
// fold the 4 level-3 subsums into the final level-4 sum, which is the
// overall sum
let l3_chunks: Vec<Vec<AssignedCell<F, F>>> =
l3_sums.chunks(4).map(|c| c.to_vec()).collect();
let final_sum = self.fold_sum(&l3_chunks, region, &config, offset)?[0].clone();
offset += 1;
Ok((final_sum, offset - original_offset))
}
// Puts the cells on the same row and computes their sum. Places the resulting
// cell into the 5th column of the `scratch_space` and returns it. Returns
// as many sums as there are chunks of cells.
fn fold_sum(
&self,
chunks: &[Vec<AssignedCell<F, F>>],
region: &mut Region<F>,
config: &TopLevelConfig,
row_offset: usize,
) -> Result<Vec<AssignedCell<F, F>>, Error> {
(0..chunks.len())
.map(|i| {
let size = chunks[i].len();
assert!(size == 2 || size == 4);
let mut sum = Value::known(F::from(0));
// copy the cells onto the same row
for j in 0..size {
chunks[i][j].copy_advice(
|| "",
region,
config.scratch_space[j],
row_offset + i,
)?;
sum = sum + chunks[i][j].value();
}
let assigned_sum =
region.assign_advice(|| "", config.scratch_space[4], row_offset + i, || sum)?;
// activate the gate which performs the actual constraining
if size == 4 {
config.selector_sum4.enable(region, row_offset + i)?;
} else {
config.selector_sum2.enable(region, row_offset + i)?;
}
Ok(assigned_sum)
})
.collect()
}
// Puts two cells on the same row. The second cell is the salt. Left-shifts
// the first cell's value by the size of the salt and adds the salt to it.
// Places the resulting cell into the 5th column of the `scratch_space` and
// returns it.
fn add_salt(
&self,
cell: AssignedCell<F, F>,
salt: AssignedCell<F, F>,
region: &mut Region<F>,
config: &TopLevelConfig,
row_offset: usize,
) -> Result<AssignedCell<F, F>, Error> {
// copy the cells onto the same row
cell.copy_advice(|| "", region, config.scratch_space[0], row_offset)?;
salt.copy_advice(|| "", region, config.scratch_space[1], row_offset)?;
// compute the expected sum and put it into the 5th cell
let two = BigUint::from(2u8);
let pow_2_salt = bigint_to_f(&two.pow(SALT_SIZE as u32));
let sum = cell.value() * Value::known(pow_2_salt) + salt.value();
let assigned_sum =
region.assign_advice(|| "", config.scratch_space[4], row_offset, || sum)?;
// activate the gate which performs the actual constraining
config.selector_add_salt.enable(region, row_offset)?;
Ok(assigned_sum)
}
}
/// The circuit is tested from [super::prover::tests]
#[cfg(test)]
mod tests {}

src/halo2_backend/mod.rs Normal file

@@ -0,0 +1,60 @@
mod circuit;
pub mod onetimesetup;
mod poseidon;
pub mod prover;
mod utils;
pub mod verifier;
/// The number of useful bits, see [crate::prover::Prove::useful_bits].
/// This value is hard-coded into the circuit regardless of whether we use the
/// Pallas curve (field size 255) or the bn254 curve (field size 254).
const USEFUL_BITS: usize = 253;
/// The size of the chunk, see [crate::prover::Prove::chunk_size].
/// We use 14 field elements of 253 bits and 128 bits of the 15th field
/// element: 14 * 253 + 128 == 3670 bits total. The low 125 bits
/// of the last field element will be used for the salt.
const CHUNK_SIZE: usize = 3670;
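// A quick arithmetic check of the constant above (not used by the code):
//   14 full elements * 253 bits + 128 bits of the 15th element
//   == 3542 + 128 == 3670 == CHUNK_SIZE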
/// The elliptic curve on which the Poseidon hash will be computed.
pub enum Curve {
Pallas,
BN254,
}
#[cfg(test)]
mod tests {
use super::onetimesetup::OneTimeSetup;
use super::prover::Prover;
use super::verifier::Verifier;
use super::*;
use crate::tests::e2e_test;
/// Run the whole authdecode protocol end-to-end, optionally corrupting the proof
/// if `will_corrupt_proof` is set to true.
fn halo2_e2e_test(will_corrupt_proof: bool) {
// The Prover should have generated the proving key (before the authdecode
// protocol starts) like this:
let proving_key = OneTimeSetup::proving_key();
// The Verifier should have generated the verifying key (before the authdecode
// protocol starts) like this:
let verification_key = OneTimeSetup::verification_key();
let prover = Box::new(Prover::new(proving_key));
let verifier = Box::new(Verifier::new(verification_key, Curve::Pallas));
e2e_test(prover, verifier, will_corrupt_proof);
}
#[test]
/// Tests that the protocol runs successfully
fn halo2_e2e_test_success() {
halo2_e2e_test(false);
}
#[test]
/// Tests that a corrupted proof causes verification to fail
fn halo2_e2e_test_failure() {
halo2_e2e_test(true);
}
}

src/halo2_backend/onetimesetup.rs Normal file

@@ -0,0 +1,53 @@
use super::circuit::{AuthDecodeCircuit, CELLS_PER_ROW, K, USEFUL_ROWS};
use super::prover::PK;
use super::verifier::VK;
use halo2_proofs::plonk;
use halo2_proofs::poly::commitment::Params;
use pasta_curves::EqAffine;
pub struct OneTimeSetup {}
/// OneTimeSetup generates the proving key and the verification key. It can be
/// run ahead of time before the actual zk proving/verification takes place.
///
/// Note that as of Oct 2022 halo2 does not support serializing the proving/verification
/// keys. That's why we can't use cached keys but need to run this one-time setup every
/// time we instantiate the halo2 prover/verifier.
impl OneTimeSetup {
/// Returns the verification key for the AuthDecode circuit
pub fn verification_key() -> VK {
let params: Params<EqAffine> = Params::new(K);
// we need an instance of the circuit, the exact inputs don't matter
let circuit = AuthDecodeCircuit::new(
Default::default(),
Default::default(),
[[Default::default(); CELLS_PER_ROW]; USEFUL_ROWS],
);
// safe to unwrap, we are inputting deterministic params and circuit on every
// invocation
let vk = plonk::keygen_vk(&params, &circuit).unwrap();
VK { key: vk, params }
}
/// Returns the proving key for the AuthDecode circuit
pub fn proving_key() -> PK {
let params: Params<EqAffine> = Params::new(K);
// we need an instance of the circuit, the exact inputs don't matter
let circuit = AuthDecodeCircuit::new(
Default::default(),
Default::default(),
[[Default::default(); CELLS_PER_ROW]; USEFUL_ROWS],
);
// safe to unwrap, we are inputting deterministic params and circuit on every
// invocation
let vk = plonk::keygen_vk(&params, &circuit).unwrap();
let pk = plonk::keygen_pk(&params, vk.clone(), &circuit).unwrap();
PK {
key: pk,
params,
}
}
}

src/halo2_backend/poseidon/circuit_config.rs Normal file

@@ -0,0 +1,57 @@
use halo2_gadgets::poseidon::primitives::Spec;
use halo2_gadgets::poseidon::Pow5Chip;
use halo2_gadgets::poseidon::Pow5Config;
use halo2_proofs::plonk::ConstraintSystem;
use pasta_curves::pallas::Base as F;
/// Configures the in-circuit Poseidon for rate 15 and returns the config
///
/// Patterned after [halo2_gadgets::poseidon::pow5]
/// (see in that file tests::impl Circuit for HashCircuit::configure())
pub fn configure_poseidon_rate_15<S: Spec<F, 16, 15>>(
rate: usize,
meta: &mut ConstraintSystem<F>,
) -> Pow5Config<F, 16, 15> {
let width = rate + 1;
let state = (0..width).map(|_| meta.advice_column()).collect::<Vec<_>>();
let partial_sbox = meta.advice_column();
let rc_a = (0..width).map(|_| meta.fixed_column()).collect::<Vec<_>>();
let rc_b = (0..width).map(|_| meta.fixed_column()).collect::<Vec<_>>();
meta.enable_constant(rc_b[0]);
Pow5Chip::configure::<S>(
meta,
state.try_into().unwrap(),
partial_sbox,
rc_a.try_into().unwrap(),
rc_b.try_into().unwrap(),
)
}
/// Configures the in-circuit Poseidon for rate 1 and returns the config
///
/// Patterned after [halo2_gadgets::poseidon::pow5]
/// (see in that file tests::impl Circuit for HashCircuit::configure())
pub fn configure_poseidon_rate_1<S: Spec<F, 2, 1>>(
rate: usize,
meta: &mut ConstraintSystem<F>,
) -> Pow5Config<F, 2, 1> {
let width = rate + 1;
let state = (0..width).map(|_| meta.advice_column()).collect::<Vec<_>>();
let partial_sbox = meta.advice_column();
let rc_a = (0..width).map(|_| meta.fixed_column()).collect::<Vec<_>>();
let rc_b = (0..width).map(|_| meta.fixed_column()).collect::<Vec<_>>();
meta.enable_constant(rc_b[0]);
Pow5Chip::configure::<S>(
meta,
state.try_into().unwrap(),
partial_sbox,
rc_a.try_into().unwrap(),
rc_b.try_into().unwrap(),
)
}

src/halo2_backend/poseidon/mod.rs Normal file

@@ -0,0 +1,24 @@
pub(crate) mod circuit_config;
mod rate15_params;
mod rate1_params;
pub(crate) mod spec;
use halo2_gadgets::poseidon::primitives::{self as poseidon, ConstantLength};
use pasta_curves::pallas::Base as F;
use spec::{Spec1, Spec15};
/// Hashes inputs with rate 15 Poseidon and returns the digest
///
/// Patterned after [halo2_gadgets::poseidon::pow5]
/// (see `poseidon_hash()` in that file's tests)
pub fn poseidon_15(field_elements: &[F; 15]) -> F {
poseidon::Hash::<F, Spec15, ConstantLength<15>, 16, 15>::init().hash(*field_elements)
}
/// Hashes inputs with rate 1 Poseidon and returns the digest
///
/// Patterned after [halo2_gadgets::poseidon::pow5]
/// (see `poseidon_hash()` in that file's tests)
pub fn poseidon_1(field_elements: &[F; 1]) -> F {
poseidon::Hash::<F, Spec1, ConstantLength<1>, 2, 1>::init().hash(*field_elements)
}
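#[test]
fn test_poseidon_determinism() {
// A minimal sanity check: hashing the same input twice must produce the
// same digest, since the Poseidon permutation is deterministic.
let a = poseidon_1(&[F::from(42u64)]);
let b = poseidon_1(&[F::from(42u64)]);
assert_eq!(a, b);
}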

File diff suppressed because it is too large

View File

@@ -0,0 +1,988 @@
//! Parameters for using rate 1 Poseidon with the Pallas field.
//! Patterned after [halo2_gadgets::poseidon::primitives::fp]
//!
//! The parameters can be reproduced by running the following Sage script from
//! [this repository](https://github.com/daira/pasta-hadeshash):
//!
//! ```text
//! $ sage generate_parameters_grain.sage 1 0 255 2 8 56 0x40000000000000000000000000000000224698fc094cf91b992d30ed00000001 --rust
//! ```
//!
//! where 1 means "prime field", 0 means "non-negative sbox", 255 is the bitsize
//! of the field, 2 is the Poseidon width (rate + 1), 8 is the number of full
//! rounds, 56 is the number of partial rounds.
use pasta_curves::pallas;
// Number of round constants: 128
// Round constants for GF(p):
pub(crate) const ROUND_CONSTANTS: [[pallas::Base; 2]; 64] = [
[
pallas::Base::from_raw([
0x5cf8_7cc9_e030_f60f,
0xf4ab_5c10_a2c5_5adb,
0x8511_8166_c68b_f0c4,
0x3034_7723_5114_38a0,
]),
pallas::Base::from_raw([
0x26cf_86d0_a045_1a4b,
0x439f_1a05_1aa4_630c,
0xeb64_2730_3181_e7b7,
0x10db_8569_65e4_0038,
]),
],
[
pallas::Base::from_raw([
0x3efe_b3a3_2039_8526,
0x905f_9cee_e38d_cce8,
0x1dd0_487b_c9fa_b1c6,
0x3b07_f0ff_7edc_f93b,
]),
pallas::Base::from_raw([
0x672b_68ab_fb91_91e0,
0x1630_7879_2887_5c07,
0x76d9_4dbb_24b0_f3c6,
0x3a93_3861_cf23_7523,
]),
],
[
pallas::Base::from_raw([
0x6dc7_a3fd_779b_3906,
0x86a5_4c9d_60d3_5eb0,
0x8696_7b11_d5dc_efb9,
0x0af5_bafd_335d_ae5c,
]),
pallas::Base::from_raw([
0xb35f_d80f_0f80_de9e,
0xe5be_96c3_aec1_5579,
0xc40e_d1f4_f9bd_7d17,
0x16bb_e434_b24f_94e2,
]),
],
[
pallas::Base::from_raw([
0x2cf4_ddd5_4154_6890,
0x2c3d_7dd9_e271_6c3a,
0xcab0_1ce9_81d4_7552,
0x0d1b_e811_a8e7_3220,
]),
pallas::Base::from_raw([
0xd550_13d3_2124_4f13,
0x3f90_6e10_6f84_4196,
0xdc48_c3a2_26db_da31,
0x06a8_b453_9488_b7dd,
]),
],
[
pallas::Base::from_raw([
0x0f06_3d16_bef9_c43a,
0xc14c_d635_5415_df6e,
0xf414_e901_802d_54f8,
0x0d27_03e5_b30f_54d7,
]),
pallas::Base::from_raw([
0xe4c8_56c1_ca3d_e9c7,
0xb82a_2457_4f6d_13ee,
0x23a4_a445_7781_9b7d,
0x2730_c607_bba7_3337,
]),
],
[
pallas::Base::from_raw([
0x1056_f227_b514_b9f6,
0x3fba_b53e_b1a8_3f55,
0xdc10_43c5_3b23_800a,
0x01ac_5f59_256c_5004,
]),
pallas::Base::from_raw([
0x44ba_b5ba_81ae_42f3,
0x6d89_481e_7a56_328b,
0xc95b_9493_7afb_b579,
0x0790_b925_23c9_73f1,
]),
],
[
pallas::Base::from_raw([
0x2484_e266_142b_b8c0,
0xcc74_9bc8_3eb2_f037,
0x1396_4fb3_e877_1d0a,
0x1d63_b59d_97bc_269d,
]),
pallas::Base::from_raw([
0x252d_2140_efdf_0914,
0x77fd_f7e2_bfb5_6c8b,
0x5f7a_0126_2df9_e68c,
0x1a52_d04e_5f14_a3a0,
]),
],
[
pallas::Base::from_raw([
0x2583_ac4e_9894_305c,
0x77dd_24ec_bfb2_8e1b,
0x3959_d162_80a3_61aa,
0x333b_0eea_5da7_ed1e,
]),
pallas::Base::from_raw([
0x648c_5ce7_ebd9_df18,
0xc374_2cb7_45ec_8821,
0xc0c5_bb3a_e26b_077d,
0x3b50_3fc3_33b7_95cc,
]),
],
[
pallas::Base::from_raw([
0xcda9_33d6_ae52_eca7,
0x9eb0_6801_6ae8_8af9,
0x2648_52f0_75c7_8f7f,
0x2d83_0a53_584c_5556,
]),
pallas::Base::from_raw([
0x276c_26f1_a2b1_b86e,
0xccc7_c81e_3add_37db,
0xe60e_55f1_35b9_ac80,
0x0250_f4d6_780a_d29a,
]),
],
[
pallas::Base::from_raw([
0x4c4d_8962_33b9_3cd5,
0x97eb_e98e_3a45_0a62,
0x3ee9_0cdb_884f_1c46,
0x2cd3_3145_55d6_faf2,
]),
pallas::Base::from_raw([
0xbad5_026b_46ec_cb27,
0xc8c3_2bdb_01ba_11ab,
0x1d60_edb9_9c0a_eda7,
0x2e1c_c28e_390e_64aa,
]),
],
[
pallas::Base::from_raw([
0xcb3c_96c7_0024_f71b,
0x1e73_4a70_08ae_09bb,
0x276b_eac4_08fe_2e85,
0x2d48_2000_0675_df7c,
]),
pallas::Base::from_raw([
0x8385_b5fa_19e9_7142,
0x266f_7636_e05b_0c06,
0x8b90_2c6b_2dc9_92cb,
0x0c2f_e101_a2b5_2b53,
]),
],
[
pallas::Base::from_raw([
0x5785_5153_5002_bc95,
0x87fc_7bd4_f21a_9e02,
0x7c6a_178e_f2f0_0b86,
0x209b_790b_78c0_e792,
]),
pallas::Base::from_raw([
0x4bd4_222b_93e1_4c10,
0xf94a_9108_52a7_b4ea,
0xc649_1513_d08a_9983,
0x2dd0_926c_f56b_baae,
]),
],
[
pallas::Base::from_raw([
0x4571_827e_74bf_d2d3,
0x8a4c_cc0f_6494_cc05,
0x7b14_6324_c847_72c5,
0x37af_6129_00b8_3997,
]),
pallas::Base::from_raw([
0xce11_d065_c195_5338,
0x371e_8322_8e42_67bb,
0x9911_e5cb_3781_d772,
0x2af0_0c93_d59e_d14c,
]),
],
[
pallas::Base::from_raw([
0x0446_f221_8017_874b,
0x1f67_f71e_9b87_a4a3,
0x2cb1_0ecb_e50b_5349,
0x232b_6c84_7a6d_2391,
]),
pallas::Base::from_raw([
0x629e_faa0_e964_56c0,
0xeea7_4b3f_0b66_644a,
0x57a3_70e4_fd62_6071,
0x0ab3_4adb_e77b_8f1e,
]),
],
[
pallas::Base::from_raw([
0x9147_cbc5_b708_4be8,
0xcab6_3280_7c2c_d0dc,
0x46b1_bdbe_ab8d_d5cd,
0x1a83_e43e_f118_c900,
]),
pallas::Base::from_raw([
0xa595_d4dc_6c56_f2f9,
0xb642_5674_62a3_d557,
0x9005_4687_2091_8130,
0x1ec6_fa41_b41b_672d,
]),
],
[
pallas::Base::from_raw([
0x864f_8e57_13fe_4a17,
0xf82a_acf0_287d_03e1,
0xa111_b8f5_212c_fc5b,
0x01f8_1a15_3199_a751,
]),
pallas::Base::from_raw([
0x7bcc_a4de_2c6f_0649,
0x2d75_1cab_f3d9_fd00,
0xecd7_3a54_a7b2_0616,
0x2617_3075_87a6_75f4,
]),
],
[
pallas::Base::from_raw([
0x7e36_2d7f_98bc_953f,
0xb1ae_6e2d_4e16_8208,
0x7974_a245_624b_642b,
0x1647_be94_c515_178c,
]),
pallas::Base::from_raw([
0xda7e_f9d1_840b_0419,
0xb5c0_bc2a_ab1c_7b46,
0x75fc_370e_aa8c_1b77,
0x32bf_3d45_1b69_dde0,
]),
],
[
pallas::Base::from_raw([
0x7a89_a58a_2dc4_0097,
0x9252_44fd_d0aa_184c,
0x77e9_6e91_52b3_6042,
0x17cb_b3ee_0adc_b9d9,
]),
pallas::Base::from_raw([
0x8ace_050d_5537_66a3,
0x956e_52b8_dac3_6ef8,
0x5c55_2f6b_5977_ab94,
0x29d7_6a82_1e32_2077,
]),
],
[
pallas::Base::from_raw([
0xc6ed_a903_5d04_334d,
0xa99f_dc45_24fc_2810,
0xa693_a51a_e822_7cc0,
0x0dfc_4741_51e5_c605,
]),
pallas::Base::from_raw([
0xaec9_d366_a91b_1d98,
0xeeda_fb47_5574_46a9,
0x2ed4_65cd_ab37_a7b2,
0x3e28_7088_506b_b38d,
]),
],
[
pallas::Base::from_raw([
0xaff2_c913_cf58_974a,
0x8c56_f3cd_3994_0030,
0x673c_9eff_c245_7879,
0x2641_d291_9d0b_ed24,
]),
pallas::Base::from_raw([
0x03c5_e74d_0a66_3cf9,
0x01a5_45ec_b1c6_a374,
0x7c84_d93b_a935_9da9,
0x286c_1f84_1a05_bb66,
]),
],
[
pallas::Base::from_raw([
0x4469_7b49_b102_5df5,
0xeae1_0e7c_312a_eb8a,
0x4b44_16fc_8231_7a4d,
0x0b8d_4686_1838_7249,
]),
pallas::Base::from_raw([
0x028c_4f55_d4f4_f0ed,
0xcf77_9baf_f745_7cce,
0x3f05_7c10_46bb_04de,
0x0e2f_4df2_de65_0455,
]),
],
[
pallas::Base::from_raw([
0x29ea_d0b3_e70a_eba4,
0xe5a3_5810_b854_0228,
0xd1c7_7259_d976_6437,
0x1694_268c_cf0c_f400,
]),
pallas::Base::from_raw([
0x54b5_375e_7f46_698b,
0x26c2_8910_a099_4e08,
0x38c9_c08c_36ee_90b6,
0x3c96_c51a_30dc_76af,
]),
],
[
pallas::Base::from_raw([
0x4bc0_616e_f5b3_dddb,
0x07d1_928b_efec_6a43,
0x7f6f_2631_f93c_b33d,
0x2107_7e81_fe5a_0bde,
]),
pallas::Base::from_raw([
0x9c94_76e7_a380_fb90,
0x74a8_0c75_0418_6d3c,
0x3ea2_1f84_ea42_ba33,
0x11ae_b10c_549b_c49d,
]),
],
[
pallas::Base::from_raw([
0x41e0_6470_4a02_4904,
0x2493_7f94_bd97_4919,
0x3e3b_a071_6b5d_e50a,
0x0e9b_a2ef_bd17_7a7c,
]),
pallas::Base::from_raw([
0x92d8_93f4_5788_e9d6,
0x8fbd_01e3_e5c2_2f41,
0xe3c5_0c2d_9abc_405d,
0x1dda_541f_2f74_4aae,
]),
],
[
pallas::Base::from_raw([
0xb656_a2e2_8db0_97b3,
0x0c26_6d9c_002c_5d77,
0x9fbd_8cf2_9049_439a,
0x2ec1_165c_af7f_d158,
]),
pallas::Base::from_raw([
0x9443_f95d_c235_66ec,
0x575e_4b7a_e66c_de54,
0xe0bb_bfb8_8241_9c31,
0x1f8c_c6e5_61bd_e673,
]),
],
[
pallas::Base::from_raw([
0x9efb_000e_0bcf_bdd0,
0xa494_546e_a8ef_e718,
0x6bca_37a8_8295_daeb,
0x1205_704b_f8e9_5ba2,
]),
pallas::Base::from_raw([
0x5408_36f0_6eb4_9d38,
0x3d21_df81_eee9_35ec,
0xfbbd_4ef8_0645_d247,
0x016c_deeb_db6f_8b37,
]),
],
[
pallas::Base::from_raw([
0x1849_23bd_fa0c_ae33,
0x7b5c_a43c_7809_5f14,
0xadd6_f220_ff49_38fe,
0x19b8_a2f4_a640_3c98,
]),
pallas::Base::from_raw([
0xc25d_67bb_017e_26d5,
0x6bf9_31d5_efb8_ce59,
0x694e_e0a2_7a4e_c9fb,
0x1935_8aab_5822_facc,
]),
],
[
pallas::Base::from_raw([
0xfe24_e764_a2e7_affe,
0xc985_3f81_1188_cd44,
0xbee1_4572_b696_7720,
0x0c7a_80ba_8f73_72a2,
]),
pallas::Base::from_raw([
0x21cc_1e42_8f9b_48c4,
0x09b8_7f7f_cffc_3173,
0x6f42_058e_3f87_1ac1,
0x3894_fe11_3139_129d,
]),
],
[
pallas::Base::from_raw([
0x2357_40d5_fb40_6ab9,
0x8e94_f5e3_f96a_177c,
0xf0e6_2be6_01da_8cad,
0x1aef_c078_5946_d65e,
]),
pallas::Base::from_raw([
0xe7d1_e06a_4e16_26b3,
0x4028_8c36_554e_e106,
0xe545_ef54_3ac1_b166,
0x0af1_c4fc_0b49_030f,
]),
],
[
pallas::Base::from_raw([
0x7908_7df4_6601_54f4,
0xa4e1_62e0_0d3b_a70d,
0xe023_3ca1_68ea_6c21,
0x1318_aac0_f0ef_72e5,
]),
pallas::Base::from_raw([
0x4e49_b90c_d73e_3d05,
0x2321_7247_ece5_e051,
0x2c80_a4dd_8308_3db0,
0x0059_83d5_e4d5_d48c,
]),
],
[
pallas::Base::from_raw([
0x34d5_9af5_2a6a_d067,
0x7463_56c6_821f_f037,
0xc539_6532_1371_73d6,
0x39c9_3d11_9626_20d7,
]),
pallas::Base::from_raw([
0x19e5_f680_bed7_5fa7,
0xa2bc_7bec_09db_ce13,
0x83fc_c7d2_ccb1_5127,
0x2475_bc5a_cc01_2b80,
]),
],
[
pallas::Base::from_raw([
0xddbe_de53_4265_c995,
0x8ac2_9094_27f0_a608,
0x28df_8eb9_c2cb_17ed,
0x0bb4_42eb_a5ba_f8e7,
]),
pallas::Base::from_raw([
0x9881_ad92_2720_4ecd,
0x841a_94be_0277_02ea,
0xa884_fe33_f287_ea54,
0x3d12_0149_595e_0c7f,
]),
],
[
pallas::Base::from_raw([
0xf22e_8703_01d0_51e0,
0xf25c_44be_406a_90de,
0x1df4_bf4f_5f1d_a532,
0x35e4_7349_a427_ba8b,
]),
pallas::Base::from_raw([
0xd69d_4e1f_d34a_bcec,
0xf36d_c3e0_7eb1_df7f,
0x5259_71b5_7384_1109,
0x19ca_f5fb_bc0a_1424,
]),
],
[
pallas::Base::from_raw([
0x5de1_1ea8_1912_3651,
0xd5da_fe9f_ce2c_807f,
0xc0b9_5360_e3d9_92b0,
0x06ab_7adf_0f99_7bad,
]),
pallas::Base::from_raw([
0xe665_fbe8_3d63_cdd2,
0x2b31_a8a4_4178_a0d6,
0xca45_cc1d_a7d8_ca5e,
0x0186_5aba_8ec3_d89a,
]),
],
[
pallas::Base::from_raw([
0x7af1_c256_0afc_4ed9,
0xfbe8_0c8e_0602_d8ea,
0xa34e_9566_471d_b96e,
0x221d_9df3_3836_d262,
]),
pallas::Base::from_raw([
0x4cf7_6c60_32d1_59af,
0xef24_ec3d_5ba0_0096,
0xe095_85d7_4c63_7be4,
0x0f12_e59c_448a_18ba,
]),
],
[
pallas::Base::from_raw([
0x56b5_fcb9_3ecb_7a27,
0x120f_4e68_4359_0ed1,
0x40df_d9aa_95fa_8b5c,
0x1037_6fa0_adb3_a505,
]),
pallas::Base::from_raw([
0xa752_9a98_0458_eac8,
0x7e54_0902_ada5_f8d8,
0x0ed8_3817_99fa_51e2,
0x0315_97a4_9ea8_90a5,
]),
],
[
pallas::Base::from_raw([
0x04ea_6707_cf15_59fd,
0x1344_80ab_e2cd_a79b,
0xa78a_948a_86d8_9ab4,
0x0c25_825f_1d3b_b30c,
]),
pallas::Base::from_raw([
0x09ed_59b3_aa79_984c,
0xa992_4e26_f425_b9dd,
0xe6dd_0693_439a_500b,
0x22aa_9b1c_a14e_a000,
]),
],
[
pallas::Base::from_raw([
0xbf92_096d_8b0d_8ce5,
0x32a1_e8b7_2ef6_35fa,
0xda4c_f647_fd75_c45b,
0x2073_3231_fc2b_0ff1,
]),
pallas::Base::from_raw([
0x8b38_131b_edcb_d3e9,
0xc18d_38b2_44f6_75e2,
0x6f92_489d_4b7f_0d1d,
0x0099_8747_8146_99bc,
]),
],
[
pallas::Base::from_raw([
0x175d_c968_5651_53f8,
0xf8f0_cb81_a431_beea,
0x4018_a3aa_9ba8_d6b6,
0x056a_f47f_0208_cf52,
]),
pallas::Base::from_raw([
0x2774_0b9c_38a0_332c,
0x3f72_748d_1578_393f,
0x42ef_8477_1ee2_cc93,
0x1e97_1384_8edf_3386,
]),
],
[
pallas::Base::from_raw([
0x3ff6_43e5_75a2_eaca,
0x564b_0ea4_af44_619c,
0xcdde_4e4e_bab5_2ae8,
0x363c_3aed_da55_0421,
]),
pallas::Base::from_raw([
0x9c26_5808_f0e8_db75,
0x92eb_9c09_04d6_75d7,
0xc442_9b9d_7735_f4e1,
0x389c_6cf1_5e1d_5e2c,
]),
],
[
pallas::Base::from_raw([
0xe219_65a7_b5b4_de53,
0x35b7_d6f8_7894_02f4,
0x79ab_87fb_8061_ad65,
0x11e4_dc42_1276_2355,
]),
pallas::Base::from_raw([
0x82f8_ac7e_99eb_2dfb,
0x1d15_a478_4263_a5fe,
0x5fec_2e21_5963_c619,
0x3e1a_3d14_beb1_d015,
]),
],
[
pallas::Base::from_raw([
0x5c14_091a_b6d1_9b34,
0x8dba_ccf6_4f08_8e2f,
0x3867_fba2_7a80_a2b8,
0x16bf_2023_58c8_1b94,
]),
pallas::Base::from_raw([
0xd636_c91e_4e8e_1114,
0xf730_2fff_34e8_2ec0,
0xe035_3f99_5bf5_4afc,
0x36e0_11b9_3cee_7996,
]),
],
[
pallas::Base::from_raw([
0x9311_4b69_0fc0_60b6,
0xdac2_46ea_9b16_5dc1,
0x6f8e_d030_de4f_6cbe,
0x1668_16fa_35c1_44fa,
]),
pallas::Base::from_raw([
0x63b1_bb14_3c69_5b59,
0x93a0_45b8_aea6_1110,
0x9d1b_9d98_9e1e_193c,
0x38b9_8a24_31cc_b829,
]),
],
[
pallas::Base::from_raw([
0xebce_3e78_525f_067f,
0xa249_1c88_5e5c_390c,
0x41ef_a273_f1f0_c378,
0x0424_05a4_17d7_0cb0,
]),
pallas::Base::from_raw([
0x0189_ac12_380e_4f31,
0xa90c_8e62_2f1e_d0f0,
0x6f59_7d07_d10e_80bf,
0x016b_3608_8fe6_d24d,
]),
],
[
pallas::Base::from_raw([
0x9cc3_c0fb_38ac_7528,
0x8b06_03ad_5836_8b1a,
0x1f85_c6a8_1519_157a,
0x1348_c563_bde6_0444,
]),
pallas::Base::from_raw([
0x7816_7f02_71df_e597,
0xb5f1_ac10_a621_5063,
0xf6d2_0690_7131_328e,
0x1cdf_fdc7_9e5e_42b6,
]),
],
[
pallas::Base::from_raw([
0x4c8b_bdaf_a419_00d7,
0x2543_ead7_4aa2_9aa2,
0x078e_5de0_b51b_6c61,
0x3c8d_1b23_53e9_bca6,
]),
pallas::Base::from_raw([
0xb6f7_27d8_72f8_dc51,
0x0d16_284b_bcfc_80f7,
0x7815_f401_74df_e809,
0x2f48_008f_3de7_aec3,
]),
],
[
pallas::Base::from_raw([
0xbd23_e16f_c24d_f4cb,
0x8a58_09aa_8b46_f65e,
0x56c2_e65a_0a3e_e0cd,
0x1e93_a47f_81d7_c850,
]),
pallas::Base::from_raw([
0x7612_d632_1b2c_e006,
0x6a93_8b75_4a6e_671f,
0xd5ab_e704_6dd4_ce00,
0x0fc7_5ffc_8126_32a1,
]),
],
[
pallas::Base::from_raw([
0x9d52_ac72_b760_489f,
0xe9e0_5a39_9ca1_5adb,
0xdffd_5dcb_e4dd_e2de,
0x3f00_a54a_ef2f_dc28,
]),
pallas::Base::from_raw([
0x96cc_9cf2_9557_761a,
0x0d29_c179_45ee_4b11,
0xe911_5da2_a5c6_61c8,
0x0e2e_602d_44f4_3a0d,
]),
],
[
pallas::Base::from_raw([
0xc753_50fe_25b3_d967,
0x41b2_bd13_8ea7_bd31,
0x4688_7aeb_9207_72c8,
0x071c_7ce9_1d7c_50c8,
]),
pallas::Base::from_raw([
0xe5cb_f146_f526_500a,
0x0a97_6ff4_9cea_6cde,
0x0506_ae41_8cab_4be9,
0x2e19_dd91_019b_f563,
]),
],
[
pallas::Base::from_raw([
0x05c6_87a9_3a84_f105,
0x593b_5b3b_276c_f400,
0xbe60_beae_a7b8_6bea,
0x3a93_ede7_f5f9_1e9f,
]),
pallas::Base::from_raw([
0x77e3_2d6a_2c33_35c5,
0x826a_0042_2185_1b6d,
0xad3e_d4d8_e514_158d,
0x3e55_66cb_cc3e_563a,
]),
],
[
pallas::Base::from_raw([
0x46a4_99a9_3283_7fc0,
0x6cc9_5bf8_1f7a_f09e,
0xcc05_4759_423f_f910,
0x0f1e_5a15_cf83_3246,
]),
pallas::Base::from_raw([
0x42cc_099a_d8f5_fd92,
0x95a8_2a2e_18ab_de4d,
0x14be_8077_2e2d_3552,
0x1be9_9e07_fcaf_31cf,
]),
],
[
pallas::Base::from_raw([
0x58bc_c387_c83e_b5be,
0x7595_883f_bb56_7e67,
0x10de_ee41_e1a3_6a4a,
0x027b_b458_8733_32d0,
]),
pallas::Base::from_raw([
0x5bb5_3bba_1843_65b7,
0x2a53_545c_d431_6ff9,
0x974a_f96a_9d25_054e,
0x1780_0175_167f_67b5,
]),
],
[
pallas::Base::from_raw([
0x529f_a59e_9d06_7714,
0x0ebd_9700_7dba_7c5c,
0xe750_08a0_e839_997e,
0x1a4a_af44_46da_14d6,
]),
pallas::Base::from_raw([
0xf049_c2a2_6532_0e67,
0x2f8e_05aa_39f1_a561,
0xc635_1cef_8aa7_06c2,
0x3360_5fea_8d5f_da8b,
]),
],
[
pallas::Base::from_raw([
0x9461_d08e_fa9a_05ad,
0x10f3_04d8_6ec4_5447,
0xfb65_19ef_d638_b0b3,
0x213a_93a7_89ef_3805,
]),
pallas::Base::from_raw([
0x71ab_7e2b_4ee5_0d79,
0xd151_135d_c0e5_9e2c,
0xf307_5bd1_01b9_bcb9,
0x043f_c5a4_9dbd_9775,
]),
],
[
pallas::Base::from_raw([
0x5a45_7d44_d59f_4040,
0x7d17_0d59_41ca_7f94,
0xb918_7158_6da5_4341,
0x04ee_6ac7_a590_7e3d,
]),
pallas::Base::from_raw([
0x35d7_312b_215f_a5ca,
0x31ee_c979_fa44_f565,
0x210a_dc03_67de_b08c,
0x2108_016d_b6f8_3a9f,
]),
],
[
pallas::Base::from_raw([
0xf8fd_b6b2_484e_50af,
0xf1d8_2711_2189_9731,
0x1b9b_9677_ee20_7986,
0x22f3_c7ca_c770_1833,
]),
pallas::Base::from_raw([
0xc9bd_973d_d1a2_7149,
0xff8d_cda9_cc87_a542,
0xbf3a_3c0d_52ba_3037,
0x1de8_3ef3_1303_508e,
]),
],
[
pallas::Base::from_raw([
0x1674_b50e_d2c6_94bc,
0x589a_080c_5adb_964d,
0x2549_38ed_849f_15fb,
0x1756_5a85_cdb3_8d82,
]),
pallas::Base::from_raw([
0xd2bd_506d_5aff_d505,
0xce50_6ce6_27bc_f11c,
0xb9cf_eb06_7546_68fc,
0x2851_1e30_efb0_5ee2,
]),
],
[
pallas::Base::from_raw([
0x1f84_4e8b_6810_1ab6,
0xa12d_a729_cac2_68ed,
0xb7b0_7bc5_8188_a2f2,
0x3e11_73f5_0bb7_1198,
]),
pallas::Base::from_raw([
0xdc4a_c560_c1c5_e404,
0x8e5e_c2d3_b8df_3f4c,
0xd81c_f97d_543d_c2e5,
0x0c9a_7e5b_4a80_fac8,
]),
],
[
pallas::Base::from_raw([
0x7bb3_d73a_053d_35f4,
0xbb2d_522b_1320_010f,
0x9b99_2d31_e948_c141,
0x0d5c_1dab_03ea_c50e,
]),
pallas::Base::from_raw([
0x7b9c_eb8e_30d5_37a1,
0x05d1_3b4e_8073_9aba,
0x4a13_85e3_d215_30be,
0x0266_2562_9b3d_63b9,
]),
],
[
pallas::Base::from_raw([
0x0ef1_9920_b750_e03b,
0x23e0_bde9_5ccd_fe9f,
0x343f_5bd4_310f_3216,
0x26c9_6b01_477e_1d22,
]),
pallas::Base::from_raw([
0xcd3b_4278_cb50_6936,
0x00a2_f851_4f4f_51f7,
0x8f4d_d361_313b_7b7f,
0x3d3f_9ed7_c700_089e,
]),
],
[
pallas::Base::from_raw([
0xd014_e1d1_fb24_b787,
0x0fbc_92bc_99b5_bcc0,
0xe35e_af39_bda8_a613,
0x19bd_d3fa_dba9_a54a,
]),
pallas::Base::from_raw([
0xcd6e_afba_c8a2_4b2c,
0xc5dc_91a0_3ed8_62f2,
0x564f_9a28_2f61_9d2b,
0x3f76_b39a_fd07_f198,
]),
],
[
pallas::Base::from_raw([
0x170d_6565_f0ae_f5b7,
0x2883_f3e3_068d_ef26,
0x2e24_da4e_bcf6_04ed,
0x2d86_297c_57d5_27a0,
]),
pallas::Base::from_raw([
0x97bd_3ee5_ff4f_59c3,
0x6262_65eb_2784_c166,
0x5d7a_54e8_bf64_0d25,
0x2d92_06ad_bc21_57f8,
]),
],
[
pallas::Base::from_raw([
0x78e9_bf79_a067_5a2a,
0xe6c5_5ced_7877_cb31,
0xff37_e59b_0924_d681,
0x0077_a733_6336_672f,
]),
pallas::Base::from_raw([
0xe81c_3f70_2f68_ed3f,
0x5ddd_adaa_bae8_b75e,
0xc3a0_7302_befe_0553,
0x0794_b785_e1aa_1bb5,
]),
],
[
pallas::Base::from_raw([
0x8db4_3423_a1a6_b1fd,
0xc433_233a_59ca_974e,
0x100d_f8c7_67ca_7b59,
0x3666_ea9a_17cc_da18,
]),
pallas::Base::from_raw([
0x5af0_b46b_d02b_2fc8,
0x2708_c20e_d75a_a58f,
0xbfa3_8363_cf23_f6ff,
0x0c01_953d_5c46_d4cd,
]),
],
];
// n: 255
// t: 2
// N: 510
// Result Algorithm 1:
// [True, 0]
// Result Algorithm 2:
// [True, None]
// Result Algorithm 3:
// [True, None]
// Prime number: 0x40000000000000000000000000000000224698fc094cf91b992d30ed00000001
// MDS matrix:
pub(crate) const MDS: [[pallas::Base; 2]; 2] = [
[
pallas::Base::from_raw([
0x215e_2650_42d5_6b39,
0x1f4a_f3f9_3a77_ffc7,
0xfa10_b63d_b5c2_9fb0,
0x0456_8631_47b9_cd1e,
]),
pallas::Base::from_raw([
0x9370_d180_77b1_ccb5,
0x16bb_48ed_9804_2ec6,
0xc74f_1f6f_6d1e_5f62,
0x196e_0094_d3d3_50a0,
]),
],
[
pallas::Base::from_raw([
0x558d_1c05_176c_f008,
0xb0af_dcfb_54f2_f175,
0x254a_38d2_505c_4bb9,
0x1f89_e907_a33b_5e7d,
]),
pallas::Base::from_raw([
0xe38f_4cda_3d6e_f58b,
0x491c_7b9e_835b_61d1,
0x4bed_03c4_ba03_a32d,
0x23c4_24f6_216e_138b,
]),
],
];
// Inverse MDS matrix:
pub(crate) const MDS_INV: [[pallas::Base; 2]; 2] = [
[
pallas::Base::from_raw([
0x2886_adc2_ffb9_1eec,
0x9a1b_bd56_b0fb_9105,
0x7e23_4946_cbd2_e9b1,
0x1fc3_4a10_a23e_5dfb,
]),
pallas::Base::from_raw([
0xd9f8_aca0_f3bf_3152,
0xaa0b_d707_acdc_5df5,
0x3184_7088_464f_0a36,
0x107b_ef1a_7b48_acb2,
]),
],
[
pallas::Base::from_raw([
0x80c1_6662_05bd_6073,
0xa6e0_3466_cecb_9ae3,
0xe782_e3f6_3f6e_4373,
0x05d7_727a_7aa3_8f58,
]),
pallas::Base::from_raw([
0x6073_5efd_fc0e_9579,
0xf63c_9282_5200_e45b,
0xfd2a_af9c_97f9_c29e,
0x2fda_d27b_cacc_efe5,
]),
],
];

View File

@@ -0,0 +1,79 @@
use super::{rate15_params, rate1_params};
use group::ff::Field;
use halo2_gadgets::poseidon::primitives::Spec;
use pasta_curves::pallas::Base as F;
/// The type used to hold the MDS matrix and its inverse.
pub(crate) type Mds<F, const T: usize> = [[F; T]; T];
/// Spec for rate 15 Poseidon. halo2 uses this spec both inside
/// the zk circuit and in the clear.
///
/// Compare it to the spec which zcash uses:
/// [halo2_gadgets::poseidon::primitives::P128Pow5T3]
#[derive(Debug)]
pub struct Spec15;
impl Spec<F, 16, 15> for Spec15 {
fn full_rounds() -> usize {
8
}
fn partial_rounds() -> usize {
// Taken from https://github.com/iden3/circomlib/blob/master/circuits/poseidon.circom
// (see "var N_ROUNDS_P[16]"), where they use 64 partial rounds for 15-rate Poseidon
64
}
fn sbox(val: F) -> F {
val.pow_vartime(&[5])
}
fn secure_mds() -> usize {
unimplemented!()
}
fn constants() -> (Vec<[F; 16]>, Mds<F, 16>, Mds<F, 16>) {
(
rate15_params::ROUND_CONSTANTS[..].to_vec(),
rate15_params::MDS,
rate15_params::MDS_INV,
)
}
}
/// Spec for rate 1 Poseidon which halo2 uses both inside
/// the zk circuit and in the clear.
///
/// Compare it to the spec which zcash uses:
/// [halo2_gadgets::poseidon::primitives::P128Pow5T3]
#[derive(Debug)]
pub struct Spec1;
impl Spec<F, 2, 1> for Spec1 {
fn full_rounds() -> usize {
8
}
fn partial_rounds() -> usize {
// Taken from https://github.com/iden3/circomlib/blob/master/circuits/poseidon.circom
// (see "var N_ROUNDS_P[16]"), where they use 56 partial rounds for 1-rate Poseidon
56
}
fn sbox(val: F) -> F {
val.pow_vartime(&[5])
}
fn secure_mds() -> usize {
unimplemented!()
}
fn constants() -> (Vec<[F; 2]>, Mds<F, 2>, Mds<F, 2>) {
(
rate1_params::ROUND_CONSTANTS[..].to_vec(),
rate1_params::MDS,
rate1_params::MDS_INV,
)
}
}
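#[test]
fn test_round_constant_count() {
// A minimal consistency check, assuming the standard Poseidon layout of
// one set of round constants per round: the constants must cover exactly
// full_rounds + partial_rounds rounds.
let (rc15, _, _) = Spec15::constants();
assert_eq!(rc15.len(), Spec15::full_rounds() + Spec15::partial_rounds());
let (rc1, _, _) = Spec1::constants();
assert_eq!(rc1.len(), Spec1::full_rounds() + Spec1::partial_rounds());
}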

349
src/halo2_backend/prover.rs Normal file
View File

@@ -0,0 +1,349 @@
use super::circuit::{AuthDecodeCircuit, SALT_SIZE, TOTAL_FIELD_ELEMENTS};
use super::poseidon::{poseidon_1, poseidon_15};
use super::utils::{bigint_to_f, deltas_to_matrices, f_to_bigint};
use super::{CHUNK_SIZE, USEFUL_BITS};
use crate::prover::{ProofInput, Prove, ProverError};
use halo2_proofs::plonk;
use halo2_proofs::plonk::ProvingKey;
use halo2_proofs::poly::commitment::Params;
use halo2_proofs::transcript::{Blake2bWrite, Challenge255};
use instant::Instant;
use num::BigUint;
use pasta_curves::pallas::Base as F;
use pasta_curves::EqAffine;
use rand::thread_rng;
/// halo2's native ProvingKey can't be used without params, so we wrap
/// them in one struct.
#[derive(Clone)]
pub struct PK {
pub key: ProvingKey<EqAffine>,
pub params: Params<EqAffine>,
}
/// Implements the Prover in the authdecode protocol using the halo2
/// proof system.
pub struct Prover {
proving_key: PK,
}
impl Prove for Prover {
fn prove(&self, input: ProofInput) -> Result<Vec<u8>, ProverError> {
if input.deltas.len() != self.chunk_size() || input.plaintext.len() != TOTAL_FIELD_ELEMENTS
{
// this can only be caused by an error in
// `crate::prover::AuthDecodeProver` logic
return Err(ProverError::InternalError);
}
// convert into matrices
let (deltas_as_rows, deltas_as_columns) =
deltas_to_matrices(&input.deltas, self.useful_bits());
// convert plaintext into F type
let plaintext: [F; TOTAL_FIELD_ELEMENTS] = input
.plaintext
.iter()
.map(|bigint| bigint_to_f(bigint))
.collect::<Vec<_>>()
.try_into()
.unwrap();
// arrange into the format which halo2 expects
let mut all_inputs: Vec<&[F]> = deltas_as_columns.iter().map(|v| v.as_slice()).collect();
// add another column with public inputs
let tmp = &[
bigint_to_f(&input.plaintext_hash),
bigint_to_f(&input.label_sum_hash),
bigint_to_f(&input.sum_of_zero_labels),
];
all_inputs.push(tmp);
let now = Instant::now();
// prepare the proving system and generate the proof:
let circuit =
AuthDecodeCircuit::new(plaintext, bigint_to_f(&input.salt), deltas_as_rows.into());
let params = &self.proving_key.params;
let pk = &self.proving_key.key;
let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]);
let mut rng = thread_rng();
let res = plonk::create_proof(
params,
pk,
&[circuit],
&[all_inputs.as_slice()],
&mut rng,
&mut transcript,
);
if res.is_err() {
return Err(ProverError::ProvingBackendError);
}
println!("Proof created [{:?}]", now.elapsed());
let proof = transcript.finalize();
println!("Proof size [{} kB]", proof.len() as f64 / 1024.0);
Ok(proof)
}
fn useful_bits(&self) -> usize {
USEFUL_BITS
}
fn poseidon_rate(&self) -> usize {
TOTAL_FIELD_ELEMENTS
}
fn permutation_count(&self) -> usize {
1
}
fn salt_size(&self) -> usize {
SALT_SIZE
}
fn chunk_size(&self) -> usize {
CHUNK_SIZE
}
fn hash(&self, inputs: &Vec<BigUint>) -> Result<BigUint, ProverError> {
hash_internal(inputs)
}
}
impl Prover {
pub fn new(pk: PK) -> Self {
Self { proving_key: pk }
}
}
/// Hashes `inputs` with Poseidon and returns the digest as `BigUint`.
fn hash_internal(inputs: &Vec<BigUint>) -> Result<BigUint, ProverError> {
let digest = match inputs.len() {
15 => {
// hash with rate-15 Poseidon
let fes: [F; 15] = inputs
.iter()
.map(|i| bigint_to_f(i))
.collect::<Vec<_>>()
.try_into()
.unwrap();
poseidon_15(&fes)
}
1 => {
// hash with rate-1 Poseidon
let fes: [F; 1] = inputs
.iter()
.map(|i| bigint_to_f(i))
.collect::<Vec<_>>()
.try_into()
.unwrap();
poseidon_1(&fes)
}
_ => return Err(ProverError::WrongPoseidonInput),
};
Ok(f_to_bigint(&digest))
}
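#[test]
fn test_hash_internal_dispatch() {
// A small sketch of the dispatch above: only inputs of length 15 or 1
// map to a supported Poseidon rate; any other length is rejected.
assert!(hash_internal(&vec![BigUint::from(1u8); 15]).is_ok());
assert!(hash_internal(&vec![BigUint::from(1u8); 1]).is_ok());
assert!(hash_internal(&vec![BigUint::from(1u8); 2]).is_err());
}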
#[cfg(test)]
mod tests {
use super::*;
use crate::halo2_backend::circuit::{CELLS_PER_ROW, K, USEFUL_ROWS};
use crate::halo2_backend::prover::hash_internal;
use crate::halo2_backend::Curve;
use crate::prover::{ProofInput, Prove, ProverError};
use crate::tests::run_until_proofs_are_generated;
use crate::verifier::{VerificationInput, VerifierError, Verify};
use crate::Proof;
use halo2_proofs::dev::MockProver;
use num::BigUint;
use std::panic::catch_unwind;
/// TestHalo2Prover is a test prover. It is the same as [Prover] except:
/// - it doesn't require a proving key
/// - it uses a `MockProver` inside `prove()`
///
/// This allows us to test the circuit with the correct inputs from the authdecode
/// protocol execution. Also allows us to corrupt each of the circuit inputs and
/// expect a failure.
struct TestHalo2Prover {}
impl Prove for TestHalo2Prover {
fn prove(&self, input: ProofInput) -> Result<Proof, ProverError> {
// convert into matrices
let (deltas_as_rows, deltas_as_columns) =
deltas_to_matrices(&input.deltas, self.useful_bits());
// convert plaintext into F type
let good_plaintext: [F; TOTAL_FIELD_ELEMENTS] = input
.plaintext
.iter()
.map(|bigint| bigint_to_f(bigint))
.collect::<Vec<_>>()
.try_into()
.unwrap();
// arrange into the format which halo2 expects
let mut good_inputs: Vec<Vec<F>> =
deltas_as_columns.iter().map(|v| v.to_vec()).collect();
// add another column with public inputs
let tmp = vec![
bigint_to_f(&input.plaintext_hash),
bigint_to_f(&input.label_sum_hash),
bigint_to_f(&input.sum_of_zero_labels),
];
good_inputs.push(tmp);
let circuit = AuthDecodeCircuit::new(
good_plaintext,
bigint_to_f(&input.salt),
deltas_as_rows.into(),
);
// Test with the correct inputs.
println!("start mockprover");
let prover = MockProver::run(K, &circuit, good_inputs.clone()).unwrap();
assert!(prover.verify().is_ok());
println!("end mockprover");
// Corrupt at least one delta which corresponds to plaintext bit 1.
// Since the plaintext was chosen randomly, we corrupt only the last
// deltas on each row - one of those deltas will correspond to a plaintext
// bit 1 with high probability.
// Expect verification to fail.
let mut bad_input1 = good_inputs.clone();
for i in 0..USEFUL_ROWS {
bad_input1[CELLS_PER_ROW - 1][i] = F::from(123);
}
println!("start mockprover2");
let prover = MockProver::run(K, &circuit, bad_input1.clone()).unwrap();
assert!(prover.verify().is_err());
println!("end mockprover2");
// One-by-one corrupt the plaintext hash, the label sum hash, the zero sum.
// Expect verification to fail.
for i in 0..3 {
let mut bad_public_input = good_inputs.clone();
bad_public_input[CELLS_PER_ROW][i] = F::from(123);
println!("start mockprover3");
let prover = MockProver::run(K, &circuit, bad_public_input.clone()).unwrap();
assert!(prover.verify().is_err());
println!("end mockprover3");
}
// Corrupt only the plaintext.
// Expect verification to fail.
let mut bad_plaintext = good_plaintext.clone();
bad_plaintext[0] = F::from(123);
let circuit = AuthDecodeCircuit::new(
bad_plaintext,
bigint_to_f(&input.salt),
deltas_as_rows.into(),
);
println!("start mockprover4");
let prover = MockProver::run(K, &circuit, good_inputs.clone()).unwrap();
assert!(prover.verify().is_err());
println!("end mockprover4");
// Corrupt only the salt.
// Expect verification to fail.
let bad_salt = BigUint::from(123u8);
let circuit = AuthDecodeCircuit::new(
good_plaintext,
bigint_to_f(&bad_salt),
deltas_as_rows.into(),
);
println!("start mockprover5");
let prover = MockProver::run(K, &circuit, good_inputs.clone()).unwrap();
assert!(prover.verify().is_err());
println!("end mockprover5");
Ok(Default::default())
}
fn useful_bits(&self) -> usize {
USEFUL_BITS
}
fn poseidon_rate(&self) -> usize {
TOTAL_FIELD_ELEMENTS
}
fn permutation_count(&self) -> usize {
1
}
fn salt_size(&self) -> usize {
SALT_SIZE
}
fn chunk_size(&self) -> usize {
CHUNK_SIZE
}
fn hash(&self, inputs: &Vec<BigUint>) -> Result<BigUint, ProverError> {
hash_internal(inputs)
}
}
impl TestHalo2Prover {
pub fn new() -> Self {
Self {}
}
}
/// This verifier is the same as [crate::halo2_backend::verifier::Verifier] except:
/// - it doesn't require a verifying key
/// - it does not verify since `MockProver` does that already
struct TestHalo2Verifier {
curve: Curve,
}
impl TestHalo2Verifier {
pub fn new(curve: Curve) -> Self {
Self { curve }
}
}
impl Verify for TestHalo2Verifier {
fn verify(&self, _: VerificationInput) -> Result<bool, VerifierError> {
Ok(false)
}
fn field_size(&self) -> usize {
match self.curve {
Curve::Pallas => 255,
Curve::BN254 => 254,
_ => panic!("a new curve was added. Add its field size here."),
}
}
fn useful_bits(&self) -> usize {
USEFUL_BITS
}
fn chunk_size(&self) -> usize {
CHUNK_SIZE
}
}
#[test]
/// Tests the circuit with the correct inputs as well as wrong inputs. The logic is
/// in [TestHalo2Prover]'s prove()
fn test_circuit() {
let prover = Box::new(TestHalo2Prover::new());
let verifier = Box::new(TestHalo2Verifier::new(Curve::Pallas));
let _res = run_until_proofs_are_generated(prover, verifier);
}
}

250
src/halo2_backend/utils.rs Normal file
View File

@@ -0,0 +1,250 @@
use super::circuit::{CELLS_PER_ROW, USEFUL_ROWS};
use crate::utils::{boolvec_to_u8vec, u8vec_to_boolvec};
use crate::Delta;
use halo2_proofs::arithmetic::FieldExt;
use num::{BigUint, FromPrimitive};
use pasta_curves::Fp as F;
/// Decomposes a `BigUint` into bits and returns the bits in MSB-first bit order,
/// left padding them with zeroes to the size of 256.
/// The assumption is that `bigint` was sanitized earlier and is not larger
/// than 256 bits
pub fn bigint_to_256bits(bigint: BigUint) -> [bool; 256] {
let bits = u8vec_to_boolvec(&bigint.to_bytes_be());
let mut bits256 = [false; 256];
bits256[256 - bits.len()..].copy_from_slice(&bits);
bits256
}
#[test]
fn test_bigint_to_256bits() {
use num_bigint::RandomBits;
use rand::thread_rng;
use rand::Rng;
// test with a fixed number
let res = bigint_to_256bits(BigUint::from(3u8));
let expected: [bool; 256] = [vec![false; 254], vec![true; 2]]
.concat()
.try_into()
.unwrap();
assert_eq!(res, expected);
// test with a random number
let mut rng = thread_rng();
let b: BigUint = rng.sample(RandomBits::new(256));
let mut expected_bits: [bool; 256] = (0..256)
.map(|i| b.bit(i))
.collect::<Vec<_>>()
.try_into()
.unwrap();
expected_bits.reverse();
assert_eq!(bigint_to_256bits(b), expected_bits);
}
/// Converts a `BigUint` into an field element type.
/// The assumption is that `bigint` was sanitized earlier and is not larger
/// than [crate::verifier::Verify::field_size]
pub fn bigint_to_f(bigint: &BigUint) -> F {
let le = bigint.to_bytes_le();
let mut wide = [0u8; 64];
wide[0..le.len()].copy_from_slice(&le);
F::from_bytes_wide(&wide)
}
#[test]
fn test_bigint_to_f() {
// Test that the sum of 2 random `BigUint`s matches the sum of 2 field elements
use num_bigint::RandomBits;
use rand::thread_rng;
use rand::Rng;
let mut rng = thread_rng();
let a: BigUint = rng.sample(RandomBits::new(253));
let b: BigUint = rng.sample(RandomBits::new(253));
let c = a.clone() + b.clone();
let a_f = bigint_to_f(&a);
let b_f = bigint_to_f(&b);
let c_f = a_f + b_f;
assert_eq!(bigint_to_f(&c), c_f);
}
/// Converts `F` into a `BigUint` type.
/// The assumption is that the field is <= 256 bits
pub fn f_to_bigint(f: &F) -> BigUint {
let tmp: [u8; 32] = f.try_into().unwrap();
BigUint::from_bytes_le(&tmp)
}
#[test]
fn test_f_to_bigint() {
// Test that the sum of 2 random `F`s matches the expected sum
use rand::thread_rng;
use rand::Rng;
let mut rng = thread_rng();
let a = rng.gen::<u128>();
let b = rng.gen::<u128>();
let res = f_to_bigint(&(F::from_u128(a) + F::from_u128(b)));
let expected: BigUint = BigUint::from_u128(a).unwrap() + BigUint::from_u128(b).unwrap();
assert_eq!(res, expected);
}
/// Splits up 256 bits into 4 limbs, shifts each limb left
/// and returns the shifted limbs as `BigUint`s.
pub fn bits_to_limbs(bits: [bool; 256]) -> [BigUint; 4] {
// break up the field element into 4 64-bit limbs
// the limb at index 0 is the high limb
let limbs: [BigUint; 4] = bits
.chunks(64)
.map(|c| BigUint::from_bytes_be(&boolvec_to_u8vec(c)))
.collect::<Vec<_>>()
.try_into()
.unwrap();
// shift each limb to the left:
let two = BigUint::from(2u8);
// how many bits to left-shift each limb by
let shift_by: [BigUint; 4] = [192, 128, 64, 0]
.iter()
.map(|s| two.pow(*s))
.collect::<Vec<_>>()
.try_into()
.unwrap();
limbs
.iter()
.zip(shift_by.iter())
.map(|(l, s)| l * s)
.collect::<Vec<_>>()
.try_into()
.unwrap()
}
#[test]
fn test_bits_to_limbs() {
use std::str::FromStr;
let bits: [bool; 256] = [
vec![false; 63],
vec![true],
vec![false; 63],
vec![true],
vec![false; 63],
vec![true],
vec![false; 63],
vec![true],
]
.concat()
.try_into()
.unwrap();
let res = bits_to_limbs(bits);
let expected = [
BigUint::from_str("6277101735386680763835789423207666416102355444464034512896").unwrap(),
BigUint::from_str("340282366920938463463374607431768211456").unwrap(),
BigUint::from_str("18446744073709551616").unwrap(),
BigUint::from_str("1").unwrap(),
];
assert_eq!(res, expected);
}
/// Converts a vec of deltas into a matrix of rows and a matrix of
/// columns and returns them.
///
/// Assumes that the size of `deltas` is [super::CHUNK_SIZE]
pub fn deltas_to_matrices(
deltas: &Vec<Delta>,
useful_bits: usize,
) -> (
[[F; CELLS_PER_ROW]; USEFUL_ROWS],
[[F; USEFUL_ROWS]; CELLS_PER_ROW],
) {
let deltas = convert_and_pad_deltas(deltas, useful_bits);
let deltas_as_rows = deltas_to_matrix_of_rows(&deltas);
let deltas_as_columns = transpose_rows(&deltas_as_rows);
(deltas_as_rows, deltas_as_columns)
}
#[test]
fn test_deltas_to_matrices() {
use super::CHUNK_SIZE;
// all deltas except the penultimate one are 1. The penultimate delta is 2.
let deltas = [
vec![Delta::from(1u8); CHUNK_SIZE - 2],
vec![BigUint::from(2u8)],
vec![BigUint::from(1u8)],
]
.concat();
let (deltas_as_rows, deltas_as_columns) = deltas_to_matrices(&deltas, 253);
let dar_concat = deltas_as_rows.concat();
let dac_concat = deltas_as_columns.concat();
// both matrices must contain equal amount of elements
assert_eq!(dar_concat.len(), dac_concat.len());
// 3 extra padding deltas were added 14 times
assert_eq!(dar_concat.len(), deltas.len() + 14 * 3);
// the penultimate element in the last row should be 2
let row = deltas_as_rows[deltas_as_rows.len() - 1];
assert_eq!(row[row.len() - 2], F::from(2));
// the last element in the penultimate column should be 2
let col = deltas_as_columns[deltas_as_columns.len() - 2];
assert_eq!(col[col.len() - 1], F::from(2));
}
/// To make handling inside the circuit simpler, we pad each chunk of deltas
/// (except for the last one) with zero values on the left to the size 256.
/// Note that the last chunk (corresponding to the 15th field element) will
/// contain only 128 deltas, so we do NOT pad it.
///
/// Returns padded deltas
fn convert_and_pad_deltas(deltas: &Vec<Delta>, useful_bits: usize) -> Vec<F> {
// convert deltas into F type
let deltas: Vec<F> = deltas.iter().map(|d| bigint_to_f(d)).collect();
deltas
.chunks(useful_bits)
.enumerate()
.flat_map(|(i, c)| {
if i < 14 {
let mut v = vec![F::from(0); 256 - c.len()];
v.extend(c.to_vec());
v
} else {
c.to_vec()
}
})
.collect()
}
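#[test]
fn test_convert_and_pad_deltas() {
// A worked example of the padding above, assuming useful_bits = 253:
// the first 14 chunks of 253 deltas each gain 3 zero deltas of left
// padding, while the last chunk of 128 deltas is left unpadded.
let deltas = vec![Delta::from(1u8); 253 * 14 + 128];
let padded = convert_and_pad_deltas(&deltas, 253);
assert_eq!(padded.len(), 256 * 14 + 128);
}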
/// Converts a vec of padded deltas into a matrix of rows and returns it.
fn deltas_to_matrix_of_rows(deltas: &Vec<F>) -> [[F; CELLS_PER_ROW]; USEFUL_ROWS] {
deltas
.chunks(CELLS_PER_ROW)
.map(|c| c.try_into().unwrap())
.collect::<Vec<_>>()
.try_into()
.unwrap()
}
/// Transposes a matrix of rows of fixed size.
fn transpose_rows(matrix: &[[F; CELLS_PER_ROW]; USEFUL_ROWS]) -> [[F; USEFUL_ROWS]; CELLS_PER_ROW] {
(0..CELLS_PER_ROW)
.map(|i| {
matrix
.iter()
.map(|inner| inner[i])
.collect::<Vec<_>>()
.try_into()
.unwrap()
})
.collect::<Vec<_>>()
.try_into()
.unwrap()
}

View File

@@ -0,0 +1,90 @@
use super::utils::{bigint_to_f, deltas_to_matrices};
use super::{Curve, CHUNK_SIZE, USEFUL_BITS};
use crate::verifier::{VerificationInput, VerifierError, Verify};
use halo2_proofs::plonk;
use halo2_proofs::plonk::SingleVerifier;
use halo2_proofs::plonk::VerifyingKey;
use halo2_proofs::poly::commitment::Params;
use halo2_proofs::transcript::Blake2bRead;
use halo2_proofs::transcript::Challenge255;
use instant::Instant;
use pasta_curves::pallas::Base as F;
use pasta_curves::EqAffine;
/// halo2's native [VerifyingKey] can't be used without params, so we wrap
/// them in one struct.
#[derive(Clone)]
pub struct VK {
pub key: VerifyingKey<EqAffine>,
pub params: Params<EqAffine>,
}
/// Implements the Verifier in the authdecode protocol.
pub struct Verifier {
verification_key: VK,
curve: Curve,
}
impl Verifier {
pub fn new(vk: VK, curve: Curve) -> Self {
Self {
verification_key: vk,
curve,
}
}
}
impl Verify for Verifier {
fn verify(&self, input: VerificationInput) -> Result<bool, VerifierError> {
let params = &self.verification_key.params;
let vk = &self.verification_key.key;
let strategy = SingleVerifier::new(params);
let proof = input.proof;
let mut transcript = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]);
// convert deltas into a matrix which halo2 expects
let (_, deltas_as_columns) = deltas_to_matrices(&input.deltas, self.useful_bits());
let mut all_inputs: Vec<&[F]> = deltas_as_columns.iter().map(|v| v.as_slice()).collect();
// add another column with public inputs
let tmp = &[
bigint_to_f(&input.plaintext_hash),
bigint_to_f(&input.label_sum_hash),
bigint_to_f(&input.sum_of_zero_labels),
];
all_inputs.push(tmp);
let now = Instant::now();
// perform the actual verification
let res = plonk::verify_proof(
params,
vk,
strategy,
&[all_inputs.as_slice()],
&mut transcript,
);
println!("Proof verified [{:?}]", now.elapsed());
if res.is_err() {
return Err(VerifierError::VerificationFailed);
}
Ok(true)
}
fn field_size(&self) -> usize {
match self.curve {
Curve::Pallas => 255,
Curve::BN254 => 254,
_ => panic!("a new curve was added. Add its field size here."),
}
}
fn useful_bits(&self) -> usize {
USEFUL_BITS
}
fn chunk_size(&self) -> usize {
CHUNK_SIZE
}
}
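// A minimal usage sketch, assuming `vk` was produced by the one-time setup
// of this backend and `input` is a [VerificationInput] received together
// with the proof:
//
// let verifier = Verifier::new(vk, Curve::Pallas);
// assert!(verifier.verify(input)?);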

86
src/label.rs Normal file
View File

@@ -0,0 +1,86 @@
use super::utils::bits_to_bigint;
use num::BigUint;
use rand::RngCore;
use rand::{thread_rng, Rng, SeedableRng};
use rand_chacha::ChaCha20Rng;
/// The PRG for generating arithmetic labels.
type Prg = ChaCha20Rng;
/// The seed from which to generate the arithmetic labels.
pub type Seed = [u8; 32];
/// The arithmetic label.
type Label = BigUint;
/// A pair of labels: the first one encodes the value 0, the second one encodes
/// the value 1.
pub type LabelPair = [Label; 2];
pub struct LabelGenerator {}
impl LabelGenerator {
/// Generates a seed and then generates `count` arithmetic label pairs
/// of bitsize `label_size` from that seed. Returns the labels and the seed.
pub fn generate(count: usize, label_size: usize) -> (Vec<LabelPair>, Seed) {
let seed = thread_rng().gen::<Seed>();
let pairs = LabelGenerator::generate_from_seed(count, label_size, seed);
(pairs, seed)
}
/// Generates `count` arithmetic label pairs of bitsize `label_size` from a
/// seed and returns the labels.
pub fn generate_from_seed(count: usize, label_size: usize, seed: Seed) -> Vec<LabelPair> {
let prg = Prg::from_seed(seed);
LabelGenerator::generate_from_prg(count, label_size, Box::new(prg))
}
/// Generates `count` arithmetic label pairs of bitsize `label_size` using a PRG.
/// Returns the generated label pairs.
fn generate_from_prg(
count: usize,
label_size: usize,
mut prg: Box<dyn RngCore>,
) -> Vec<LabelPair> {
(0..count)
.map(|_| {
// To keep the handling simple, we want to avoid a negative delta, that's why
// W_0 and delta must be (label_size - 1)-bit values and W_1 will be
// set to W_0 + delta
let zero_label = bits_to_bigint(
&core::iter::repeat_with(|| prg.gen::<bool>())
.take(label_size - 1)
.collect::<Vec<_>>(),
);
let delta = bits_to_bigint(
&core::iter::repeat_with(|| prg.gen::<bool>())
.take(label_size - 1)
.collect::<Vec<_>>(),
);
let one_label = zero_label.clone() + delta.clone();
[zero_label, one_label]
})
.collect()
}
}
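#[test]
fn test_label_generator_determinism() {
// A minimal check that generation is deterministic in the seed: the same
// seed must reproduce the same label pairs. This is what later allows the
// labels to be "opened" by revealing only the seed.
let (pairs, seed) = LabelGenerator::generate(8, 96);
assert_eq!(pairs, LabelGenerator::generate_from_seed(8, 96, seed));
}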
#[cfg(test)]
mod tests {
use super::LabelGenerator;
use num::BigUint;
use rand::rngs::mock::StepRng;
#[test]
fn test_label_generator() {
// PRG which always returns bit 1
let prg = StepRng::new(u64::MAX, 0);
// zero_label and delta should be 511 (bit 1 repeated 9 times), one_label
// should be 511+511=1022
let result = LabelGenerator::generate_from_prg(10, 10, Box::new(prg));
let expected = (0..10)
.map(|_| [BigUint::from(511u128), BigUint::from(1022u128)])
.collect::<Vec<_>>();
assert_eq!(expected, result);
}
}

205
src/lib.rs Normal file
View File

@@ -0,0 +1,205 @@
//! This module implements the protocol for authenticated decoding (aka AuthDecode)
//! of output labels from a garbled circuit (GC) evaluation.
//! The purpose of AuthDecode is to allow the GC evaluator to produce a zk-friendly
//! hash commitment to the GC output. Computing a zk-friendly hash directly inside
//! the GC is too expensive, hence the need for this protocol.
//!
//! AuthDecode assumes a privacy-free setting for the garbler, i.e. the protocol
//! MUST ONLY start AFTER the garbler reveals all his secret GC inputs.
//! Specifically, in the context of the TLSNotary protocol, AuthDecode MUST ONLY
//! start AFTER the Notary (who is the garbler) has revealed all of his TLS session
//! keys' shares.
pub mod halo2_backend;
mod label;
pub mod prover;
pub mod snarkjs_backend;
mod utils;
pub mod verifier;
use num::BigUint;
/// The bitsize of an arithmetic label. MUST be > 40 to give statistical
/// security against the Prover guessing the label. For a 254-bit field,
/// a bitsize > 96 would require 2 field elements for the
/// salted label sum instead of 1.
const ARITHMETIC_LABEL_SIZE: usize = 96;
/// The maximum supported size (in bits) of one [Chunk] of plaintext.
/// Should not exceed 2^{ [prover::Prove::useful_bits] - [prover::Prove::salt_size]
/// - [ARITHMETIC_LABEL_SIZE] }.
/// 2^20 should suffice for most use cases.
const MAX_CHUNK_SIZE: usize = 1 << 20;
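// A sketch of the arithmetic behind the bound above: a sum of up to
// MAX_CHUNK_SIZE labels of ARITHMETIC_LABEL_SIZE bits each stays below
// 2^(ARITHMETIC_LABEL_SIZE + log2(MAX_CHUNK_SIZE)) = 2^(96 + 20) = 2^116;
// after being shifted left by [prover::Prove::salt_size] bits to make room
// for the salt, it must still fit into the [prover::Prove::useful_bits] bits
// of one field element, which yields the formula above.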
/// The maximum supported number of plaintext [Chunk]s (which is equal to the
/// number of zk proofs). Having too many zk proofs may be a DOS vector
/// against the Notary who is the verifier of zk proofs.
const MAX_CHUNK_COUNT: usize = 128;
/// The decoded output labels of the garbled circuit. In other words, this is
/// the plaintext output resulting from the evaluation of a garbled circuit.
type Plaintext = Vec<u8>;
/// The size of [Plaintext] in bits.
type PlaintextSize = usize;
/// A chunk of [Plaintext]. The number of vec elements equals
/// [Prove::poseidon_rate] * [Prove::permutation_count]. Each vec element
/// is an "Elliptic curve field element" into which [Prove::useful_bits] bits
/// of [Plaintext] are packed.
/// The chunk does NOT contain the [Salt].
type Chunk = Vec<BigUint>;
/// Before hashing a [Chunk], it is salted by shifting its last element to the
/// left by [Prove::salt_size] and placing the salt into the low bits.
/// This same salt is also used to salt the sum of all the labels corresponding
/// to the [Chunk].
/// Without the salt, a hash of plaintext with low entropy could be brute-forced.
type Salt = BigUint;
/// A Poseidon hash digest of a [Salt]ed [Chunk]. This is an EC field element.
type PlaintextHash = BigUint;
/// A Poseidon hash digest of a [Salt]ed arithmetic sum of arithmetic labels
/// corresponding to the [Chunk]. This is an EC field element.
type LabelSumHash = BigUint;
/// An arithmetic sum of all "zero" arithmetic labels (those are the labels
/// which encode the bit value 0) corresponding to one [Chunk].
type ZeroSum = BigUint;
/// An arithmetic difference between the arithmetic label "one" and the
/// arithmetic label "zero".
type Delta = BigUint;
/// A serialized proof proving that a Poseidon hash is the result of hashing a
/// salted [Chunk], where the [Chunk] is the result of decoding a garbled
/// circuit's labels.
type Proof = Vec<u8>;
#[cfg(test)]
mod tests {
use crate::prover::ProverError;
use crate::prover::{AuthDecodeProver, Prove};
use crate::utils::*;
use crate::verifier::VerifyMany;
use crate::verifier::{AuthDecodeVerifier, VerifierError, Verify};
use crate::{Proof, Salt};
use rand::{thread_rng, Rng};
/// Accepts a concrete Prover and Verifier and runs the whole AuthDecode
/// protocol end-to-end.
///
/// Corrupts the proof if `will_corrupt_proof` is `true` and expects the
/// verification to fail.
pub fn e2e_test(prover: Box<dyn Prove>, verifier: Box<dyn Verify>, will_corrupt_proof: bool) {
let (prover_result, verifier) = run_until_proofs_are_generated(prover, verifier);
let (proofs, _salts) = prover_result.unwrap();
if !will_corrupt_proof {
// Notary verifies a good proof
let (result, _) = verifier.verify_many(proofs).unwrap();
assert!(result);
} else {
// corrupt one byte in each proof
let corrupted_proofs: Vec<Proof> = proofs
.iter()
.map(|p| {
let old_byte = p[p.len() / 2];
let new_byte = old_byte.checked_add(1).unwrap_or_default();
let mut new_proof = p.clone();
let p_len = new_proof.len();
new_proof[p_len / 2] = new_byte;
new_proof
})
.collect();
// Notary tries to verify a corrupted proof
let res = verifier.verify_many(corrupted_proofs);
assert_eq!(res.err().unwrap(), VerifierError::VerificationFailed);
}
}
/// Runs the protocol until the moment when Prover returns generated proofs.
///
/// Returns the proofs, the salts, and the verifier in the next expected state.
pub fn run_until_proofs_are_generated(
prover: Box<dyn Prove>,
verifier: Box<dyn Verify>,
) -> (
Result<(Vec<Proof>, Vec<Salt>), ProverError>,
AuthDecodeVerifier<VerifyMany>,
) {
let mut rng = thread_rng();
// generate random non-empty plaintext of random size up to 1000 bytes;
// a zero-size plaintext would make the Prover's `setup()` fail with `EmptyPlaintext`
let plaintext: Vec<u8> = core::iter::repeat_with(|| rng.gen::<u8>())
.take(thread_rng().gen_range(1..1000))
.collect();
// Normally, the Prover is expected to obtain her binary labels by
// evaluating the garbled circuit.
// To keep this test simple, we don't evaluate the gc, but we generate
// all labels of the Verifier and give the Prover her active labels.
let bit_size = plaintext.len() * 8;
let mut all_binary_labels: Vec<[u128; 2]> = Vec::with_capacity(bit_size);
let mut delta: u128 = rng.gen();
// set the last bit so that the two labels of each pair differ in their pointer bit (LSB)
delta |= 1;
for _ in 0..bit_size {
let label_zero: u128 = rng.gen();
all_binary_labels.push([label_zero, label_zero ^ delta]);
}
let prover_labels = choose(&all_binary_labels, &u8vec_to_boolvec(&plaintext));
let verifier = AuthDecodeVerifier::new(all_binary_labels.clone(), verifier);
let verifier = verifier.setup().unwrap();
let prover = AuthDecodeProver::new(plaintext, prover);
// Perform setup
let prover = prover.setup().unwrap();
// Commitment to the plaintext is sent to the Notary
let (plaintext_hash, prover) = prover.plaintext_commitment().unwrap();
// Notary sends back encrypted arithm. labels.
let (ciphertexts, verifier) = verifier.receive_plaintext_hashes(plaintext_hash).unwrap();
// Hash commitment to the label_sum is sent to the Notary
let (label_sum_hashes, prover) = prover
.label_sum_commitment(ciphertexts, &prover_labels)
.unwrap();
// Notary sends the arithmetic label seed
let (seed, verifier) = verifier.receive_label_sum_hashes(label_sum_hashes).unwrap();
// At this point the following happens in the `committed GC` protocol:
// - the Notary reveals the GC seed
// - the User checks that the GC was created from that seed
// - the User checks that her active output labels correspond to the
// output labels derived from the seed
// - we are called with the result of the check and (if successful)
// with all the output labels
let prover = prover
.binary_labels_authenticated(true, Some(all_binary_labels))
.unwrap();
// Prover checks the integrity of the arithmetic labels and generates zero_sums and deltas
let prover = prover.authenticate_arithmetic_labels(seed).unwrap();
// Prover generates the proof
(prover.create_zk_proofs(), verifier)
}
/// Unzips a slice of pairs, returning items corresponding to choice
fn choose<T: Clone>(items: &[[T; 2]], choice: &[bool]) -> Vec<T> {
assert!(items.len() == choice.len(), "arrays are different length");
items
.iter()
.zip(choice)
.map(|(items, choice)| items[*choice as usize].clone())
.collect()
}
}

973
src/prover.rs Normal file
View File

@@ -0,0 +1,973 @@
use std::ops::Shl;
use crate::label::{LabelGenerator, Seed};
use crate::utils::{
bits_to_bigint, compute_zero_sum_and_deltas, encrypt_arithmetic_labels, sha256,
u8vec_to_boolvec,
};
use crate::{
Chunk, Delta, LabelSumHash, Plaintext, PlaintextHash, PlaintextSize, Proof, Salt, ZeroSum,
};
use crate::{ARITHMETIC_LABEL_SIZE, MAX_CHUNK_COUNT, MAX_CHUNK_SIZE};
use aes::{Aes128, BlockDecrypt, NewBlockCipher};
use cipher::generic_array::GenericArray;
use num::BigUint;
use rand::{thread_rng, Rng};
#[derive(Debug, PartialEq, thiserror::Error)]
pub enum ProverError {
#[error("Provided empty plaintext")]
EmptyPlaintext,
#[error("Unable to put the salt of the hash into one field element")]
NoRoomForSalt,
#[error("Exceeded the maximum supported size of one chunk of plaintext")]
MaxChunkSizeExceeded,
#[error("Exceeded the maximum supported number of chunks of plaintext")]
MaxChunkCountExceeded,
#[error("Internal error: WrongFieldElementCount")]
WrongFieldElementCount,
#[error("Internal error: WrongPoseidonInput")]
WrongPoseidonInput,
#[error("Provided encrypted arithmetic labels of unexpected size. Expected {0}. Got {1}.")]
IncorrectEncryptedLabelSize(usize, usize),
#[error("Provided binary labels of unexpected size. Expected {0}. Got {1}.")]
IncorrectBinaryLabelSize(usize, usize),
#[error("Internal error: ErrorInPoseidonImplementation")]
ErrorInPoseidonImplementation,
#[error("Cannot proceed because the binary labels were not authenticated")]
BinaryLabelAuthenticationFailed,
#[error("Binary labels were not provided")]
BinaryLabelsNotProvided,
#[error("Failed to authenticate the arithmetic labels")]
ArithmeticLabelAuthenticationFailed,
#[error("The proof system returned an error when generating a proof")]
ProvingBackendError,
#[error("Internal error: WrongLastFieldElementBitCount")]
WrongLastFieldElementBitCount,
#[error("An internal error was encountered")]
InternalError,
}
#[derive(Clone, Default)]
// Public and private inputs to the zk circuit
pub struct ProofInput {
// Public
pub plaintext_hash: PlaintextHash,
pub label_sum_hash: LabelSumHash,
pub sum_of_zero_labels: ZeroSum,
pub deltas: Vec<Delta>,
// Private
pub plaintext: Chunk,
pub salt: Salt,
}
#[derive(Debug, Clone, Copy, PartialEq, Default)]
pub struct ArithmeticLabelCheck([u8; 32]);
impl ArithmeticLabelCheck {
/// Stores the hash of encrypted arithmetic labels. This hash will be checked
/// against later when the Notary "opens" all the arithmetic labels and
/// the garbled circuit's output labels which were used as keys to encrypt
/// the arithmetic labels.
/// This technique of "opening" the labels is similar to committed OT
/// (Oblivious Transfer), where the OT sender reveals all the OT messages that
/// he sent and the keys which he used to encrypt those messages.
///
/// The purpose of this check is to detect when a malicious Notary sends
/// a false arithmetic label which (if chosen by the User) would prevent the
/// User from creating a zk proof. By observing whether the User succeeded in
/// creating a zk proof, the Notary could infer whether the false label was chosen
/// and would learn 1 bit about the User's secret plaintext.
pub fn new(ciphertexts: &Vec<[[u8; 16]; 2]>) -> Self {
// flatten the ciphertexts and hash them
let flat: Vec<u8> = ciphertexts
.iter()
.flat_map(|pair| pair.to_vec().into_iter().flatten().collect::<Vec<u8>>())
.collect();
Self(sha256(&flat))
}
}
pub trait State {}
#[derive(Default)]
pub struct Setup {
plaintext: Plaintext,
}
impl State for Setup {}
// see comments to the field's type.
#[derive(Default)]
pub struct PlaintextCommitment {
plaintext_size: PlaintextSize,
chunks: Vec<Chunk>,
salts: Vec<Salt>,
}
impl State for PlaintextCommitment {}
// see comments to the field's type.
#[derive(Default)]
pub struct LabelSumCommitment {
plaintext_size: PlaintextSize,
chunks: Vec<Chunk>,
salts: Vec<Salt>,
plaintext_hashes: Vec<PlaintextHash>,
}
impl State for LabelSumCommitment {}
// see comments to the field's type.
#[derive(Default)]
pub struct BinaryLabelsAuthenticated {
chunks: Vec<Chunk>,
salts: Vec<Salt>,
plaintext_hashes: Vec<PlaintextHash>,
label_sum_hashes: Vec<LabelSumHash>,
arith_label_check: ArithmeticLabelCheck,
}
impl State for BinaryLabelsAuthenticated {}
// see comments to the field's type.
#[derive(Default)]
pub struct AuthenticateArithmeticLabels {
chunks: Vec<Chunk>,
salts: Vec<Salt>,
plaintext_hashes: Vec<PlaintextHash>,
label_sum_hashes: Vec<LabelSumHash>,
arith_label_check: ArithmeticLabelCheck,
// Garbled circuit's output labels. We call them "binary" to distinguish
// them from the arithmetic labels.
all_binary_labels: Vec<[u128; 2]>,
}
impl State for AuthenticateArithmeticLabels {}
// see comments to the field's type.
pub struct ProofCreation {
chunks: Vec<Chunk>,
salts: Vec<Salt>,
plaintext_hashes: Vec<PlaintextHash>,
label_sum_hashes: Vec<LabelSumHash>,
deltas: Vec<Delta>,
zero_sums: Vec<ZeroSum>,
}
impl State for ProofCreation {}
pub trait Prove {
/// Given the `input` to the AuthDecode zk circuit, returns a serialized zk
/// proof which can be passed to the Verifier.
fn prove(&self, input: ProofInput) -> Result<Proof, ProverError>;
/// Returns how many bits of plaintext we will pack into one field element.
/// Normally, this should be [crate::verifier::Verify::field_size] minus 1.
fn useful_bits(&self) -> usize;
/// How many field elements the Poseidon hash consumes for one permutation.
fn poseidon_rate(&self) -> usize;
/// How many permutations the circuit supports. One permutation consumes
/// [Prove::poseidon_rate()] field elements.
fn permutation_count(&self) -> usize;
/// The size of the hash's salt in bits. The salt takes up the least
/// bits of the last field element.
fn salt_size(&self) -> usize;
/// How many bits of [Plaintext] can fit into one [Chunk]. This does not
/// include the [Salt] of the hash - which takes up the remaining least bits
/// of the last field element of each chunk.
fn chunk_size(&self) -> usize;
/// Evaluates the Poseidon hash on `inputs` and returns the digest.
fn hash(&self, inputs: &Vec<BigUint>) -> Result<BigUint, ProverError>;
}
pub struct AuthDecodeProver<S = Setup>
where
S: State,
{
prover: Box<dyn Prove>,
state: S,
}
impl AuthDecodeProver {
/// Creates a new prover in the initial [Setup] state.
pub fn new(plaintext: Plaintext, prover: Box<dyn Prove>) -> AuthDecodeProver<Setup> {
AuthDecodeProver {
state: Setup { plaintext },
prover,
}
}
}
impl AuthDecodeProver<Setup> {
/// Performs setup: splits the plaintext into chunks and generates a salt for
/// each chunk. Returns the next expected state.
pub fn setup(self) -> Result<AuthDecodeProver<PlaintextCommitment>, ProverError> {
if self.state.plaintext.is_empty() {
return Err(ProverError::EmptyPlaintext);
}
if self.prover.useful_bits() < self.prover.salt_size() {
// last field element must be large enough to contain the salt.
// In the future, if we need to support fields < salt,
// we can put the salt into multiple field elements.
return Err(ProverError::NoRoomForSalt);
}
if self.prover.chunk_size() > MAX_CHUNK_SIZE {
return Err(ProverError::MaxChunkSizeExceeded);
}
let (chunks, salts) = self.plaintext_to_chunks(&self.state.plaintext)?;
Ok(AuthDecodeProver {
state: PlaintextCommitment {
plaintext_size: self.state.plaintext.len() * 8,
chunks,
salts,
},
prover: self.prover,
})
}
/// Creates chunks of plaintext (each chunk will have a separate zk proof).
/// If there is not enough plaintext to fill the whole chunk, we fill the gap
/// with zero bits. Returns all [Chunk]s and all [Salt]s.
fn plaintext_to_chunks(
&self,
plaintext: &Plaintext,
) -> Result<(Vec<Chunk>, Vec<Salt>), ProverError> {
// chunk size
let cs = &self.prover.chunk_size();
// the amount of field elements per chunk
let fes_per_chunk = (cs + self.prover.salt_size()) / self.prover.useful_bits();
if fes_per_chunk != self.prover.poseidon_rate() * self.prover.permutation_count() {
// can only happen if there is a logic error in `Prove` impl
return Err(ProverError::WrongFieldElementCount);
}
let mut bits = u8vec_to_boolvec(&plaintext);
// chunk count (rounded up)
let chunk_count = (bits.len() + (cs - 1)) / cs;
if chunk_count > MAX_CHUNK_COUNT {
return Err(ProverError::MaxChunkCountExceeded);
}
// extend bits with zeroes to fill the last chunk
bits.extend(vec![false; chunk_count * cs - bits.len()]);
let mut rng = thread_rng();
Ok(bits
.chunks(*cs)
.map(|chunk_of_bits| {
// chunk of field elements
let chunk_of_fes: Chunk = chunk_of_bits
.chunks(self.prover.useful_bits())
.map(|fe| bits_to_bigint(fe))
.collect();
// generate the salt for this chunk. Do not apply the salt to the
// chunk but store it separately.
let salt: Vec<bool> = core::iter::repeat_with(|| rng.gen::<bool>())
.take(self.prover.salt_size())
.collect::<Vec<_>>();
(chunk_of_fes, bits_to_bigint(&salt))
})
.unzip())
}
}
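// A worked example of the chunking above (a sketch; the concrete numbers
// assume useful_bits = 253, poseidon_rate = 15, permutation_count = 1 and
// salt_size = 125): fes_per_chunk = (chunk_size + 125) / 253 = 15, so one
// chunk packs chunk_size = 15 * 253 - 125 = 3670 bits of plaintext, and the
// low 125 bits of the last field element are reserved for the salt.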
impl AuthDecodeProver<PlaintextCommitment> {
/// Returns a vec of [Salt]ed Poseidon hashes for each [Chunk] and the next
/// expected state.
pub fn plaintext_commitment(
self,
) -> Result<(Vec<PlaintextHash>, AuthDecodeProver<LabelSumCommitment>), ProverError> {
let hashes = self.salt_and_hash_chunks(&self.state.chunks, &self.state.salts)?;
Ok((
hashes.clone(),
AuthDecodeProver {
state: LabelSumCommitment {
plaintext_size: self.state.plaintext_size,
plaintext_hashes: hashes,
chunks: self.state.chunks,
salts: self.state.salts,
},
prover: self.prover,
},
))
}
/// Salts and hashes each chunk with Poseidon and returns digests for each
/// salted chunk.
fn salt_and_hash_chunks(
&self,
chunks: &Vec<Chunk>,
salts: &Vec<Salt>,
) -> Result<Vec<BigUint>, ProverError> {
chunks
.iter()
.zip(salts.iter())
.map(|(chunk, salt)| {
let salted_chunk = self.salt_chunk(chunk, salt)?;
Ok(self.prover.hash(&salted_chunk)?)
})
.collect()
}
/// Puts salt into the low bits of the last field element of the chunk.
/// Returns the salted chunk.
fn salt_chunk(&self, chunk: &Chunk, salt: &Salt) -> Result<Chunk, ProverError> {
let len = chunk.len();
let last_fe = chunk[len - 1].clone();
if last_fe.bits() as usize > self.prover.useful_bits() - self.prover.salt_size() {
// can only happen if there is a logic error in this code
return Err(ProverError::WrongLastFieldElementBitCount);
}
let mut salted_chunk = chunk.clone();
salted_chunk[len - 1] = last_fe.shl(self.prover.salt_size()) + salt;
Ok(salted_chunk)
}
}
impl AuthDecodeProver<LabelSumCommitment> {
/// Computes the sum of all arithmetic labels for each chunk of plaintext.
/// Returns the [Salt]ed hash of each sum and the next expected state.
pub fn label_sum_commitment(
self,
ciphertexts: Vec<[[u8; 16]; 2]>,
labels: &Vec<u128>,
) -> Result<
(
Vec<LabelSumHash>,
AuthDecodeProver<BinaryLabelsAuthenticated>,
),
ProverError,
> {
let sums = self.compute_label_sums(&ciphertexts, labels)?;
let arith_label_check = ArithmeticLabelCheck::new(&ciphertexts);
let res: Result<Vec<LabelSumHash>, ProverError> = sums
.iter()
.zip(self.state.salts.iter())
.map(|(sum, salt)| {
// We want to pack `sum` and `salt` into one field element, ordered
// from high bits to low bits:
// | leading zeroes | sum | salt |
// with the salt occupying the low `salt_size()` bits.
let salted_sum = sum.shl(self.prover.salt_size()) + salt;
Ok(self.prover.hash(&vec![salted_sum.clone()])?)
})
.collect();
let label_sum_hashes = res?;
Ok((
label_sum_hashes.clone(),
AuthDecodeProver {
state: BinaryLabelsAuthenticated {
chunks: self.state.chunks,
label_sum_hashes,
plaintext_hashes: self.state.plaintext_hashes,
salts: self.state.salts,
arith_label_check,
},
prover: self.prover,
},
))
}
/// Returns the sum of the arithmetic labels for each [Chunk] of plaintext.
/// Each encrypted arithmetic label is selected by the pointer bit (LSB) of
/// the corresponding active binary label and decrypted with that label.
fn compute_label_sums(
&self,
ciphertexts: &Vec<[[u8; 16]; 2]>,
binary_labels: &Vec<u128>,
) -> Result<Vec<BigUint>, ProverError> {
if ciphertexts.len() != self.state.plaintext_size {
return Err(ProverError::IncorrectEncryptedLabelSize(
self.state.plaintext_size,
ciphertexts.len(),
));
}
if binary_labels.len() != self.state.plaintext_size {
return Err(ProverError::IncorrectBinaryLabelSize(
self.state.plaintext_size,
binary_labels.len(),
));
}
let res = ciphertexts
.chunks(self.prover.chunk_size())
.zip(binary_labels.chunks(self.prover.chunk_size()))
.map(|(chunk_ct, chunk_lb)| {
// accumulate the label sum for one chunk here
let mut label_sum = BigUint::from(0u8);
for (ct_pair, label) in chunk_ct.iter().zip(chunk_lb) {
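// safe to unwrap() since to_be_bytes() always returns exactly 16
// bytes for u128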
let key = Aes128::new_from_slice(&label.to_be_bytes()).unwrap();
// if binary label's LSB is 0, decrypt the 1st ciphertext,
// otherwise decrypt the 2nd one.
let mut ct = if label & 1 == 0 {
GenericArray::from(ct_pair[0])
} else {
GenericArray::from(ct_pair[1])
};
key.decrypt_block(&mut ct);
// add the decrypted arithmetic label to the sum
label_sum += BigUint::from_bytes_be(&ct);
}
label_sum
})
.collect();
Ok(res)
}
}
impl AuthDecodeProver<BinaryLabelsAuthenticated> {
/// Expects a signal indicating whether the `committed GC` protocol successfully
/// authenticated the output labels which we used earlier in the protocol.
/// Returns the next expected state.
pub fn binary_labels_authenticated(
self,
success: bool,
all_binary_labels: Option<Vec<[u128; 2]>>,
) -> Result<AuthDecodeProver<AuthenticateArithmeticLabels>, ProverError> {
if success {
if all_binary_labels.is_none() {
return Err(ProverError::BinaryLabelsNotProvided);
}
Ok(AuthDecodeProver {
state: AuthenticateArithmeticLabels {
chunks: self.state.chunks,
label_sum_hashes: self.state.label_sum_hashes,
plaintext_hashes: self.state.plaintext_hashes,
salts: self.state.salts,
arith_label_check: self.state.arith_label_check,
all_binary_labels: all_binary_labels.unwrap(),
},
prover: self.prover,
})
} else {
Err(ProverError::BinaryLabelAuthenticationFailed)
}
}
}
impl AuthDecodeProver<AuthenticateArithmeticLabels> {
/// Authenticates the arithmetic labels which were used earlier in
/// [AuthDecodeProver<LabelSumCommitment>] by first re-generating the
/// arithmetic labels from a seed and then encrypting them with binary
/// labels. The resulting ciphertext must match the ciphertext which was
/// sent to us in [AuthDecodeProver<LabelSumCommitment>]. Returns the next
/// expected state.
pub fn authenticate_arithmetic_labels(
self,
seed: Seed,
) -> Result<AuthDecodeProver<ProofCreation>, ProverError> {
let alabels = LabelGenerator::generate_from_seed(
self.state.all_binary_labels.len(),
ARITHMETIC_LABEL_SIZE,
seed,
);
// Encrypt the arithmetic labels with binary labels and compare the resulting
// ciphertexts with the ciphertexts which the Verifier sent to us earlier.
let ciphertexts = match encrypt_arithmetic_labels(&alabels, &self.state.all_binary_labels) {
Ok(ct) => ct,
Err(_) => return Err(ProverError::InternalError),
};
if ArithmeticLabelCheck::new(&ciphertexts) != self.state.arith_label_check {
return Err(ProverError::ArithmeticLabelAuthenticationFailed);
}
// There will be as many deltas as there are output labels in the
// garbled circuit.
let mut deltas: Vec<Delta> = Vec::with_capacity(self.state.all_binary_labels.len());
let zero_sums: Vec<ZeroSum> = alabels
.chunks(self.prover.chunk_size())
.map(|chunk_of_alabel_pairs| {
let (zero_sum, deltas_in_chunk) =
compute_zero_sum_and_deltas(chunk_of_alabel_pairs);
deltas.extend(deltas_in_chunk);
zero_sum
})
.collect();
Ok(AuthDecodeProver {
state: ProofCreation {
chunks: self.state.chunks,
label_sum_hashes: self.state.label_sum_hashes,
plaintext_hashes: self.state.plaintext_hashes,
salts: self.state.salts,
deltas,
zero_sums,
},
prover: self.prover,
})
}
}
impl AuthDecodeProver<ProofCreation> {
/// Creates zk proofs of label decoding for each chunk of plaintext.
/// Returns serialized proofs and salts.
pub fn create_zk_proofs(self) -> Result<(Vec<Proof>, Vec<Salt>), ProverError> {
let proofs = self
.create_zkproof_inputs(&self.state.zero_sums, self.state.deltas.clone())
.into_iter()
.map(|input| self.prover.prove(input))
.collect::<Result<Vec<_>, _>>()?;
Ok((proofs, self.state.salts))
}
/// Returns [ProofInput]s for each [Chunk].
fn create_zkproof_inputs(
&self,
zero_sum: &Vec<ZeroSum>,
mut deltas: Vec<Delta>,
) -> Vec<ProofInput> {
// Since the last chunk is padded with zero plaintext, we also zero-pad
// the corresponding deltas of the last chunk.
let delta_pad_count = self.prover.chunk_size() * self.state.chunks.len() - deltas.len();
deltas.extend(vec![Delta::from(0u8); delta_pad_count]);
// we will have as many chunks of deltas as there are chunks of plaintext
let chunks_of_deltas: Vec<Vec<Delta>> = deltas
.chunks(self.prover.chunk_size())
.map(|i| i.to_vec())
.collect();
(0..self.state.chunks.len())
.map(|i| ProofInput {
plaintext_hash: self.state.plaintext_hashes[i].clone(),
label_sum_hash: self.state.label_sum_hashes[i].clone(),
sum_of_zero_labels: zero_sum[i].clone(),
plaintext: self.state.chunks[i].clone(),
salt: self.state.salts[i].clone(),
deltas: chunks_of_deltas[i].clone(),
})
.collect()
}
}
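// What each zk proof shows, informally (a sketch; the circuit itself is the
// authoritative statement of the constraints). For one chunk with plaintext
// bits b_i:
//
// plaintext_hash == Poseidon(the chunk's salted field elements)
// label_sum_hash == Poseidon((label_sum << salt_size) + salt)
// label_sum == sum_of_zero_labels + Σ b_i * deltas[i]
//
// The last relation holds because the active arithmetic label for bit b_i is
// zero_label_i + b_i * delta_i.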
#[cfg(test)]
mod tests {
use crate::prover::{
AuthDecodeProver, AuthenticateArithmeticLabels, BinaryLabelsAuthenticated,
LabelSumCommitment, PlaintextCommitment, ProofInput, Prove, ProverError, Setup,
};
use crate::{Plaintext, Proof};
use num::BigUint;
/// The prover who implements `Prove` with the correct values
struct CorrectTestProver {}
impl Prove for CorrectTestProver {
fn prove(&self, _: ProofInput) -> Result<Proof, ProverError> {
Ok(Proof::default())
}
fn useful_bits(&self) -> usize {
253
}
fn poseidon_rate(&self) -> usize {
15
}
fn permutation_count(&self) -> usize {
1
}
fn salt_size(&self) -> usize {
125
}
fn chunk_size(&self) -> usize {
3670
}
fn hash(&self, _: &Vec<BigUint>) -> Result<BigUint, ProverError> {
Ok(BigUint::default())
}
}
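#[test]
/// Sanity-checks the sizing relation which `plaintext_to_chunks` relies on
/// (an added check using [CorrectTestProver]'s values): the salted chunk
/// must fill the Poseidon permutations exactly.
fn test_correct_prover_sizing() {
let p = CorrectTestProver {};
assert_eq!((p.chunk_size() + p.salt_size()) % p.useful_bits(), 0);
assert_eq!(
(p.chunk_size() + p.salt_size()) / p.useful_bits(),
p.poseidon_rate() * p.permutation_count()
);
}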
#[test]
/// Inputs empty plaintext and triggers [ProverError::EmptyPlaintext]
fn test_error_empty_plaintext() {
let lsp = AuthDecodeProver {
state: Setup { plaintext: vec![] },
prover: Box::new(CorrectTestProver {}),
};
let res = lsp.setup();
assert_eq!(res.err().unwrap(), ProverError::EmptyPlaintext);
}
#[test]
/// Sets useful_bits() < salt_size() and triggers [ProverError::NoRoomForSalt]
fn test_error_no_room_for_salt() {
struct TestProver {}
impl Prove for TestProver {
fn prove(&self, _input: ProofInput) -> Result<Proof, ProverError> {
Ok(Proof::default())
}
fn useful_bits(&self) -> usize {
124 //changed from 253
}
fn poseidon_rate(&self) -> usize {
15
}
fn permutation_count(&self) -> usize {
1
}
fn salt_size(&self) -> usize {
125
}
fn chunk_size(&self) -> usize {
3670
}
fn hash(&self, _inputs: &Vec<BigUint>) -> Result<BigUint, ProverError> {
Ok(BigUint::default())
}
}
let lsp = AuthDecodeProver {
state: Setup {
plaintext: vec![0u8; 1],
},
prover: Box::new(TestProver {}),
};
let res = lsp.setup();
assert_eq!(res.err().unwrap(), ProverError::NoRoomForSalt);
}
#[test]
/// Sets chunk_size() > MAX_CHUNK_SIZE and triggers [ProverError::MaxChunkSizeExceeded]
fn test_error_max_chunk_size_exceeded() {
struct TestProver {}
impl Prove for TestProver {
fn prove(&self, _input: ProofInput) -> Result<Proof, ProverError> {
Ok(Proof::default())
}
fn useful_bits(&self) -> usize {
253
}
fn poseidon_rate(&self) -> usize {
15
}
fn permutation_count(&self) -> usize {
1
}
fn salt_size(&self) -> usize {
125
}
fn chunk_size(&self) -> usize {
super::MAX_CHUNK_SIZE + 1 //changed from 3670
}
fn hash(&self, _inputs: &Vec<BigUint>) -> Result<BigUint, ProverError> {
Ok(BigUint::default())
}
}
let lsp = AuthDecodeProver {
state: Setup {
plaintext: vec![0u8; 1],
},
prover: Box::new(TestProver {}),
};
let res = lsp.setup();
assert_eq!(res.err().unwrap(), ProverError::MaxChunkSizeExceeded);
}
#[test]
/// Sets poseidon_rate() too low and triggers [ProverError::WrongFieldElementCount]
fn test_error_wrong_field_element_count() {
struct TestProver {}
impl Prove for TestProver {
fn prove(&self, _input: ProofInput) -> Result<Proof, ProverError> {
Ok(Proof::default())
}
fn useful_bits(&self) -> usize {
253
}
fn poseidon_rate(&self) -> usize {
14 //changed from 15
}
fn permutation_count(&self) -> usize {
1
}
fn salt_size(&self) -> usize {
125
}
fn chunk_size(&self) -> usize {
3670
}
fn hash(&self, _inputs: &Vec<BigUint>) -> Result<BigUint, ProverError> {
Ok(BigUint::default())
}
}
let lsp = AuthDecodeProver {
state: Setup {
plaintext: vec![0u8; 1],
},
prover: Box::new(TestProver {}),
};
let res = lsp.setup();
assert_eq!(res.err().unwrap(), ProverError::WrongFieldElementCount);
}
#[test]
/// Inputs too much plaintext and triggers [ProverError::MaxChunkCountExceeded]
fn test_error_max_chunk_count_exceeded() {
let lsp = AuthDecodeProver {
state: Setup {
plaintext: vec![0u8; 1000000],
},
prover: Box::new(CorrectTestProver {}),
};
let res = lsp.setup();
assert_eq!(res.err().unwrap(), ProverError::MaxChunkCountExceeded);
}
#[test]
/// Returns [ProverError::ErrorInPoseidonImplementation] when attempting to hash
fn test_error_error_in_poseidon_implementation() {
struct TestProver {}
impl Prove for TestProver {
fn prove(&self, _input: ProofInput) -> Result<Proof, ProverError> {
Ok(Proof::default())
}
fn useful_bits(&self) -> usize {
253
}
fn poseidon_rate(&self) -> usize {
15
}
fn permutation_count(&self) -> usize {
1
}
fn salt_size(&self) -> usize {
125
}
fn chunk_size(&self) -> usize {
3670
}
fn hash(&self, _inputs: &Vec<BigUint>) -> Result<BigUint, ProverError> {
Err(ProverError::ErrorInPoseidonImplementation)
}
}
let lsp = AuthDecodeProver::new(Plaintext::default(), Box::new(TestProver {}));
let res = lsp.prover.hash(&vec![BigUint::default()]);
assert_eq!(
res.err().unwrap(),
ProverError::ErrorInPoseidonImplementation
);
}
#[test]
/// Sets too few ciphertexts and triggers [ProverError::IncorrectEncryptedLabelSize]
fn test_error_incorrect_encrypted_label_size() {
let ciphertexts = vec![[[0u8; 16], [0u8; 16]]];
let labels = vec![0u128];
let lsp = AuthDecodeProver {
state: LabelSumCommitment::default(),
prover: Box::new(CorrectTestProver {}),
};
let res = lsp.label_sum_commitment(ciphertexts, &labels);
assert_eq!(
res.err().unwrap(),
ProverError::IncorrectEncryptedLabelSize(0, 1)
);
}
#[test]
/// Sets too few binary labels and triggers [ProverError::IncorrectBinaryLabelSize]
fn test_error_incorrect_binary_label_size() {
let pt_len = 1000;
let ciphertexts = vec![[[0u8; 16], [0u8; 16]]; pt_len * 8];
let labels = vec![0u128];
let lsp = AuthDecodeProver {
state: LabelSumCommitment {
plaintext_size: pt_len * 8,
..Default::default()
},
prover: Box::new(CorrectTestProver {}),
};
let res = lsp.label_sum_commitment(ciphertexts, &labels);
assert_eq!(
res.err().unwrap(),
ProverError::IncorrectBinaryLabelSize(pt_len * 8, 1)
);
}
#[test]
/// Doesn't provide binary labels and triggers [ProverError::BinaryLabelsNotProvided]
fn test_error_binary_labels_not_provided() {
let lsp = AuthDecodeProver {
state: BinaryLabelsAuthenticated::default(),
prover: Box::new(CorrectTestProver {}),
};
let res = lsp.binary_labels_authenticated(true, None);
assert_eq!(res.err().unwrap(), ProverError::BinaryLabelsNotProvided);
}
#[test]
/// Receives a `false` signal and triggers [ProverError::BinaryLabelAuthenticationFailed]
fn test_error_binary_label_authentication_failed() {
let lsp = AuthDecodeProver {
state: BinaryLabelsAuthenticated::default(),
prover: Box::new(CorrectTestProver {}),
};
let res = lsp.binary_labels_authenticated(false, Some(vec![[0u128, 0u128]]));
assert_eq!(
res.err().unwrap(),
ProverError::BinaryLabelAuthenticationFailed
);
}
#[test]
/// Provides the wrong seed and triggers [ProverError::ArithmeticLabelAuthenticationFailed]
fn test_error_arithmetic_label_authentication_failed() {
let lsp = AuthDecodeProver {
state: AuthenticateArithmeticLabels::default(),
prover: Box::new(CorrectTestProver {}),
};
let res = lsp.authenticate_arithmetic_labels([0u8; 32]);
assert_eq!(
res.err().unwrap(),
ProverError::ArithmeticLabelAuthenticationFailed
);
}
#[test]
/// Returns [ProverError::ProvingBackendError] when attempting to prove
fn test_error_proving_backend_error() {
struct TestProver {}
impl Prove for TestProver {
fn prove(&self, _input: ProofInput) -> Result<Proof, ProverError> {
Err(ProverError::ProvingBackendError)
}
fn useful_bits(&self) -> usize {
253
}
fn poseidon_rate(&self) -> usize {
15
}
fn permutation_count(&self) -> usize {
1
}
fn salt_size(&self) -> usize {
125
}
fn chunk_size(&self) -> usize {
3670
}
fn hash(&self, _inputs: &Vec<BigUint>) -> Result<BigUint, ProverError> {
Ok(BigUint::default())
}
}
let lsp = AuthDecodeProver::new(Plaintext::default(), Box::new(TestProver {}));
let res = lsp.prover.prove(ProofInput::default());
assert_eq!(res.err().unwrap(), ProverError::ProvingBackendError);
}
#[test]
/// Tests AuthDecodeProver<Setup>::plaintext_to_chunks()
fn test_plaintext_to_chunks() {
let lsp = AuthDecodeProver {
state: Setup::default(),
prover: Box::new(CorrectTestProver {}),
};
// Should return 1 chunk
let size = lsp.prover.chunk_size() / 8 - 1;
let (chunks, salts) = lsp.plaintext_to_chunks(&vec![0u8; size]).unwrap();
assert_eq!(chunks.len(), 1);
assert_eq!(salts.len(), 1);
// Should return 1 chunk
let size = lsp.prover.chunk_size() / 8;
let (chunks, salts) = lsp.plaintext_to_chunks(&vec![0u8; size]).unwrap();
assert_eq!(chunks.len(), 1);
assert_eq!(salts.len(), 1);
// Should return 2 chunks
let size = lsp.prover.chunk_size() / 8 + 1;
let (chunks, salts) = lsp.plaintext_to_chunks(&vec![0u8; size]).unwrap();
assert_eq!(chunks.len(), 2);
assert_eq!(salts.len(), 2);
}
#[test]
/// Tests AuthDecodeProver<PlaintextCommitment>::salt_chunk()
fn test_salt_chunk() {
let lsp = AuthDecodeProver {
state: PlaintextCommitment::default(),
prover: Box::new(CorrectTestProver {}),
};
let chunk: Vec<BigUint> = vec![0u128.into(), 0u128.into()];
let salt = 1234567890u128.into();
let salted_chunk = lsp.salt_chunk(&chunk, &salt).unwrap();
assert_eq!(salted_chunk, [0u128.into(), 1234567890u128.into()]);
}
}


@@ -0,0 +1,32 @@
pub mod onetimesetup;
pub mod poseidon;
pub mod provernode;
pub mod verifiernode;
#[cfg(test)]
mod tests {
use super::onetimesetup::OneTimeSetup;
use super::provernode::Prover;
use super::verifiernode::Verifier;
use crate::tests::e2e_test;
#[test]
fn snarkjs_e2e_test() {
let prover_ots = OneTimeSetup::new();
let verifier_ots = OneTimeSetup::new();
// The Prover should have generated the proving key (before the AuthDecode
// protocol starts) like this:
prover_ots.setup().unwrap();
let proving_key = prover_ots.get_proving_key().unwrap();
// The Verifier should have generated the verification key (before the
// AuthDecode protocol starts) like this:
verifier_ots.setup().unwrap();
let verification_key = verifier_ots.get_verification_key().unwrap();
let prover = Box::new(Prover::new(proving_key));
let verifier = Box::new(Verifier::new(verification_key));
e2e_test(prover, verifier, false);
}
}


@@ -0,0 +1,129 @@
use rand::{distributions::Alphanumeric, Rng};
use std::fs;
use std::path::Path;
use std::process::{Command, Output};
#[derive(Debug)]
pub enum Error {
FileDoesNotExist,
SnarkjsError,
}
pub struct OneTimeSetup {}
// OneTimeSetup should be run when the Notary starts. It checks that all files
// needed by snarkjs are in place; if not, the files are generated.
// The files need to be generated only once *ever* for all future instantiations
// of the Notary.
impl OneTimeSetup {
pub fn new() -> Self {
Self {}
}
fn check_output(&self, output: Result<Output, std::io::Error>) -> Result<(), Error> {
if output.is_err() {
return Err(Error::SnarkjsError);
}
if !output.unwrap().status.success() {
return Err(Error::SnarkjsError);
}
Ok(())
}
fn generate_entropy(&self) -> String {
let entropy: String = rand::thread_rng()
.sample_iter(&Alphanumeric)
.take(500)
.map(char::from)
.collect();
assert!(entropy.len() == 500);
entropy
}
pub fn setup(&self) -> Result<(), Error> {
// check if files which we ship are present
if !Path::new("circom/powersOfTau28_hez_final_14.ptau").exists()
|| !Path::new("circom/circuit.r1cs").exists()
{
return Err(Error::FileDoesNotExist);
}
// Check whether any of the files has not been generated yet. If so,
// regenerate all files.
if !Path::new("circom/circuit_0000.zkey").exists()
|| !Path::new("circom/circuit_final.zkey.notary").exists()
|| !Path::new("circom/verification_key.json").exists()
{
let entropy = self.generate_entropy();
//return self.regenerate1(entropy);
return self.regenerate2(entropy);
}
Ok(())
}
/// Returns the already existing proving key.
pub fn get_proving_key(&self) -> Result<Vec<u8>, Error> {
let path = Path::new("circom/circuit_final.zkey.notary");
if !path.exists() {
return Err(Error::FileDoesNotExist);
}
let key = fs::read(path).unwrap();
Ok(key)
}
/// Returns the already existing verification key.
pub fn get_verification_key(&self) -> Result<Vec<u8>, Error> {
let path = Path::new("circom/verification_key.json");
if !path.exists() {
return Err(Error::FileDoesNotExist);
}
let key = fs::read(path).unwrap();
Ok(key)
}
// this will work only if snarkjs is in the PATH
fn regenerate1(&self, entropy: String) -> Result<(), Error> {
let output = Command::new("snarkjs")
.args([
"groth16",
"setup",
"circom/circuit.r1cs",
"circom/powersOfTau28_hez_final_14.ptau",
"circom/circuit_0000.zkey",
])
.output();
self.check_output(output)?;
let output = Command::new("snarkjs")
.args([
"zkey",
"contribute",
"circom/circuit_0000.zkey",
"circom/circuit_final.zkey.notary",
&(String::from("-e=\"") + &entropy + &String::from("\"")),
])
.output();
self.check_output(output)?;
let output = Command::new("snarkjs")
.args([
"zkey",
"export",
"verificationkey",
"circom/circuit_final.zkey.notary",
"circom/verification_key.json",
])
.output();
self.check_output(output)?;
Ok(())
}
// call a js wrapper which does what regenerate1() above does
fn regenerate2(&self, entropy: String) -> Result<(), Error> {
let output = Command::new("node")
.args(["circom/onetimesetup.mjs", &entropy])
.output();
self.check_output(output)?;
Ok(())
}
}


@@ -0,0 +1,123 @@
use std::str::FromStr;
use ark_bn254::Fr as F;
use ark_sponge::poseidon::{PoseidonConfig, PoseidonSponge};
use ark_sponge::CryptographicSponge;
use ark_sponge::FieldBasedCryptographicSponge;
use lazy_static::lazy_static;
use num::{BigUint, Num};
use regex::Regex;
use std::fs::File;
use std::io::prelude::*;
/// Additive round keys for a specific Poseidon rate.
/// The outer vec's length equals the total (partial + full) round count;
/// the inner vec's length equals rate + capacity (our Poseidon's capacity is fixed at 1).
type Ark = Vec<Vec<F>>;
/// MDS matrix.
/// Both the outer and the inner vec's lengths equal rate + 1.
type Mds = Vec<Vec<F>>;
/// Both PARTIAL_ROUNDS and FULL_ROUNDS are copied from circomlib's poseidon.circom.
/// index + 1 represents the "rate" of the Poseidon; the value is the number of
/// partial rounds corresponding to that rate, e.g. for Poseidon with rate 2 we
/// use 57 partial rounds.
const PARTIAL_ROUNDS: [usize; 16] = [
56, 57, 56, 60, 60, 63, 64, 63, 60, 66, 60, 65, 70, 60, 64, 68,
];
const FULL_ROUNDS: usize = 8;
pub struct Poseidon {
arks: Vec<Ark>,
mdss: Vec<Mds>,
}
impl Poseidon {
pub fn new() -> Poseidon {
let (arks, mdss) = setup();
Poseidon { arks, mdss }
}
/// Hashes the input with Poseidon and returns the digest.
pub fn hash(&self, input: &Vec<BigUint>) -> BigUint {
if input.len() > 16 {
panic!("inputs longer than 16 field elements are not supported");
}
// Each Poseidon rate requires a separate config
let rate = input.len();
// create the config on the fly since it is a cheap operation
let config = PoseidonConfig {
full_rounds: FULL_ROUNDS,
partial_rounds: PARTIAL_ROUNDS[rate - 1],
alpha: 5,
ark: self.arks[rate - 1].clone(),
mds: self.mdss[rate - 1].clone(),
rate,
// This is always fixed at 1
capacity: 1,
};
let mut sponge = PoseidonSponge::<F>::new(&config);
// convert input to Field elements
let fes: Vec<F> = input
.iter()
.map(|x| F::from_str(&x.to_string()).unwrap())
.collect();
sponge.absorb(&fes);
let fe: Vec<F> = sponge.squeeze_native_field_elements(1);
BigUint::from_str(&fe[0].to_string()).unwrap()
}
}
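// Usage sketch (assumes `circom/poseidon_constants_old.circom` is reachable
// from the working directory, since `setup()` parses the constants from it):
//
// let poseidon = Poseidon::new();
// // the rate (here 2) is selected automatically from the input length
// let digest = poseidon.hash(&vec![BigUint::from(1u8), BigUint::from(2u8)]);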
fn setup() -> (Vec<Ark>, Vec<Mds>) {
let mut file = File::open("circom/poseidon_constants_old.circom").unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).unwrap();
lazy_static! {
// match either very long decimal numbers or 32-byte hex numbers
static ref RE: Regex = Regex::new(r"([0-9]{60,90})|(0x[0-9a-f]{64})").unwrap();
}
// convert all matched strings into Field elements
let v: Vec<F> = RE
.find_iter(&contents)
.map(|m| {
let m = m.as_str();
let decimal: String = if m.starts_with("0x") {
// convert from hex to decimal
BigUint::from_str_radix(&m[2..], 16)
.unwrap()
.to_str_radix(10)
} else {
// already decimal
m.into()
};
F::from_str(&decimal).unwrap()
})
.collect();
// discard the first hex number from the comment in the file
let v = v[1..].to_vec();
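// Layout of the constants parsed above: first, for each rate r in 1..=16,
// (r + 1) * (PARTIAL_ROUNDS[r - 1] + FULL_ROUNDS) additive round keys (one
// row of rate + capacity elements per round); then, after all the ark blocks,
// an (r + 1) x (r + 1) MDS matrix for each rate.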
let mut arks: Vec<Ark> = Vec::with_capacity(16);
// split into arks (additive round keys) for each rate
let mut offset = 0;
for rate in 1..17 {
let total = (rate + 1) * (PARTIAL_ROUNDS[rate - 1] + FULL_ROUNDS);
let elems = &v[offset..offset + total];
offset += total;
arks.push(elems.chunks(rate + 1).map(|x| x.to_vec()).collect());
}
let mut mdss: Vec<Mds> = Vec::with_capacity(16);
for rate in 1..17 {
let total = (rate + 1) * (rate + 1);
let elems = &v[offset..offset + total];
offset += total;
mdss.push(elems.chunks(rate + 1).map(|x| x.to_vec()).collect());
}
// we should have consumed all elements
assert!(v.len() == offset);
(arks, mdss)
}


@@ -0,0 +1,131 @@
use super::poseidon::Poseidon;
use crate::prover::ProofInput;
use crate::prover::{Prove, ProverError};
use json::{object, stringify_pretty};
use num::{BigUint, FromPrimitive, ToPrimitive, Zero};
use std::env::temp_dir;
use std::fs;
use std::process::{Command, Output};
use uuid::Uuid;
pub struct Prover {
proving_key: Vec<u8>,
poseidon: Poseidon,
}
impl Prover {
pub fn new(proving_key: Vec<u8>) -> Self {
Self {
proving_key,
poseidon: Poseidon::new(),
}
}
/// Creates inputs in the "input.json" format which snarkjs expects.
fn create_proof_inputs(&self, input: ProofInput) -> String {
// convert each field element of plaintext into a string
let plaintext: Vec<String> = input
.plaintext
.iter()
.map(|bigint| bigint.to_string())
.collect();
// convert all deltas to strings
let deltas_str: Vec<String> = input.deltas.iter().map(|v| v.to_string()).collect();
// split deltas into groups corresponding to the field elements
// of our Poseidon circuit
let deltas_fes: Vec<&[String]> = deltas_str.chunks(self.useful_bits()).collect();
// prepare input.json
let input = object! {
plaintext_hash: input.plaintext_hash.to_string(),
label_sum_hash: input.label_sum_hash.to_string(),
sum_of_zero_labels: input.sum_of_zero_labels.to_string(),
plaintext: plaintext,
salt: input.salt.to_string(),
delta: deltas_fes[0..deltas_fes.len()-1],
// last field element's deltas are a separate input
delta_last: deltas_fes[deltas_fes.len()-1]
};
stringify_pretty(input, 4)
}
}
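// Shape of the generated "input.json" (a sketch; all values are decimal
// strings, lists abbreviated):
//
// {
//     "plaintext_hash": "...",
//     "label_sum_hash": "...",
//     "sum_of_zero_labels": "...",
//     "plaintext": ["...", ...],
//     "salt": "...",
//     "delta": [["...", ...], ...],
//     "delta_last": ["...", ...]
// }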
impl Prove for Prover {
fn useful_bits(&self) -> usize {
253
}
fn poseidon_rate(&self) -> usize {
16
}
fn permutation_count(&self) -> usize {
1
}
fn salt_size(&self) -> usize {
128
}
fn chunk_size(&self) -> usize {
3920 // 253 * 15 + 125: 15 full field elements of plaintext plus 125 plaintext bits in the 16th element, whose low 128 bits hold the salt
}
fn hash(&self, inputs: &Vec<BigUint>) -> Result<BigUint, ProverError> {
Ok(self.poseidon.hash(inputs))
}
/// Produces a groth16 proof with snarkjs. Converts `input` into the
/// "input.json" format which snarkjs expects and invokes the prover
/// via node.
let mut path1 = temp_dir();
let mut path2 = temp_dir();
let mut path3 = temp_dir();
path1.push(format!("input.json.{}", Uuid::new_v4()));
path2.push(format!("proving_key.zkey.{}", Uuid::new_v4()));
path3.push(format!("proof.json.{}", Uuid::new_v4()));
let input = self.create_proof_inputs(input);
fs::write(path1.clone(), input).expect("Unable to write file");
fs::write(path2.clone(), self.proving_key.clone()).expect("Unable to write file");
let output = Command::new("node")
.args([
"circom/prove.mjs",
path1.to_str().unwrap(),
path2.to_str().unwrap(),
path3.to_str().unwrap(),
])
.output();
fs::remove_file(path1).expect("Unable to remove file");
fs::remove_file(path2).expect("Unable to remove file");
check_output(output)?;
let proof = fs::read(path3.clone()).unwrap();
fs::remove_file(path3).expect("Unable to remove file");
Ok(proof)
}
}
// Sets snarkjs's proving key received from the Verifier. Note that this
// method should be invoked only once on the very first interaction with
// the Verifier. For future interactions with the same Verifier, a cached
// key can be used.
// fn set_proving_key(&mut self, key: Vec<u8>) -> Result<(), ProverError> {
//     let res = fs::write("circuit_final.zkey.verifier", key);
//     if res.is_err() {
//         return Err(ProverError::FileSystemError);
//     }
//     Ok(())
// }
fn check_output(output: Result<Output, std::io::Error>) -> Result<(), ProverError> {
if output.is_err() {
return Err(ProverError::ProvingBackendError);
}
if !output.unwrap().status.success() {
return Err(ProverError::ProvingBackendError);
}
Ok(())
}


@@ -0,0 +1,77 @@
use crate::verifier::VerificationInput;
use crate::verifier::{VerifierError, Verify};
use json::{array, object, stringify, stringify_pretty, JsonValue};
use num::{BigUint, FromPrimitive, ToPrimitive, Zero};
use std::env::temp_dir;
use std::fs;
use std::path::Path;
use std::process::{Command, Output};
use uuid::Uuid;
pub struct Verifier {
verification_key: Vec<u8>,
}
impl Verifier {
pub fn new(verification_key: Vec<u8>) -> Self {
Self { verification_key }
}
}
impl Verify for Verifier {
fn field_size(&self) -> usize {
254
}
fn useful_bits(&self) -> usize {
253
}
fn chunk_size(&self) -> usize {
3920 // 253 * 15 + 125: 15 full field elements of plaintext plus 125 plaintext bits in the last element
}
fn verify(&self, input: VerificationInput) -> Result<bool, VerifierError> {
// public.json is a flat array
let mut public_json: Vec<String> = Vec::new();
public_json.push(input.plaintext_hash.to_string());
public_json.push(input.label_sum_hash.to_string());
let delta_str: Vec<String> = input.deltas.iter().map(|v| v.to_string()).collect();
public_json.extend::<Vec<String>>(delta_str);
public_json.push(input.sum_of_zero_labels.to_string());
let s = stringify(JsonValue::from(public_json.clone()));
// write into temp files and delete the files after verification
let mut path1 = temp_dir();
let mut path2 = temp_dir();
path1.push(format!("public.json.{}", Uuid::new_v4()));
path2.push(format!("proof.json.{}", Uuid::new_v4()));
fs::write(path1.clone(), s).expect("Unable to write file");
fs::write(path2.clone(), input.proof).expect("Unable to write file");
let output = Command::new("node")
.args([
"circom/verify.mjs",
path1.to_str().unwrap(),
path2.to_str().unwrap(),
])
.output();
fs::remove_file(path1).expect("Unable to remove file");
fs::remove_file(path2).expect("Unable to remove file");
check_output(&output)?;
// A non-zero exit status signals that the proof did not verify.
if !output.unwrap().status.success() {
return Ok(false);
}
Ok(true)
}
}
fn check_output(output: &Result<Output, std::io::Error>) -> Result<(), VerifierError> {
// An io error means the verifying backend could not be run at all. A non-zero
// exit status is not treated as a backend error here, since it is how the
// verify script signals that the proof did not verify.
if output.is_err() {
return Err(VerifierError::VerifyingBackendError);
}
Ok(())
}

190
src/utils.rs Normal file

@@ -0,0 +1,190 @@
use crate::{Delta, ZeroSum};
use aes::{Aes128, NewBlockCipher};
use ark_ff::BigInt;
use cipher::{consts::U16, generic_array::GenericArray, BlockEncrypt};
use num::BigUint;
use sha2::{Digest, Sha256};
/// Converts bits in MSB-first order into a `BigUint`
pub fn bits_to_bigint(bits: &[bool]) -> BigUint {
BigUint::from_bytes_be(&boolvec_to_u8vec(&bits))
}
#[test]
fn test_bits_to_bigint() {
let bits = [true, false];
assert_eq!(bits_to_bigint(&bits), 2u8.into());
}
/// Converts bits in MSB-first order into BE bytes. The bits will be left-padded
/// with zeroes to the nearest multiple of 8.
pub fn boolvec_to_u8vec(bv: &[bool]) -> Vec<u8> {
let rem = bv.len() % 8;
let first_byte_bitsize = if rem == 0 { 8 } else { rem };
let offset = if rem == 0 { 0 } else { 1 };
let mut v = vec![0u8; bv.len() / 8 + offset];
// implicitly left-pad the first byte with zeroes
for (i, b) in bv[0..first_byte_bitsize].iter().enumerate() {
v[i / 8] |= (*b as u8) << (first_byte_bitsize - 1 - i);
}
for (i, b) in bv[first_byte_bitsize..].iter().enumerate() {
v[1 + i / 8] |= (*b as u8) << (7 - (i % 8));
}
v
}
#[test]
fn test_boolvec_to_u8vec() {
let bits = [true, false];
assert_eq!(boolvec_to_u8vec(&bits), [2]);
let bits = [true, false, false, false, false, false, false, true, true];
assert_eq!(boolvec_to_u8vec(&bits), [1, 3]);
}
/// Converts BE bytes into bits in MSB-first order. The resulting bit vector's
/// length is exactly 8 times the byte count; no padding is performed.
pub fn u8vec_to_boolvec(v: &[u8]) -> Vec<bool> {
let mut bv = Vec::with_capacity(v.len() * 8);
for byte in v.iter() {
for i in 0..8 {
bv.push(((byte >> (7 - i)) & 1) != 0);
}
}
bv
}
#[test]
fn test_u8vec_to_boolvec() {
let bytes = [1];
assert_eq!(
u8vec_to_boolvec(&bytes),
[false, false, false, false, false, false, false, true]
);
let bytes = [255, 2];
assert_eq!(
u8vec_to_boolvec(&bytes),
[
true, true, true, true, true, true, true, true, false, false, false, false, false,
false, true, false
]
);
// convert to bits and back to bytes
let bignum: BigUint = 3898219876643u128.into();
let bits = u8vec_to_boolvec(&bignum.to_bytes_be());
let bytes = boolvec_to_u8vec(&bits);
assert_eq!(bignum, BigUint::from_bytes_be(&bytes));
}
/// Returns sha256 hash digest
pub fn sha256(data: &[u8]) -> [u8; 32] {
let mut hasher = Sha256::new();
hasher.update(data);
hasher.finalize().into()
}
/// Encrypts each arithmetic label using the corresponding binary label as a key
/// and orders each ciphertext pair by the binary label's pointer bit (LSB).
pub fn encrypt_arithmetic_labels(
alabels: &Vec<[BigUint; 2]>,
blabels: &Vec<[u128; 2]>,
) -> Result<Vec<[[u8; 16]; 2]>, String> {
if alabels.len() != blabels.len() {
return Err("the label vectors must have equal lengths".to_string());
}
Ok(blabels
.iter()
.zip(alabels)
.map(|(bin_pair, arithm_pair)| {
// safe to unwrap() since to_be_bytes() always returns exactly 16
// bytes for u128
let zero_key = Aes128::new_from_slice(&bin_pair[0].to_be_bytes()).unwrap();
let one_key = Aes128::new_from_slice(&bin_pair[1].to_be_bytes()).unwrap();
let mut label0 = [0u8; 16];
let mut label1 = [0u8; 16];
let ap0 = arithm_pair[0].to_bytes_be();
let ap1 = arithm_pair[1].to_bytes_be();
// pad with zeroes on the left
label0[16 - ap0.len()..].copy_from_slice(&ap0);
label1[16 - ap1.len()..].copy_from_slice(&ap1);
let mut label0: GenericArray<u8, U16> = GenericArray::from(label0);
let mut label1: GenericArray<u8, U16> = GenericArray::from(label1);
zero_key.encrypt_block(&mut label0);
one_key.encrypt_block(&mut label1);
// place encrypted arithmetic labels based on the pointer bit of
// binary label 0
if (bin_pair[0] & 1) == 0 {
[label0.into(), label1.into()]
} else {
[label1.into(), label0.into()]
}
})
.collect())
}
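// Note on the ordering above: the Prover holds only one ("active") binary
// label per bit and uses its LSB to pick which ciphertext to decrypt, so the
// ciphertext encrypted under a label with LSB b must sit at index b. This
// assumes point-and-permute labels, i.e. that the two labels of a pair always
// have opposite LSBs.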
#[test]
fn test_encrypt_arithmetic_labels() {
let alabels: [BigUint; 2] = [3u8.into(), 4u8.into()];
let blabels = [0u128, 1u128];
let res = encrypt_arithmetic_labels(&vec![alabels], &vec![blabels]).unwrap();
let flat = res[0].into_iter().flatten().collect::<Vec<_>>();
// expected value generated with python3:
// from Crypto.Cipher import AES
// k0 = AES.new((0).to_bytes(16, 'big'), AES.MODE_ECB)
// ct0 = k0.encrypt((3).to_bytes(16, 'big')).hex()
// k1 = AES.new((1).to_bytes(16, 'big'), AES.MODE_ECB)
// ct1 = k1.encrypt((4).to_bytes(16, 'big')).hex()
// print(ct0+ct1)
let expected = "f795aaab494b5923f7fd89ff948bc1e0382fa171550467b34c54c58b9d3cfd24";
assert_eq!(hex::encode(&flat), expected);
}
/// Returns the sum of all zero labels and the delta for each label pair.
/// Since the active label for a bit b is label[0] + b * (label[1] - label[0]),
/// the sum of a chunk's active labels equals zero_sum + Σ b_i * delta_i, which
/// is the relation proved inside the zk circuit.
pub fn compute_zero_sum_and_deltas(
arithmetic_label_pairs: &[[BigUint; 2]],
) -> (ZeroSum, Vec<Delta>) {
let mut deltas: Vec<Delta> = Vec::with_capacity(arithmetic_label_pairs.len());
let mut zero_sum: ZeroSum = 0u8.into();
for label_pair in arithmetic_label_pairs {
// calculate the sum of all zero labels
zero_sum += label_pair[0].clone();
// collect all deltas into one vec
deltas.push(label_pair[1].clone() - label_pair[0].clone());
}
(zero_sum, deltas)
}
#[test]
/// Tests compute_zero_sum_and_deltas()
fn test_compute_zero_sum_and_deltas() {
let labels: [[BigUint; 2]; 2] = [[1u8.into(), 2u8.into()], [3u8.into(), 4u8.into()]];
let (z, d) = compute_zero_sum_and_deltas(&labels);
assert_eq!(z, 4u8.into());
assert_eq!(d, [1u8.into(), 1u8.into()]);
}
/// Makes sure that the `BigUint`'s bitsize is not larger than `bitsize`.
pub fn sanitize_biguint(input: &BigUint, bitsize: usize) -> Result<(), String> {
if (input.bits() as usize) > bitsize {
Err("BigUint is larger than expected".to_string())
} else {
Ok(())
}
}
#[test]
/// Tests sanitize_biguint()
fn test_sanitize_biguint() {
let good = BigUint::from(2u8).pow(253) - BigUint::from(1u8);
let res = sanitize_biguint(&good, 253);
assert!(res.is_ok());
let bad = BigUint::from(2u8).pow(253);
let res = sanitize_biguint(&bad, 253);
assert!(res.is_err());
}

435
src/verifier.rs Normal file

@@ -0,0 +1,435 @@
use super::ARITHMETIC_LABEL_SIZE;
use crate::label::{LabelGenerator, Seed};
use crate::utils::{compute_zero_sum_and_deltas, encrypt_arithmetic_labels, sanitize_biguint};
use crate::{Delta, LabelSumHash, PlaintextHash, Proof, ZeroSum};
use num::BigUint;
#[derive(Debug, PartialEq, thiserror::Error)]
pub enum VerifierError {
#[error("The prover has provided the wrong number of proofs. Expected {0}. Got {1}.")]
WrongProofCount(usize, usize),
#[error("The Prover has provided an input that is larger than expected")]
BigUintTooLarge,
#[error("The proving system returned an error when verifying a proof")]
VerifyingBackendError,
#[error("Proof verification failed")]
VerificationFailed,
#[error("An internal error was encountered")]
InternalError,
}
/// Public inputs and a zk proof that needs to be verified.
#[derive(Default)]
pub struct VerificationInput {
pub plaintext_hash: PlaintextHash,
pub label_sum_hash: LabelSumHash,
pub sum_of_zero_labels: ZeroSum,
pub deltas: Vec<Delta>,
pub proof: Proof,
}
pub trait State {}
pub struct Setup {
binary_labels: Vec<[u128; 2]>,
}
impl State for Setup {}
#[derive(Default)]
pub struct ReceivePlaintextHashes {
deltas: Vec<Delta>,
zero_sums: Vec<ZeroSum>,
ciphertexts: Vec<[[u8; 16]; 2]>,
arith_label_seed: Seed,
}
impl State for ReceivePlaintextHashes {}
#[derive(Default)]
pub struct ReceiveLabelSumHashes {
deltas: Vec<Delta>,
zero_sums: Vec<ZeroSum>,
plaintext_hashes: Vec<PlaintextHash>,
arith_label_seed: Seed,
}
impl State for ReceiveLabelSumHashes {}
#[derive(Default)]
pub struct VerifyMany {
deltas: Vec<Delta>,
zero_sums: Vec<ZeroSum>,
plaintext_hashes: Vec<PlaintextHash>,
label_sum_hashes: Vec<LabelSumHash>,
}
impl State for VerifyMany {}
pub struct VerificationSuccessfull {
plaintext_hashes: Vec<PlaintextHash>,
}
impl State for VerificationSuccessfull {}
pub trait Verify {
/// Verifies the zk proof against public `input`s. Returns `true` on success,
/// `false` otherwise.
fn verify(&self, input: VerificationInput) -> Result<bool, VerifierError>;
/// The EC field size in bits. The Verifier uses this to sanitize the `BigUint`s
/// received from the Prover.
fn field_size(&self) -> usize;
/// Returns how many bits of plaintext we will pack into one field element.
/// Normally, this should be [Verify::field_size] minus 1.
fn useful_bits(&self) -> usize;
/// How many bits of [Plaintext] can fit into one [Chunk]. This does not
/// include the [Salt] of the hash, which takes up the remaining low bits
/// of the last field element of each chunk.
fn chunk_size(&self) -> usize;
}
pub struct AuthDecodeVerifier<S = Setup>
where
S: State,
{
verifier: Box<dyn Verify>,
state: S,
}
impl AuthDecodeVerifier {
/// Creates a new verifier in the initial [Setup] state.
pub fn new(
binary_labels: Vec<[u128; 2]>,
verifier: Box<dyn Verify>,
) -> AuthDecodeVerifier<Setup> {
AuthDecodeVerifier {
state: Setup { binary_labels },
verifier,
}
}
}
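// Happy-path usage sketch (the Prover's messages arrive between the calls;
// `backend`, `hashes`, `sum_hashes` and `proofs` stand for values obtained
// elsewhere in the protocol):
//
// let verifier = AuthDecodeVerifier::new(binary_labels, backend);
// let verifier = verifier.setup()?;
// let (ciphertexts, verifier) = verifier.receive_plaintext_hashes(hashes)?;
// let (seed, verifier) = verifier.receive_label_sum_hashes(sum_hashes)?;
// let (success, plaintext_hashes) = verifier.verify_many(proofs)?;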
impl AuthDecodeVerifier<Setup> {
/// Generates arithmetic labels from a seed, computes the deltas, computes
/// the sum of zero labels, encrypts arithmetic labels using binary
/// labels as encryption keys.
///
/// Returns the next expected state.
pub fn setup(self) -> Result<AuthDecodeVerifier<ReceivePlaintextHashes>, VerifierError> {
// There will be as many deltas as there are garbled circuit output
// labels.
let mut deltas: Vec<BigUint> = Vec::with_capacity(self.state.binary_labels.len());
let (label_pairs, seed) =
LabelGenerator::generate(self.state.binary_labels.len(), ARITHMETIC_LABEL_SIZE);
let zero_sums: Vec<ZeroSum> = label_pairs
.chunks(self.verifier.chunk_size())
.map(|chunk_of_alabel_pairs| {
let (zero_sum, deltas_in_chunk) =
compute_zero_sum_and_deltas(chunk_of_alabel_pairs);
deltas.extend(deltas_in_chunk);
zero_sum
})
.collect();
let ciphertexts = match encrypt_arithmetic_labels(&label_pairs, &self.state.binary_labels) {
Ok(ct) => ct,
Err(_) => return Err(VerifierError::InternalError),
};
Ok(AuthDecodeVerifier {
state: ReceivePlaintextHashes {
zero_sums,
deltas,
ciphertexts,
arith_label_seed: seed,
},
verifier: self.verifier,
})
}
}
impl AuthDecodeVerifier<ReceivePlaintextHashes> {
/// Receives hashes of plaintext and returns the encrypted
/// arithmetic labels and the next expected state.
pub fn receive_plaintext_hashes(
self,
plaintext_hashes: Vec<PlaintextHash>,
) -> Result<
(
Vec<[[u8; 16]; 2]>,
AuthDecodeVerifier<ReceiveLabelSumHashes>,
),
VerifierError,
> {
for h in &plaintext_hashes {
if sanitize_biguint(h, self.verifier.field_size()).is_err() {
return Err(VerifierError::BigUintTooLarge);
}
}
Ok((
self.state.ciphertexts,
AuthDecodeVerifier {
state: ReceiveLabelSumHashes {
zero_sums: self.state.zero_sums,
deltas: self.state.deltas,
plaintext_hashes,
arith_label_seed: self.state.arith_label_seed,
},
verifier: self.verifier,
},
))
}
}
impl AuthDecodeVerifier<ReceiveLabelSumHashes> {
/// Receives hashes of sums of labels and returns the arithmetic label [Seed]
/// and the next expected state.
pub fn receive_label_sum_hashes(
self,
label_sum_hashes: Vec<LabelSumHash>,
) -> Result<(Seed, AuthDecodeVerifier<VerifyMany>), VerifierError> {
for h in &label_sum_hashes {
if sanitize_biguint(h, self.verifier.field_size()).is_err() {
return Err(VerifierError::BigUintTooLarge);
}
}
Ok((
self.state.arith_label_seed,
AuthDecodeVerifier {
state: VerifyMany {
zero_sums: self.state.zero_sums,
deltas: self.state.deltas,
plaintext_hashes: self.state.plaintext_hashes,
label_sum_hashes,
},
verifier: self.verifier,
},
))
}
}
impl AuthDecodeVerifier<VerifyMany> {
/// Verifies as many proofs as there are [Chunk]s of the plaintext. Returns
/// the verification result and the hashes of the plaintext chunks.
pub fn verify_many(
self,
proofs: Vec<Proof>,
) -> Result<(bool, Vec<PlaintextHash>), VerifierError> {
let inputs = self.create_verification_inputs(proofs)?;
for input in inputs {
let res = self.verifier.verify(input)?;
if !res {
// The backend reported an unsuccessful verification without
// returning a backend error.
return Err(VerifierError::VerificationFailed);
}
}
Ok((true, self.state.plaintext_hashes))
}
/// Constructs public inputs to the zk circuit for each [Chunk].
fn create_verification_inputs(
&self,
proofs: Vec<Proof>,
) -> Result<Vec<VerificationInput>, VerifierError> {
// How many chunks of plaintext are there? ( == how many zk proofs to expect)
// The number of deltas equals the number of bits in the plaintext.
// Round up the chunk count.
let chunk_count = (self.state.deltas.len() + (self.verifier.chunk_size() - 1))
/ self.verifier.chunk_size();
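// e.g. with chunk_size = 3670 and 7341 deltas (3670 * 2 + 1), the
// plaintext spans 3 chunks, so 3 proofs are expected.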
if proofs.len() != chunk_count {
return Err(VerifierError::WrongProofCount(chunk_count, proofs.len()));
}
// Since the last chunk of plaintext is padded with zero bits, we also zero-pad
// the corresponding deltas of the last chunk to the size of a chunk.
let delta_pad_count = self.verifier.chunk_size() * chunk_count - self.state.deltas.len();
let mut deltas = self.state.deltas.clone();
deltas.extend(vec![0u8.into(); delta_pad_count]);
let chunks_of_deltas = deltas
.chunks(self.verifier.chunk_size())
.map(|i| i.to_vec())
.collect::<Vec<Vec<_>>>();
Ok((0..chunk_count)
.map(|i| VerificationInput {
plaintext_hash: self.state.plaintext_hashes[i].clone(),
label_sum_hash: self.state.label_sum_hashes[i].clone(),
sum_of_zero_labels: self.state.zero_sums[i].clone(),
deltas: chunks_of_deltas[i].clone(),
proof: proofs[i].clone(),
})
.collect())
}
}
#[cfg(test)]
mod tests {
use crate::verifier::AuthDecodeVerifier;
use crate::verifier::ReceiveLabelSumHashes;
use crate::verifier::ReceivePlaintextHashes;
use crate::verifier::VerificationInput;
use crate::verifier::VerifierError;
use crate::verifier::Verify;
use crate::verifier::VerifyMany;
use crate::Proof;
use num::BigUint;
/// The verifier who implements `Verify` with the correct values
struct CorrectTestVerifier {}
impl Verify for CorrectTestVerifier {
fn verify(&self, _input: VerificationInput) -> Result<bool, VerifierError> {
Ok(true)
}
fn field_size(&self) -> usize {
254
}
fn useful_bits(&self) -> usize {
253
}
fn chunk_size(&self) -> usize {
3670
}
}
#[test]
/// Provides a `BigUint` larger than field_size() and triggers
/// [VerifierError::BigUintTooLarge]
fn test_error_biguint_too_large() {
// test receive_plaintext_hashes()
let lsv = AuthDecodeVerifier {
state: ReceivePlaintextHashes::default(),
verifier: Box::new(CorrectTestVerifier {}),
};
let mut hashes: Vec<BigUint> = (0..100).map(|i| BigUint::from(i as u64)).collect();
hashes[50] = BigUint::from(2u8).pow(lsv.verifier.field_size() as u32);
let res = lsv.receive_plaintext_hashes(hashes);
assert_eq!(res.err().unwrap(), VerifierError::BigUintTooLarge);
// test receive_label_sum_hashes
let lsv = AuthDecodeVerifier {
state: ReceiveLabelSumHashes::default(),
verifier: Box::new(CorrectTestVerifier {}),
};
let mut plaintext_hashes: Vec<BigUint> =
(0..100).map(|i| BigUint::from(i as u64)).collect();
plaintext_hashes[50] = BigUint::from(2u8).pow(lsv.verifier.field_size() as u32);
let res = lsv.receive_label_sum_hashes(plaintext_hashes);
assert_eq!(res.err().unwrap(), VerifierError::BigUintTooLarge);
}
#[test]
/// Provides too many/too few proofs and triggers [VerifierError::WrongProofCount]
fn test_error_wrong_proof_count() {
// 3 chunks
let lsv = AuthDecodeVerifier {
state: VerifyMany {
deltas: vec![0u8.into(); 3670 * 2 + 1],
..Default::default()
},
verifier: Box::new(CorrectTestVerifier {}),
};
// 4 proofs
let res = lsv.verify_many(vec![Proof::default(); 4]);
assert_eq!(res.err().unwrap(), VerifierError::WrongProofCount(3, 4));
// 3 chunks
let lsv = AuthDecodeVerifier {
state: VerifyMany {
deltas: vec![0u8.into(); 3670 * 2 + 1],
..Default::default()
},
verifier: Box::new(CorrectTestVerifier {}),
};
// 2 proofs
let res = lsv.verify_many(vec![Proof::default(); 2]);
assert_eq!(res.err().unwrap(), VerifierError::WrongProofCount(3, 2));
}
#[test]
/// Returns `false` when attempting to verify and triggers
/// [VerifierError::VerificationFailed]
fn test_error_verification_failed() {
struct TestVerifier {}
impl Verify for TestVerifier {
fn verify(&self, _input: VerificationInput) -> Result<bool, VerifierError> {
Ok(false)
}
fn field_size(&self) -> usize {
254
}
fn useful_bits(&self) -> usize {
253
}
fn chunk_size(&self) -> usize {
3670
}
}
let lsv = AuthDecodeVerifier {
state: VerifyMany {
deltas: vec![0u8.into(); 3670 * 2 - 1],
zero_sums: vec![0u8.into(); 2],
plaintext_hashes: vec![0u8.into(); 2],
label_sum_hashes: vec![0u8.into(); 2],
},
verifier: Box::new(TestVerifier {}),
};
let res = lsv.verify_many(vec![Proof::default(); 2]);
assert_eq!(res.err().unwrap(), VerifierError::VerificationFailed);
}
#[test]
/// Returns some other error not related to the verification result when
/// attempting to verify and checks that the error propagates.
fn test_verification_error() {
struct TestVerifier {}
impl Verify for TestVerifier {
fn verify(&self, _input: VerificationInput) -> Result<bool, VerifierError> {
Err(VerifierError::VerifyingBackendError)
}
fn field_size(&self) -> usize {
254
}
fn useful_bits(&self) -> usize {
253
}
fn chunk_size(&self) -> usize {
3670
}
}
let lsv = AuthDecodeVerifier {
state: VerifyMany {
deltas: vec![0u8.into(); 3670 * 2 - 1],
zero_sums: vec![0u8.into(); 2],
plaintext_hashes: vec![0u8.into(); 2],
label_sum_hashes: vec![0u8.into(); 2],
},
verifier: Box::new(TestVerifier {}),
};
let res = lsv.verify_many(vec![Proof::default(); 2]);
assert_eq!(res.err().unwrap(), VerifierError::VerifyingBackendError);
}
}


@@ -1,32 +0,0 @@
// copied from circomlib/circuits/bitify.circom
template Num2Bits(n) {
signal input in;
signal output out[n];
var lc1=0;
var e2=1;
for (var i = 0; i<n; i++) {
out[i] <-- (in >> i) & 1;
out[i] * (out[i] - 1) === 0;
lc1 += out[i] * e2;
e2 = e2+e2;
}
lc1 === in;
}
template InnerProd(){
signal input plaintext;
signal input deltas[254];
signal output out;
component n2b = Num2Bits(254);
plaintext ==> n2b.in;
signal sum[254];
var lc = 0;
for (var i=0; i<254; i++) {
sum[i] <== n2b.out[i] * deltas[i];
lc += sum[i];
}
out <== lc;
}