23 Commits

Author SHA1 Message Date
Daniel Tehrani
20565c8a25 Replace the testing R1CS to an instance that has variables 2023-03-31 12:51:57 +09:00
Daniel Tehrani
cad6dfb30b Fix 2023-03-31 12:51:15 +09:00
Daniel Tehrani
620b8921bd Use dynamic proof sizing 2023-03-30 19:17:02 +09:00
Daniel Tehrani
b9e63cb98c Expose proof values so we can read them in Hoplite 2023-03-29 22:24:29 +09:00
Daniel Tehrani
39218fe057 Use circuit_reader 2023-03-28 16:44:20 +09:00
Daniel Tehrani
8f073995c8 Merge main 2023-03-28 16:43:08 +09:00
Daniel Tehrani
ef4a70ad0a Proof verification circuit (but without final poly evaluation) 2023-03-26 13:35:48 +09:00
Daniel Tehrani
f6de8de3c2 .gitignore proofs loaded by hoplite_circuit 2023-03-24 16:56:15 +09:00
Daniel Tehrani
117f2605a1 Refactor 2023-03-24 16:55:30 +09:00
Daniel Tehrani
69a7821880 Add a prover to generate proofs which is used for testing the spartan-ecdsa halo2 circuit 2023-03-08 11:35:19 -08:00
Daniel Tehrani
05e9bbd9bc Implement sumcheck chip 2023-03-07 15:02:06 -07:00
Daniel Tehrani
9436fad9cc Fix 2023-03-03 19:38:39 -07:00
Daniel Tehrani
0206ac817f Add more circuit value structs 2023-03-03 19:31:10 -07:00
Daniel Tehrani
1a57e4085b Wrap props with Option for compatibility w/ hoplite_circuit 2023-03-03 15:05:47 -07:00
Daniel Tehrani
dfc77f142b Small fix 2023-03-03 14:00:47 -07:00
Daniel Tehrani
a67cd603b9 Specify num constraints & vars with generics 2023-03-03 13:08:03 -07:00
Daniel Tehrani
3ff73e0c5b Move circuit value related structs/traits into circuit_vals.rs 2023-03-03 13:06:17 -07:00
Daniel Tehrani
ad048acbfc Add comments 2023-03-03 12:40:45 -07:00
lsankar4033
5524adf4c2 fix filename 2023-03-01 17:35:38 -07:00
Daniel Tehrani
71f1ce73c5 Add rest of the verification processes in the ref impl 2023-03-01 11:28:55 -07:00
Daniel Tehrani
f46a2a5941 Comment 2023-02-26 14:48:47 +09:00
Daniel Tehrani
ad2ee6e4fb Add zk-sum-check circuit written in Halo2 (still incomplete!) 2023-02-26 14:42:32 +09:00
Daniel Tehrani
23cad0fa18 Move Hoplite into the repo 2023-02-25 19:30:42 +09:00
53 changed files with 5326 additions and 763 deletions

View File

@@ -6,7 +6,7 @@ on:
jobs:
publish:
runs-on: macos-latest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
@@ -20,7 +20,7 @@ jobs:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-10-31
- run: rustup component add rust-src
- run: rustup component add rust-src --toolchain nightly-2022-10-31-x86_64-unknown-linux-gnu
- run: rustup target add x86_64-apple-darwin
# Install circom-secq
- uses: GuillaumeFalourd/clone-github-repo-action@v2
@@ -31,7 +31,7 @@ jobs:
# Install wasm-pack
- uses: jetli/wasm-pack-action@v0.4.0
with:
version: "v0.10.3"
version: "0.10.3"
- run: cargo test --release
- run: yarn
- run: yarn build

4
.gitignore vendored
View File

@@ -41,3 +41,7 @@ wasm_bytes.ts
packages/lib/src/circuits/
packages/lib/example/
packages/hoplite_circuit/params/
packages/hoplite_circuit/prover/proof.bin
packages/hoplite_circuit/prover/input.bin

View File

@@ -2,6 +2,7 @@
"editor.formatOnSave": true,
"cSpell.words": [
"merkle",
"NIZK"
"NIZK",
"Secq"
]
}

View File

@@ -1,7 +1,10 @@
[workspace]
members = [
"packages/spartan_wasm",
# "packages/spartan_wasm",
"packages/secq256k1",
# "packages/poseidon",
"packages/hoplite",
"packages/hoplite_circuit",
"packages/poseidon",
"packages/Spartan-secq",
"packages/circuit_reader",

View File

@@ -1,12 +1,10 @@
[package]
name = "spartan"
version = "0.7.1"
authors = ["Srinath Setty <srinath@microsoft.com>"]
authors = ["Srinath Setty <srinath@microsoft.com>, Dan Tehrani"]
edition = "2021"
description = "High-speed zkSNARKs without trusted setup"
documentation = "https://docs.rs/spartan/"
readme = "README.md"
repository = "https://github.com/microsoft/Spartan"
license-file = "LICENSE"
keywords = ["zkSNARKs", "cryptography", "proofs"]
@@ -39,19 +37,3 @@ criterion = "0.3.1"
name = "libspartan"
path = "src/lib.rs"
crate-type = ["cdylib", "rlib"]
[[bin]]
name = "snark"
path = "profiler/snark.rs"
[[bin]]
name = "nizk"
path = "profiler/nizk.rs"
[[bench]]
name = "snark"
harness = false
[[bench]]
name = "nizk"
harness = false

View File

@@ -41,7 +41,7 @@ pub struct PolyCommitmentBlinds {
#[derive(Debug, Serialize, Deserialize)]
pub struct PolyCommitment {
C: Vec<CompressedGroup>,
pub C: Vec<CompressedGroup>,
}
#[derive(Debug, Serialize, Deserialize)]
@@ -299,7 +299,7 @@ impl AppendToTranscript for PolyCommitment {
#[derive(Debug, Serialize, Deserialize)]
pub struct PolyEvalProof {
proof: DotProductProofLog,
pub proof: DotProductProofLog,
}
impl PolyEvalProof {

View File

@@ -3,30 +3,30 @@ use thiserror::Error;
#[derive(Error, Debug)]
pub enum ProofVerifyError {
#[error("Proof verification failed")]
InternalError,
#[error("Compressed group element failed to decompress: {0:?}")]
DecompressionError([u8; 32]),
#[error("Proof verification failed")]
InternalError,
#[error("Compressed group element failed to decompress: {0:?}")]
DecompressionError([u8; 32]),
}
impl Default for ProofVerifyError {
fn default() -> Self {
ProofVerifyError::InternalError
}
fn default() -> Self {
ProofVerifyError::InternalError
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum R1CSError {
/// returned if the number of constraints is not a power of 2
NonPowerOfTwoCons,
/// returned if the number of variables is not a power of 2
NonPowerOfTwoVars,
/// returned if a wrong number of inputs in an assignment are supplied
InvalidNumberOfInputs,
/// returned if a wrong number of variables in an assignment are supplied
InvalidNumberOfVars,
/// returned if a [u8;32] does not parse into a valid Scalar in the field of secq256k1
InvalidScalar,
/// returned if the supplied row or col in (row,col,val) tuple is out of range
InvalidIndex,
/// returned if the number of constraints is not a power of 2
NonPowerOfTwoCons,
/// returned if the number of variables is not a power of 2
NonPowerOfTwoVars,
/// returned if a wrong number of inputs in an assignment are supplied
InvalidNumberOfInputs,
/// returned if a wrong number of variables in an assignment are supplied
InvalidNumberOfVars,
/// returned if a [u8;32] does not parse into a valid Scalar in the field of secq256k1
InvalidScalar,
/// returned if the supplied row or col in (row,col,val) tuple is out of range
InvalidIndex,
}

File diff suppressed because it is too large

View File

@@ -15,8 +15,8 @@ use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize)]
pub struct BulletReductionProof {
L_vec: Vec<CompressedGroup>,
R_vec: Vec<CompressedGroup>,
pub L_vec: Vec<CompressedGroup>,
pub R_vec: Vec<CompressedGroup>,
}
impl BulletReductionProof {

View File

@@ -11,13 +11,13 @@ use merlin::Transcript;
use serde::{Deserialize, Serialize};
mod bullet;
use bullet::BulletReductionProof;
pub use bullet::BulletReductionProof;
#[derive(Serialize, Deserialize, Debug)]
pub struct KnowledgeProof {
alpha: CompressedGroup,
z1: Scalar,
z2: Scalar,
pub alpha: CompressedGroup,
pub z1: Scalar,
pub z2: Scalar,
}
impl KnowledgeProof {
@@ -77,8 +77,8 @@ impl KnowledgeProof {
#[derive(Serialize, Deserialize, Debug)]
pub struct EqualityProof {
alpha: CompressedGroup,
z: Scalar,
pub alpha: CompressedGroup,
pub z: Scalar,
}
impl EqualityProof {
@@ -146,10 +146,10 @@ impl EqualityProof {
#[derive(Serialize, Deserialize, Debug)]
pub struct ProductProof {
alpha: CompressedGroup,
beta: CompressedGroup,
delta: CompressedGroup,
z: [Scalar; 5],
pub alpha: CompressedGroup,
pub beta: CompressedGroup,
pub delta: CompressedGroup,
pub z: [Scalar; 5],
}
impl ProductProof {
@@ -292,11 +292,11 @@ impl ProductProof {
#[derive(Debug, Serialize, Deserialize)]
pub struct DotProductProof {
delta: CompressedGroup,
beta: CompressedGroup,
z: Vec<Scalar>,
z_delta: Scalar,
z_beta: Scalar,
pub delta: CompressedGroup,
pub beta: CompressedGroup,
pub z: Vec<Scalar>,
pub z_delta: Scalar,
pub z_beta: Scalar,
}
impl DotProductProof {
@@ -420,11 +420,11 @@ impl DotProductProofGens {
#[derive(Debug, Serialize, Deserialize)]
pub struct DotProductProofLog {
bullet_reduction_proof: BulletReductionProof,
delta: CompressedGroup,
beta: CompressedGroup,
z1: Scalar,
z2: Scalar,
pub bullet_reduction_proof: BulletReductionProof,
pub delta: CompressedGroup,
pub beta: CompressedGroup,
pub z1: Scalar,
pub z2: Scalar,
}
impl DotProductProofLog {

View File

@@ -21,26 +21,26 @@ use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug)]
pub struct R1CSProof {
comm_vars: PolyCommitment,
sc_proof_phase1: ZKSumcheckInstanceProof,
claims_phase2: (
pub comm_vars: PolyCommitment,
pub sc_proof_phase1: ZKSumcheckInstanceProof,
pub claims_phase2: (
CompressedGroup,
CompressedGroup,
CompressedGroup,
CompressedGroup,
),
pok_claims_phase2: (KnowledgeProof, ProductProof),
proof_eq_sc_phase1: EqualityProof,
sc_proof_phase2: ZKSumcheckInstanceProof,
comm_vars_at_ry: CompressedGroup,
proof_eval_vars_at_ry: PolyEvalProof,
proof_eq_sc_phase2: EqualityProof,
pub pok_claims_phase2: (KnowledgeProof, ProductProof),
pub proof_eq_sc_phase1: EqualityProof,
pub sc_proof_phase2: ZKSumcheckInstanceProof,
pub comm_vars_at_ry: CompressedGroup,
pub proof_eval_vars_at_ry: PolyEvalProof,
pub proof_eq_sc_phase2: EqualityProof,
}
pub struct R1CSSumcheckGens {
gens_1: MultiCommitGens,
gens_3: MultiCommitGens,
gens_4: MultiCommitGens,
pub gens_1: MultiCommitGens,
pub gens_3: MultiCommitGens,
pub gens_4: MultiCommitGens,
}
// TODO: fix passing gens_1_ref
@@ -59,8 +59,8 @@ impl R1CSSumcheckGens {
}
pub struct R1CSGens {
gens_sc: R1CSSumcheckGens,
gens_pc: PolyCommitmentGens,
pub gens_sc: R1CSSumcheckGens,
pub gens_pc: PolyCommitmentGens,
}
impl R1CSGens {

View File

@@ -1559,6 +1559,7 @@ impl SparseMatPolyEvalProof {
}
}
#[derive(Debug)]
pub struct SparsePolyEntry {
idx: usize,
val: Scalar,
@@ -1609,7 +1610,7 @@ impl SparsePolynomial {
#[cfg(test)]
mod tests {
use super::*;
use rand_core::{RngCore, OsRng};
use rand_core::{OsRng, RngCore};
#[test]
fn check_sparse_polyeval_proof() {
let mut csprng: OsRng = OsRng;

View File

@@ -64,9 +64,9 @@ impl SumcheckInstanceProof {
#[derive(Serialize, Deserialize, Debug)]
pub struct ZKSumcheckInstanceProof {
comm_polys: Vec<CompressedGroup>,
comm_evals: Vec<CompressedGroup>,
proofs: Vec<DotProductProof>,
pub comm_polys: Vec<CompressedGroup>,
pub comm_evals: Vec<CompressedGroup>,
pub proofs: Vec<DotProductProof>,
}
impl ZKSumcheckInstanceProof {

View File

@@ -1,6 +1,6 @@
use super::group::CompressedGroup;
use super::scalar::Scalar;
use merlin::Transcript;
pub use merlin::Transcript;
pub trait ProofTranscript {
fn append_protocol_name(&mut self, protocol_name: &'static [u8]);

View File

@@ -0,0 +1,12 @@
[package]
name = "hoplite"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
secq256k1 = { path = "../secq256k1" }
spartan = { path = "../Spartan-secq" }
sha3 = { version = "0.8.2" }
secpq_curves = { git = "https://github.com/DanTehrani/secpq_curves.git" }

View File

@@ -0,0 +1,8 @@
# Hoplite
Hoplite is a Spartan reference implementation designed to be the spec for the Halo2 Spartan verification circuit. [Srinath's Spartan implementation](https://github.com/microsoft/Spartan) uses stateful classes, making it difficult to conceptualize the verification process in terms of circuit constraints. To better understand the verification process, it would be helpful to re-implement the verification in a circuit-like coding style. For example:
- The verification should be stateless (i.e. should employ functional programming)
- The R1CS matrices should be hard-coded into the circuit
Additionally, this reference implementation should include thorough documentation to facilitate collaboration and audits.

212
packages/hoplite/spartan.md Normal file
View File

@@ -0,0 +1,212 @@
# Spartan: Full Protocol Description
_This doc is a work in progress. Reading it is not yet recommended._
_Reference implementation: [Hoplite](https://github.com/personaelabs/Hoplite)_
## Public Setup
- Compute the Pedersen commitment generators using [hash-to-curve](https://github.com/personaelabs/spartan-ecdsa/blob/main/packages/secq256k1/src/hashtocurve.rs).
## Building blocks
$F_p$: The finite field used in the protocol.
### Pedersen commitment
Commitment
Multi-commitments
### proof-of-dot-prod
TBD
### proof-of-equality
TBD
### proof-of-opening
TBD
### Closed form evaluation of a multilinear polynomial
$$
\widetilde{Z}(r_y) = (1 - r_y[0]) \cdot \widetilde{w}(r_y[1..]) + r_y[0] \cdot \widetilde{(io, 1)}(r_y[1..]) \\
r_y = (2, 3, 4) \\
\widetilde{w}(x_1, x_2) = x_1 + 2x_2 \\
\widetilde{(io, 1)}(x_1, x_2) = x_1 + 2x_2 + 3 \cdot 1 \\
\widetilde{Z}(r_y) = (1 - 2) \cdot (3 + 2 \cdot 4) + 2 \cdot (3 + 2 \cdot 4 + 3) \\
= -1 \cdot 11 + 2 \cdot 14 = 17
$$
$z = (io, 1, w)$
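As a sanity check of the closed form above, here is a toy Rust sketch (plain integers standing in for field elements; the names `w_tilde` and `io_one_tilde` are illustrative, not part of the codebase) that reproduces the example evaluation.

```rust
// Toy sketch: closed-form evaluation of Z~(r_y) from w~ and (io, 1)~.
// Plain i64 arithmetic stands in for field arithmetic.
fn w_tilde(x1: i64, x2: i64) -> i64 {
    x1 + 2 * x2
}

fn io_one_tilde(x1: i64, x2: i64) -> i64 {
    x1 + 2 * x2 + 3
}

fn main() {
    let r_y = [2, 3, 4];
    // Z~(r_y) = (1 - r_y[0]) * w~(r_y[1..]) + r_y[0] * (io, 1)~(r_y[1..])
    let z_eval = (1 - r_y[0]) * w_tilde(r_y[1], r_y[2]) + r_y[0] * io_one_tilde(r_y[1], r_y[2]);
    assert_eq!(z_eval, 17);
}
```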
### zk-sum-check
The details of the zk-sum-check protocol aren't provided in the Spartan paper (it only mentions that it uses methods from prior constructions). The following is a description of the zk-sum-check protocol used in the [original Spartan implementation](https://github.com/microsoft/Spartan).
_Required prior knowledge: [The sum-check protocol](https://zkproof.org/2020/03/16/sum-checkprotocol/)_
**Notations**
- $g$: The polynomial whose sum is being proven. We assume that $g$ is a multilinear polynomial (i.e. degree 1 in each variable) for simplicity.
- $H$: The sum of the evaluations of $g$ over the boolean hypercube.
- $m$: The number of variables in $g$.
- $s$: $\lfloor \log_2{m} \rfloor$
The protocol consists of $m$ rounds.
**Prover: First round**
In the first round, the prover computes
$$g_1(X) = \sum_{(x_2, \ldots, x_m)\in\{0, 1\}^{m-1}} g(X, x_2, \ldots, x_m)$$
In the standard sum-check protocol $g_1$ is sent to the verifier and the verifier checks
$$g_1(0) + g_1(1) \stackrel{?}{=} H$$
and
$$g_1(r_1) \stackrel{?}{=} \sum_{(x_2, \ldots, x_m)\in\{0, 1\}^{m-1}} g(r_1, x_2, \ldots, x_m)$$
where $r_1$ is a challenge.
The evaluation of $g$ in the second check is proven in the subsequent rounds of the sum-check protocol.
In zk-sum-check, we instead provide the proof of evaluation of $g_1(0)$, $g_1(1)$ and $g_1(r_1)$ without revealing the coefficients of $g_1$, using proof-of-dot-product. For efficiency, we combine the evaluations into a single proof as follows.
First, since we assume $g$ is a multilinear polynomial, we can write
$$g_1(X) = p_1X + p_0$$
where $p_0, p_1 \in F_p$; $p_1$ is the linear coefficient and $p_0$ is the y-intercept.
Before running proof-of-dot-prod, the prover must send commitments
$$C_{g1} = \mathrm{multicom}((p_1, p_0), r_{g1})$$
$$C_{eval} = \mathrm{com}(g_1(r_1), r_{eval})$$
$$C_{sum} = \mathrm{com}(g_1(0) + g_1(1), r_{sum})$$
to the verifier.
The prover computes the weighted sum of $g_1(0) + g_1(1)$ and $g_1(r_1)$, using weights $w_0, w_1 \in F_p$ sent from the verifier, as
$$(g_1(0) + g_1(1)) * w_0 + g_1(r_1) * w_1$$
$$= p_1w_0 + 2p_0w_0 + p_1w_1r_1 + p_0w_1$$
$$= p_1(w_0 + r_1w_1) + p_0(2w_0 + w_1)$$
Thus, we use proof-of-dot-prod to prove
$$(w_0 + r_1w_1, 2w_0 + w_1) \cdot (p_1, p_0) = (g_1(0) + g_1(1)) * w_0 + g_1(r_1) * w_1$$
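As a quick check of the algebra above, the following toy Rust sketch (arbitrary integer values standing in for field elements) verifies that the single dot product equals the weighted sum of the two claimed evaluations.

```rust
// Toy sketch: the weighted sum of (g_1(0) + g_1(1)) and g_1(r_1) collapses into
// one dot product with the coefficient vector (p_1, p_0).
// Plain i64 arithmetic stands in for field arithmetic; the values are arbitrary.
fn main() {
    let (p1, p0) = (4, 9); // g_1(X) = p1 * X + p0
    let (w0, w1) = (3, 5); // verifier's weights
    let r1 = 7;            // round challenge

    let g1 = |x: i64| p1 * x + p0;

    // Public vector dotted with the secret coefficients (p1, p0)
    let a = [w0 + r1 * w1, 2 * w0 + w1];
    let lhs = a[0] * p1 + a[1] * p0;

    // Weighted sum of the two claimed evaluations
    let rhs = (g1(0) + g1(1)) * w0 + g1(r1) * w1;

    assert_eq!(lhs, rhs);
}
```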
Now we proceed to the rest of the rounds
### Prover: Rest of the rounds
The rest of the rounds proceed similarly to the first round, except that the prover proves the evaluations of the polynomial
$$g_i(X) = \sum_{(x_{i+1}, \ldots, x_m)\in \{0, 1\}^{m-i}} g(r_1, \ldots, r_{i-1}, X, x_{i+1}, \ldots, x_m)$$
### Prover: Last round
In the standard sum-check protocol, the verifier queries $g(r_1, \ldots, r_m)$ using the oracle of $g$ and checks that the result equals $g_m(r_m)$. In Spartan's version of zk-sum-check, the prover instead provides a proof of evaluation of $g(r_1, \ldots, r_m)$ by **running another zk-sum-check**. The details of this second zk-sum-check protocol are described later in this doc.
### Verification
The verifier receives
- Claimed sum $H$
- proof-of-dot-products $\{dp_1, dp_2, ... dp_m\}$
Recall that the dot-product relation is
$$(w_0 + r_1w_1, 2w_0 + w_1) \cdot (p_1, p_0) = (g_1(0) + g_1(1)) * w_0 + g_1(r_1) * w_1$$
The verifier has access to $r_1, w_0, w_1$ and the commitments $C_{g1}, C_{sum}, C_{eval}$.
The verifier computes the **target commitment**
$$Ct = C_{sum} * w_0 + C_{eval} * w_1$$
and checks the dot product proof
$$
TBD
$$
## Main Protocol
Now we'll see how Spartan (_SpartanNIZK to be precise!_) uses the above building blocks to construct an NIZK for R1CS satisfiability.
---
**Below this is especially WIP! A lot of incomplete stuff!**
1. $P$: Commit to the witness polynomial
   - $P: C = PC.commit(pp, \bar{w}, S)$, and send $C$ to the verifier
2. $V$: Randomly sample a challenge $\tau$ to query $\mathbb{g}$
   - $\tau \in \mathbb{F}^{\log m}$, and send $\tau$ to the prover
4. Let $T_1 = 0$
5. $V$: sample $r_x \in \mathbb{F}^{u1}$
6. $G_{io,\tau}(x) = ((\sum_{y \in \{0, 1\}^s} \widetilde{A}(x, y)\widetilde{Z}(y)) \cdot (\sum_{y \in \{0, 1\}^s}\widetilde{B}(x, y)\widetilde{Z}(y)) - \sum_{y \in \{0, 1\}^s}\widetilde{C}(x, y)\widetilde{Z}(y))\widetilde{eq}(x, \tau)$

   $\sum_{x \in \{0, 1\}^s} G_{io,\tau}(x) = 0$ for a random $\tau$ iff all the constraints are satisfied
   - Run sum-check on $G_{io,\tau}(x)$
   - At the last step of the sum-check, where the verifier queries $G_{io,\tau}(x)$, we use the following sub-protocol.
Define
- $\bar{A}(x) = \sum_{y \in \{0, 1\}^s} \widetilde{A}(x, y)\widetilde{Z}(y)$
- $\bar{B}(x) = \sum_{y \in \{0, 1\}^s} \widetilde{B}(x, y)\widetilde{Z}(y)$
- $\bar{C}(x) = \sum_{y \in \{0, 1\}^s} \widetilde{C}(x, y)\widetilde{Z}(y)$
- $M_{r_x}(y) = r_A \cdot \widetilde{A}(r_x, y)\widetilde{Z}(y) + r_B \cdot \widetilde{B}(r_x, y)\widetilde{Z}(y) + r_C \cdot \widetilde{C}(r_x, y)\widetilde{Z}(y)$
Verify that $\bar{A}(x) \cdot \bar{B}(x) - \bar{C}(x) = 0$
Run the sum-check protocol to verify $M_{r_x}(y)$
- $P$
- Send evaluations $v_A = \bar{A}(r_x), v_B = \bar{B}(r_x), v_C = \bar{C}(r_x)$ to the verifier.
- Send the opening $v_Z = Z(r_x)$ to the verifier
- $V$
- Check $(v_A \cdot v_B - v_C) \cdot \widetilde{eq}(r_x, \tau) = e_x$
The last part of the second sum-check protocol
- $v_1 = \widetilde{A}(r_x, r_y)$
- $v_2 = \widetilde{B}(r_x, r_y)$
- $v_3 = \widetilde{C}(r_x, r_y)$
- check that $(r_A \cdot v_1 + r_B \cdot v_2 + r_C \cdot v_3) \cdot v_z = e_y$
In the last round, the verifier needs to query $g(x)$. We will construct a protocol, specific to Spartan, that allows us to query $g(x)$ in zero knowledge.
### The second zk-sum-check
Instead of constructing a generic method to evaluate $g(X)$ in zero knowledge, we focus on the $g(X)$ that is specific to Spartan. Recall that we want to prove the sum of
$$G_{io,\tau}(x) = ((\sum_{y \in \{0, 1\}^s} \widetilde{A}(x, y)\widetilde{Z}(y)) \cdot (\sum_{y \in \{0, 1\}^s}\widetilde{B}(x, y)\widetilde{Z}(y)) - \sum_{y \in \{0, 1\}^s}\widetilde{C}(x, y)\widetilde{Z}(y))\widetilde{eq}(x, \tau)$$
Looking at the terms of $\widetilde{F}(x)$, each term is in a form that is suitable for applying the sum-check protocol. Assuming for now that we can check the validity of each term (i.e. each sum of $\widetilde{A}(x, y)\widetilde{Z}(y)$, $\widetilde{B}(x, y)\widetilde{Z}(y)$ and $\widetilde{C}(x, y)\widetilde{Z}(y)$), we can check the relation between the sums as follows.
Define
- $\bar{A}(x) = \sum_{y \in \{0, 1\}^s} \widetilde{A}(x, y)\widetilde{Z}(y)$
- $\bar{B}(x) = \sum_{y \in \{0, 1\}^s} \widetilde{B}(x, y)\widetilde{Z}(y)$
- $\bar{C}(x) = \sum_{y \in \{0, 1\}^s} \widetilde{C}(x, y)\widetilde{Z}(y)$
Now, recall that we only want to evaluate $G_{io,\tau}(x)$ at the last round of the zk-sum-check, at the point formed by all the round challenges $r_x = \{r_1, r_2, \ldots, r_m\}$.
Hence the prover can provide the evaluations $v_A, v_B$ and $v_C$ to the verifier.
$$v_A = \bar{A}(r_x), v_B = \bar{B}(r_x), v_C = \bar{C}(r_x)$$
The verifier checks that the evaluation of $G_{io,\tau}$ at $r_x$ is equal to the last-round evaluation $g_m(r_m)$
$$g_m(r_m) \stackrel{?}{=} (v_A \cdot v_B - v_C)\widetilde{eq}(r_x, \tau)$$
The verifier also needs to check the validity of $\bar{A}(r_x), \bar{B}(r_x), \bar{C}(r_x)$.
This is where the second zk-sum-check comes in.
We could check each term individually, but for efficiency we use a random linear combination of the three terms. We sample challenges $r_A, r_B, r_C \in_R F_p$ and compute the random linear combination
$$
\widetilde{M} = r_A \bar{A}(r_x) + r_B\bar{B}(r_x) + r_C\bar{C}(r_x) \\
= (r_A\widetilde{A}(r_x, r_y) + r_B\widetilde{B}(r_x, r_y) + r_C\widetilde{C}(r_x, r_y))\widetilde{Z}(r_y)
$$
At the end of the second zk-sum-check, the verifier needs to evaluate $\widetilde{Z}(r_y)$. In order to evaluate it without knowing the coefficients, we use the proof_log-of-dot-prod protocol. Note that the prover needs to commit to $Z(x)$ at the beginning, so it cannot just come up with a $Z(x)$ that passes the final check of the second zk-sum-check.
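To make the shape of the two final checks concrete, here is a toy Rust sketch (arbitrary integer values in place of field elements and commitments; in the real protocol these checks are carried out over Pedersen commitments, as in `packages/hoplite/src/lib.rs`).

```rust
// Toy sketch of the two final checks a verifier performs
// (illustrative values; plain integers stand in for field elements).
fn eq_eval(t: &[i64], x: &[i64]) -> i64 {
    // eq~(t, x) = prod_i ( t_i * x_i + (1 - t_i) * (1 - x_i) )
    t.iter()
        .zip(x)
        .map(|(t_i, x_i)| t_i * x_i + (1 - t_i) * (1 - x_i))
        .product()
}

fn main() {
    let tau = [2, 5];
    let r_x = [3, 7];

    // Phase 1: the last-round sum-check evaluation must equal
    // (v_A * v_B - v_C) * eq~(r_x, tau)
    let (v_a, v_b, v_c) = (4, 6, 9);
    let expected_phase1 = (v_a * v_b - v_c) * eq_eval(&tau, &r_x);
    println!("expected phase-1 final evaluation: {expected_phase1}");

    // Phase 2: the last-round sum-check evaluation must equal
    // (r_A * A~(r_x, r_y) + r_B * B~(r_x, r_y) + r_C * C~(r_x, r_y)) * Z~(r_y)
    let (r_a, r_b, r_c) = (11, 13, 17);
    let (a_eval, b_eval, c_eval, z_eval) = (2, 3, 5, 7);
    let expected_phase2 = (r_a * a_eval + r_b * b_eval + r_c * c_eval) * z_eval;
    println!("expected phase-2 final evaluation: {expected_phase2}");
}
```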

View File

@@ -0,0 +1,395 @@
use crate::{Fp, Fq};
use libspartan::{
dense_mlpoly::{PolyCommitment, PolyEvalProof},
group::CompressedGroup,
nizk::{BulletReductionProof, DotProductProof, EqualityProof, KnowledgeProof, ProductProof},
scalar::Scalar,
sumcheck::ZKSumcheckInstanceProof,
};
use secpq_curves::{
group::{prime::PrimeCurveAffine, Curve},
CurveAffine, Secq256k1, Secq256k1Affine,
};
use secq256k1::{
affine::Group,
elliptic_curve::{
subtle::{Choice, ConditionallySelectable, ConstantTimeEq},
Field, PrimeField,
},
};
use std::option::Option;
// ############################
// `CV` stands for `Circuit Value`.
// ############################
#[derive(Debug)]
pub struct CVSumCheckProof {
pub comm_polys: Vec<Option<Secq256k1>>,
pub comm_evals: Vec<Option<Secq256k1>>,
pub proofs: Vec<CVDotProdProof>,
}
impl CVSumCheckProof {
pub fn without_witness(num_rounds: usize, poly_degree: usize) -> Self {
Self {
comm_polys: vec![None; num_rounds],
comm_evals: vec![None; num_rounds],
// We pass poly_degree + 1 because we're counting the degree 0 term as well.
proofs: vec![CVDotProdProof::without_witness(poly_degree + 1); num_rounds],
}
}
}
pub struct CVBulletReductionProof {
pub L_vec: Vec<Option<Secq256k1>>,
pub R_vec: Vec<Option<Secq256k1>>,
}
impl CVBulletReductionProof {
fn without_witness(vec_len: usize) -> Self {
assert!(vec_len % 2 == 0, "vec_len must be even");
Self {
L_vec: vec![None; vec_len / 2],
R_vec: vec![None; vec_len / 2],
}
}
}
#[derive(Debug, Clone)]
pub struct CVDotProdProof {
pub delta: Option<Secq256k1>,
pub beta: Option<Secq256k1>,
pub z: Vec<Option<Fq>>,
pub z_delta: Option<Fq>,
pub z_beta: Option<Fq>,
}
impl CVDotProdProof {
fn without_witness(vec_len: usize) -> Self {
Self {
delta: None,
beta: None,
z: vec![None; vec_len],
z_delta: None,
z_beta: None,
}
}
}
pub struct CVEqualityProof {
pub alpha: Option<Secq256k1>,
pub z: Option<Fq>,
}
impl Default for CVEqualityProof {
fn default() -> Self {
Self {
alpha: None,
z: None,
}
}
}
pub struct CVKnowledgeProof {
pub alpha: Option<Secq256k1>,
pub z1: Option<Fq>,
pub z2: Option<Fq>,
}
impl Default for CVKnowledgeProof {
fn default() -> Self {
Self {
alpha: None,
z1: None,
z2: None,
}
}
}
pub struct CVProductProof {
pub alpha: Option<Secq256k1>,
pub beta: Option<Secq256k1>,
pub delta: Option<Secq256k1>,
pub z: [Option<Fq>; 5],
}
impl Default for CVProductProof {
fn default() -> Self {
Self {
alpha: None,
beta: None,
delta: None,
z: [None; 5],
}
}
}
pub struct CVDotProductProofLog {
pub bullet_reduction_proof: CVBulletReductionProof,
pub delta: Option<Secq256k1>,
pub beta: Option<Secq256k1>,
pub z1: Option<Fq>,
pub z2: Option<Fq>,
}
impl CVDotProductProofLog {
fn without_witness(vec_len: usize) -> Self {
Self {
bullet_reduction_proof: CVBulletReductionProof::without_witness(vec_len),
delta: None,
beta: None,
z1: None,
z2: None,
}
}
}
pub struct CVPolyEvalProof {
pub proof: CVDotProductProofLog,
}
impl CVPolyEvalProof {
pub fn without_witness(vec_len: usize) -> Self {
Self {
proof: CVDotProductProofLog::without_witness(vec_len),
}
}
}
pub struct CVPolyCommitment {
pub C: Vec<Option<Secq256k1>>,
}
impl CVPolyCommitment {
pub fn without_witness(vec_len: usize) -> Self {
let C = vec![None; vec_len];
Self { C }
}
}
// Convert the types defined in the `secq256k1` crate
// to the types defined in the `secpq_curves` crate.
// This conversion is necessary because,
// `libspartan` uses `secq256k1` for curve/field operations
// whereas halo2 uses `secpq_curves`
// In general, we need to do the following two conversions
// `CompressedGroup` -> `Secq256k1`
// `Scalar` -> `Fq`
pub trait ToCircuitVal<V> {
fn to_circuit_val(&self) -> V;
}
pub trait FromCircuitVal<V> {
fn from_circuit_val(v: &V) -> Self;
}
impl FromCircuitVal<Secq256k1> for CompressedGroup {
fn from_circuit_val(point: &Secq256k1) -> CompressedGroup {
if point.is_identity().into() {
return CompressedGroup::identity();
}
let coords = point.to_affine().coordinates().unwrap();
let mut x = coords.x().to_bytes();
let mut y = coords.y().to_bytes();
x.reverse();
y.reverse();
let result = CompressedGroup::from_affine_coordinates(&x.into(), &y.into(), true);
result
}
}
impl ToCircuitVal<Fq> for Scalar {
fn to_circuit_val(&self) -> Fq {
let bytes = self.to_bytes();
Fq::from_bytes(&bytes).unwrap()
}
}
impl ToCircuitVal<CVEqualityProof> for EqualityProof {
fn to_circuit_val(&self) -> CVEqualityProof {
let alpha = Some(self.alpha.to_circuit_val());
let z = Some(self.z.to_circuit_val());
CVEqualityProof { alpha, z }
}
}
impl ToCircuitVal<CVKnowledgeProof> for KnowledgeProof {
fn to_circuit_val(&self) -> CVKnowledgeProof {
let alpha = Some(self.alpha.to_circuit_val());
let z1 = Some(self.z1.to_circuit_val());
let z2 = Some(self.z2.to_circuit_val());
CVKnowledgeProof { alpha, z1, z2 }
}
}
impl ToCircuitVal<CVProductProof> for ProductProof {
fn to_circuit_val(&self) -> CVProductProof {
let alpha = Some(self.alpha.to_circuit_val());
let beta = Some(self.beta.to_circuit_val());
let delta = Some(self.delta.to_circuit_val());
let z: [Option<Fq>; 5] = self
.z
.iter()
.map(|z_i| Some(z_i.to_circuit_val()))
.collect::<Vec<Option<Fq>>>()
.try_into()
.unwrap();
CVProductProof {
alpha,
beta,
delta,
z,
}
}
}
impl ToCircuitVal<CVPolyEvalProof> for PolyEvalProof {
fn to_circuit_val(&self) -> CVPolyEvalProof {
let dotprod_proof_log = &self.proof;
let beta = Some(dotprod_proof_log.beta.to_circuit_val());
let delta = Some(dotprod_proof_log.delta.to_circuit_val());
let z1 = Some(dotprod_proof_log.z1.to_circuit_val());
let z2 = Some(dotprod_proof_log.z2.to_circuit_val());
let cv_bullet_reduction_proof = CVBulletReductionProof {
L_vec: dotprod_proof_log
.bullet_reduction_proof
.L_vec
.iter()
.map(|val| Some(val.compress().to_circuit_val()))
.collect::<Vec<Option<Secq256k1>>>()
.try_into()
.unwrap(),
R_vec: dotprod_proof_log
.bullet_reduction_proof
.R_vec
.iter()
.map(|val| Some(val.compress().to_circuit_val()))
.collect::<Vec<Option<Secq256k1>>>()
.try_into()
.unwrap(),
};
let cv_dotprod_proof_log = CVDotProductProofLog {
delta,
beta,
z1,
z2,
bullet_reduction_proof: cv_bullet_reduction_proof,
};
CVPolyEvalProof {
proof: cv_dotprod_proof_log,
}
}
}
impl ToCircuitVal<CVPolyCommitment> for PolyCommitment {
fn to_circuit_val(&self) -> CVPolyCommitment {
let C = self
.C
.iter()
.map(|c| Some(c.to_circuit_val()))
.collect::<Vec<Option<Secq256k1>>>()
.try_into()
.unwrap();
CVPolyCommitment { C }
}
}
impl ToCircuitVal<Secq256k1> for CompressedGroup {
fn to_circuit_val(&self) -> Secq256k1 {
if self.is_identity() {
return Secq256k1::identity();
}
let mut x_bytes: [u8; 32] = (*self.x().unwrap()).try_into().unwrap();
// x_bytes is in big-endian!
x_bytes.reverse();
let x = Fp::from_bytes(&x_bytes).unwrap();
let coords = self.coordinates();
let y_odd: Choice = match coords.tag() {
secq256k1::elliptic_curve::sec1::Tag::CompressedOddY => Choice::from(1),
secq256k1::elliptic_curve::sec1::Tag::CompressedEvenY => Choice::from(0),
_ => Choice::from(0),
};
let x3 = x.square() * x;
let b = Fp::from_raw([7, 0, 0, 0]);
let y = (x3 + b).sqrt();
let res = y
.map(|y| {
let y = Fp::conditional_select(&-y, &y, y.is_odd().ct_eq(&y_odd));
let p = Secq256k1Affine::from_xy(x, y).unwrap();
p.to_curve()
})
.unwrap();
res
}
}
impl ToCircuitVal<CVDotProdProof> for DotProductProof {
fn to_circuit_val(&self) -> CVDotProdProof {
CVDotProdProof {
delta: Some(self.delta.to_circuit_val()),
beta: Some(self.beta.to_circuit_val()),
z_beta: Some(self.z_beta.to_circuit_val()),
z_delta: Some(self.z_delta.to_circuit_val()),
z: self
.z
.iter()
.map(|z_i| Some(z_i.to_circuit_val()))
.collect::<Vec<Option<Fq>>>()
.try_into()
.unwrap(),
}
}
}
impl ToCircuitVal<CVSumCheckProof> for ZKSumcheckInstanceProof {
fn to_circuit_val(&self) -> CVSumCheckProof {
let mut proofs = vec![];
let mut comm_polys = vec![];
let mut comm_evals = vec![];
for i in 0..self.proofs.len() {
proofs.push(self.proofs[i].to_circuit_val());
comm_polys.push(Some(self.comm_polys[i].to_circuit_val()));
comm_evals.push(Some(self.comm_evals[i].to_circuit_val()));
}
CVSumCheckProof {
comm_polys,
comm_evals,
proofs,
}
}
}
impl ToCircuitVal<CVBulletReductionProof> for BulletReductionProof {
fn to_circuit_val(&self) -> CVBulletReductionProof {
let mut L_vec = vec![];
let mut R_vec = vec![];
for i in 0..self.L_vec.len() {
L_vec.push(Some(self.L_vec[i].to_circuit_val()));
R_vec.push(Some(self.R_vec[i].to_circuit_val()));
}
CVBulletReductionProof { L_vec, R_vec }
}
}

View File

@@ -0,0 +1,98 @@
use crate::Fq;
use secpq_curves::Secq256k1;
use secq256k1::{affine::Group, AffinePoint};
use sha3::{
digest::{ExtendableOutput, Input},
Shake256,
};
use std::{io::Read, ops::Mul};
use crate::circuit_vals::ToCircuitVal;
pub struct MultiCommitGens {
pub G: Vec<Secq256k1>,
pub h: Secq256k1,
}
impl Default for MultiCommitGens {
fn default() -> Self {
MultiCommitGens {
G: vec![],
h: Secq256k1::default(),
}
}
}
impl From<libspartan::commitments::MultiCommitGens> for MultiCommitGens {
fn from(gens: libspartan::commitments::MultiCommitGens) -> Self {
MultiCommitGens {
G: gens
.G
.iter()
.map(|g| g.compress().to_circuit_val())
.collect(),
h: gens.h.compress().to_circuit_val(),
}
}
}
impl MultiCommitGens {
pub fn new(n: usize, label: &[u8]) -> Self {
let mut shake = Shake256::default();
shake.input(label);
shake.input(AffinePoint::generator().compress().as_bytes());
let mut reader = shake.xof_result();
let mut gens: Vec<Secq256k1> = Vec::new();
let mut uniform_bytes = [0u8; 128];
for _ in 0..n + 1 {
reader.read_exact(&mut uniform_bytes).unwrap();
let gen = AffinePoint::from_uniform_bytes(&uniform_bytes).compress();
gens.push(gen.to_circuit_val());
}
MultiCommitGens {
G: gens[..n].to_vec(),
h: gens[n],
}
}
pub fn scale(&self, s: &Fq) -> MultiCommitGens {
MultiCommitGens {
h: self.h,
G: (0..self.G.len()).map(|i| self.G[i] * s).collect(),
}
}
}
pub trait Commitments {
fn commit(&self, blind: &Fq, gens: &MultiCommitGens) -> Secq256k1;
}
impl Commitments for Fq {
fn commit(&self, blind: &Fq, gens: &MultiCommitGens) -> Secq256k1 {
gens.G[0] * self + gens.h * blind
}
}
impl Commitments for Vec<Fq> {
fn commit(&self, blind: &Fq, gens: &MultiCommitGens) -> Secq256k1 {
let mut result = Secq256k1::identity();
for (i, val) in self.iter().enumerate() {
result += gens.G[i] * val;
}
result += gens.h * blind;
result
}
}
impl Commitments for [Fq] {
fn commit(&self, blind: &Fq, gens: &MultiCommitGens) -> Secq256k1 {
let mut result = Secq256k1::identity();
for (i, val) in self.iter().enumerate() {
result += gens.G[i] * val;
}
result += gens.h * blind;
result
}
}

View File

@@ -0,0 +1,73 @@
use crate::{
circuit_vals::{CVDotProdProof, FromCircuitVal},
commitments::Commitments,
utils::to_fq,
Fq, MultiCommitGens,
};
use libspartan::{
group::CompressedGroup,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
};
use secpq_curves::{group::Curve, Secq256k1};
// Utilities
pub fn dot_prod(x: &[Fq], a: &[Fq]) -> Fq {
let mut result = Fq::zero();
for (x, a) in x.iter().zip(a.iter()) {
result += *x * *a;
}
result
}
// https://eprint.iacr.org/2017/1132.pdf
// P.18, Figure 6, steps 4
pub fn verify(
tau: &Secq256k1,
a: &[Fq],
proof: &CVDotProdProof,
com_poly: &Secq256k1,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
transcript.append_protocol_name(b"dot product proof");
CompressedGroup::from_circuit_val(com_poly).append_to_transcript(b"Cx", transcript);
CompressedGroup::from_circuit_val(tau).append_to_transcript(b"Cy", transcript);
transcript.append_message(b"a", b"begin_append_vector");
for a_i in a {
transcript.append_message(b"a", &a_i.to_bytes());
}
transcript.append_message(b"a", b"end_append_vector");
CompressedGroup::from_circuit_val(&proof.delta.unwrap())
.append_to_transcript(b"delta", transcript);
CompressedGroup::from_circuit_val(&proof.beta.unwrap())
.append_to_transcript(b"beta", transcript);
let c = to_fq(&transcript.challenge_scalar(b"c"));
// (13)
let lhs = (com_poly * c) + proof.delta.unwrap();
let rhs = proof
.z
.iter()
.map(|z_i| z_i.unwrap())
.collect::<Vec<Fq>>()
.commit(&proof.z_delta.unwrap(), gens_n);
assert!(lhs == rhs, "dot prod verification failed (13)");
// (14)
let lhs = (tau * c) + proof.beta.unwrap();
let rhs = dot_prod(
&proof.z.iter().map(|z_i| z_i.unwrap()).collect::<Vec<Fq>>(),
a,
)
.commit(&proof.z_beta.unwrap(), gens_1);
assert!(lhs == rhs, "dot prod verification failed (14)");
}

319
packages/hoplite/src/lib.rs Normal file
View File

@@ -0,0 +1,319 @@
#![allow(non_snake_case)]
use crate::circuit_vals::{CVSumCheckProof, ToCircuitVal};
use commitments::{Commitments, MultiCommitGens};
pub use libspartan::scalar::Scalar;
use libspartan::{
group::DecompressEncodedPoint,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
Instance, NIZKGens, NIZK,
};
use secpq_curves::{group::Curve, Secq256k1};
pub mod circuit_vals;
pub mod commitments;
pub mod dotprod;
pub mod poly_evaluation_proof;
pub mod proof_bullet_reduce;
pub mod proof_log_of_dotprod;
pub mod proof_of_eq;
pub mod proof_of_opening;
pub mod proof_of_prod;
pub mod sumcheck;
pub mod utils;
use utils::eval_ml_poly;
pub type Fp = secpq_curves::Fq;
pub type Fq = secpq_curves::Fp;
pub fn eq_eval(t: &[Fq], x: &[Fq]) -> Fq {
let mut result = Fq::one();
for i in 0..t.len() {
result *= t[i] * x[i] + (Fq::one() - t[i]) * (Fq::one() - x[i]);
}
result
}
/**
* Verify a SpartanNIZK proof
*/
pub fn verify_nizk(
inst: &Instance,
input: &[libspartan::scalar::Scalar],
proof: &NIZK,
gens: &NIZKGens,
) {
// Append the domain parameters to the transcript
let mut transcript = Transcript::new(b"test_verify");
transcript.append_protocol_name(b"Spartan NIZK proof");
transcript.append_message(b"R1CSInstanceDigest", &inst.digest);
transcript.append_protocol_name(b"R1CS proof");
input.append_to_transcript(b"input", &mut transcript);
proof
.r1cs_sat_proof
.comm_vars
.append_to_transcript(b"poly_commitment", &mut transcript);
let tau: Vec<Fq> = transcript
.challenge_vector(
b"challenge_tau",
proof.r1cs_sat_proof.sc_proof_phase1.proofs.len(),
)
.iter()
.map(|tau_i| tau_i.to_circuit_val())
.collect();
// Convert the generators to circuit value representations
let gens_1: MultiCommitGens = gens.gens_r1cs_sat.gens_sc.gens_1.clone().into();
let gens_3: MultiCommitGens = gens.gens_r1cs_sat.gens_sc.gens_3.clone().into();
let gens_4: MultiCommitGens = gens.gens_r1cs_sat.gens_sc.gens_4.clone().into();
let gens_pc_gens = &gens.gens_r1cs_sat.gens_pc.gens;
let gens_pc_1: MultiCommitGens = gens_pc_gens.gens_1.clone().into();
let gens_pc_n: MultiCommitGens = gens_pc_gens.gens_n.clone().into();
let sc_proof_phase1: CVSumCheckProof = proof.r1cs_sat_proof.sc_proof_phase1.to_circuit_val();
// The expected sum of the phase 1 sum-check is zero
let phase1_expected_sum = Fq::zero().commit(&Fq::zero(), &gens_1);
// comm_claim_post_phase1: Commitment to the claimed evaluation of the final round polynomial over rx
let (comm_claim_post_phase1, rx) = sumcheck::verify(
3,
&phase1_expected_sum,
&sc_proof_phase1,
&gens_1,
&gens_4,
&mut transcript,
);
// Verify Az * Bz = Cz
let (comm_Az_claim, comm_Bz_claim, comm_Cz_claim, comm_prod_Az_Bz_claims) =
&proof.r1cs_sat_proof.claims_phase2;
// First, we verify that the prover knows the opening to comm_Cz_claim
let (pok_Cz_claim, proof_prod) = &proof.r1cs_sat_proof.pok_claims_phase2;
proof_of_opening::verify(
&comm_Cz_claim.to_circuit_val(),
&pok_Cz_claim.to_circuit_val(),
&gens_1,
&mut transcript,
);
// Second, we verify Az * Bz = "Commitment to the claimed prod"
proof_of_prod::verify(
&proof_prod.to_circuit_val(),
comm_Az_claim.to_circuit_val(),
comm_Bz_claim.to_circuit_val(),
comm_prod_Az_Bz_claims.to_circuit_val(),
&gens_1,
&mut transcript,
);
comm_Az_claim.append_to_transcript(b"comm_Az_claim", &mut transcript);
comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", &mut transcript);
comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", &mut transcript);
comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", &mut transcript);
// Verify the final query to the polynomial
// Now, we verify that
// (Az * Bz - Cz) * eq(tau, rx) = Commitment to the claimed evaluation of the final round polynomial over rx
// In the first sum-check, we verify that
// (A(x, y) * Z(y) + B(x, y) * Z(y) - C(x, y) * Z(y)) * eq(tau, rx) = 0
// So the final round polynomial's evaluation over rx should equal to the
// evaluation of the above poly over rx
let eq_tau_rx = eq_eval(&tau, &rx);
let expected_claim_post_phase1 = (comm_prod_Az_Bz_claims.decompress().unwrap()
+ -comm_Cz_claim.decompress().unwrap())
.compress()
.to_circuit_val()
.to_affine()
* eq_tau_rx;
// Check the equality between the evaluation of the final round poly of the sum-check
// and the evaluation of the F(x) poly over rx
let proof_eq_sc_phase1 = &proof.r1cs_sat_proof.proof_eq_sc_phase1;
proof_of_eq::verify(
&expected_claim_post_phase1,
&comm_claim_post_phase1,
&proof_eq_sc_phase1.to_circuit_val(),
&gens_1,
&mut transcript,
);
// Verify that the commitments to Az, Bz and Cz are correct
let r_A = transcript.challenge_scalar(b"challenege_Az");
let r_B = transcript.challenge_scalar(b"challenege_Bz");
let r_C = transcript.challenge_scalar(b"challenege_Cz");
// M(r_y) = r_A * comm_Az_claim + r_B * comm_Bz_claim + r_C * comm_Cz_claim;
let comm_claim_phase2 = r_A * comm_Az_claim.decompress().unwrap()
+ r_B * comm_Bz_claim.decompress().unwrap()
+ r_C * comm_Cz_claim.decompress().unwrap();
// Verify the sum-check over M(x)
let sc_proof_phase2: CVSumCheckProof = proof.r1cs_sat_proof.sc_proof_phase2.to_circuit_val();
// comm_claim_post_phase2: Claimed evaluation of the final round polynomial over ry
let (comm_claim_post_phase2, ry) = sumcheck::verify(
2,
&comm_claim_phase2.compress().to_circuit_val(),
&sc_proof_phase2,
&gens_1,
&gens_3,
&mut transcript,
);
// Verify that the final round polynomial's evaluation over ry is equal to the
// evaluation of M(x) over ry.
// In order to do so, we need to get the evaluation of Z(X) over ry.
// We use proof_log of dot prod to verify that.
// comm_vars: Commitment to the evaluations of Z(X) over the boolean hypercube
let comm_vars = proof
.r1cs_sat_proof
.comm_vars
.C
.iter()
.map(|c_i| c_i.to_circuit_val())
.collect::<Vec<Secq256k1>>();
let poly_eval_proof = &proof.r1cs_sat_proof.proof_eval_vars_at_ry;
let comm_vars_at_ry = proof.r1cs_sat_proof.comm_vars_at_ry.to_circuit_val();
poly_evaluation_proof::verify(
&gens_pc_1,
&gens_pc_n,
&ry[1..],
&comm_vars_at_ry,
&comm_vars,
&poly_eval_proof.to_circuit_val(),
&mut transcript,
);
// Interpolate the input as a multilinear polynomial and evaluate at ry[1..]
let mut input_with_one: Vec<Fq> = vec![Fq::one()];
input_with_one.extend_from_slice(
&input
.iter()
.map(|x| x.to_circuit_val())
.collect::<Vec<Fq>>(),
);
let poly_input_eval = eval_ml_poly(&input_with_one, &ry[1..]);
let comm_poly_input_eval = poly_input_eval.commit(&Fq::zero(), &gens_pc_1);
// compute commitment to eval_Z_at_ry = (Scalar::one() - ry[0]) * self.eval_vars_at_ry + ry[0] * poly_input_eval
let comm_eval_Z_at_ry = comm_vars_at_ry * (Fq::one() - ry[0]) + comm_poly_input_eval * ry[0];
let (claimed_rx, claimed_ry) = &proof.r;
let inst_evals = inst.inst.evaluate(&claimed_rx, &claimed_ry);
let (eval_A_r, eval_B_r, eval_C_r) = inst_evals;
// Z(r_y) * (r_A * A(r_y) + r_B * B(r_y) + r_C * C(r_y))
let expected_claim_post_phase2 = comm_eval_Z_at_ry
* (r_A.to_circuit_val() * eval_A_r.to_circuit_val()
+ r_B.to_circuit_val() * eval_B_r.to_circuit_val()
+ r_C.to_circuit_val() * eval_C_r.to_circuit_val());
// Verify that the commitment to the evaluation of the final round polynomial
// is correct
proof_of_eq::verify(
&expected_claim_post_phase2,
&comm_claim_post_phase2,
&proof.r1cs_sat_proof.proof_eq_sc_phase2.to_circuit_val(),
&gens_1,
&mut transcript,
);
}
#[cfg(test)]
mod tests {
use super::*;
use libspartan::{InputsAssignment, Instance, NIZKGens, VarsAssignment};
#[test]
fn test_verify_nizk() {
// parameters of the R1CS instance
let num_cons = 2;
let num_vars = 5;
let num_inputs = 0;
// The constraint
// x ** 2 + y = ~out
// Constraints in R1CS format
// sym_1 = x * x
// ~out = sym_1 + y
// Variables
// 'y', 'x', 'sym_1', '~out', '~one'
let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new(); // <row, column, value>
let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
let one = Fq::one().to_bytes();
// sym_1 = x * x
A.push((0, 1, one));
B.push((0, 1, one));
C.push((0, 2, one));
// ~out = sym_1 + y
A.push((1, 0, one));
A.push((1, 2, one));
B.push((1, 4, one));
C.push((1, 3, one));
let vars = [
Fq::from(2).to_bytes(),
Fq::from(2).to_bytes(),
Fq::from(4).to_bytes(),
Fq::from(6).to_bytes(),
Fq::from(1).to_bytes(),
];
let inputs = vec![];
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
let assignment_vars = VarsAssignment::new(&vars).unwrap();
// Check if instance is satisfiable
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert!(res.unwrap(), "should be satisfied");
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
let mut prover_transcript = Transcript::new(b"test_verify");
let proof = NIZK::prove(
&inst,
assignment_vars,
&assignment_inputs,
&gens,
&mut prover_transcript,
);
let mut verifier_transcript = Transcript::new(b"test_verify");
// Just running the verification of the original implementation as a reference
let _result = proof.verify(&inst, &assignment_inputs, &mut verifier_transcript, &gens);
// In the phase 1 sum check com_eval uses gens_1 and dot product uses gens_4
// com_eval uses gens_1, and dot product uses gen_3
verify_nizk(&inst, &assignment_inputs.assignment, &proof, &gens);
}
}

View File

@@ -0,0 +1,50 @@
use crate::circuit_vals::CVPolyEvalProof;
use crate::{commitments::MultiCommitGens, proof_log_of_dotprod, Fq};
use libspartan::math::Math;
use libspartan::transcript::{ProofTranscript, Transcript};
use secpq_curves::{group::Group, Secq256k1};
fn evals(r: &[Fq]) -> Vec<Fq> {
let ell = r.len();
let mut evals: Vec<Fq> = vec![Fq::one(); ell.pow2()];
let mut size = 1;
for j in 0..ell {
// in each iteration, we double the size of chis
size *= 2;
for i in (0..size).rev().step_by(2) {
// copy each element from the prior iteration twice
let scalar = evals[i / 2];
evals[i] = scalar * r[j];
evals[i - 1] = scalar - evals[i];
}
}
evals
}
pub fn verify(
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
r: &[Fq], // point at which the polynomial is evaluated
C_Zr: &Secq256k1, // commitment to \widetilde{Z}(r)
comm_poly: &[Secq256k1], // commitment to the evaluations of the polynomial over the boolean hypercube
proof: &CVPolyEvalProof,
transcript: &mut Transcript,
) {
transcript.append_protocol_name(b"polynomial evaluation proof");
// Evaluate the eq poly over the boolean hypercube bounded to r
let r_left = &r[0..(r.len() / 2)];
let r_right = &r[(r.len() / 2)..];
let L = evals(r_left);
let R = evals(r_right);
// L * r_left;
let mut C_LZ = Secq256k1::identity();
for i in 0..comm_poly.len() {
C_LZ += comm_poly[i] * L[i];
}
proof_log_of_dotprod::verify(gens_1, gens_n, &R, &C_LZ, C_Zr, &proof.proof, transcript);
}

View File

@@ -0,0 +1,85 @@
use crate::{
circuit_vals::{FromCircuitVal, ToCircuitVal},
Fq,
};
use libspartan::{
group::CompressedGroup,
scalar::Scalar,
transcript::{ProofTranscript, Transcript},
};
use secpq_curves::{
group::{Curve, Group},
Secq256k1,
};
pub fn verify(
upsilon: &Secq256k1, // The upsilon calculated in this func should equal this
a: &[Fq],
G: &[Secq256k1],
upsilon_L: &[Secq256k1],
upsilon_R: &[Secq256k1],
transcript: &mut Transcript,
) -> (Secq256k1, Fq, Secq256k1) {
// #####
// 1: Compute the verification scalars
// #####
// Compute challenges
let mut challenges = vec![];
for (L, R) in upsilon_L.iter().zip(upsilon_R.iter()) {
transcript.append_point(b"L", &CompressedGroup::from_circuit_val(L));
transcript.append_point(b"R", &CompressedGroup::from_circuit_val(R));
// CompressedGroup::from_circuit_val(R).append_to_transcript(b"R", transcript);
challenges.push(transcript.challenge_scalar(b"u"));
}
let mut challenges_inv = challenges.clone();
// 2. Compute the invert of the challenges
Scalar::batch_invert(&mut challenges_inv);
// 3. Compute the square of the challenges
let challenges_sq = challenges
.iter()
.map(|c| c.square())
.collect::<Vec<Scalar>>();
let challenges_inv_sq = challenges_inv
.iter()
.map(|c| c.square())
.collect::<Vec<Scalar>>();
let mut upsilon_hat = Secq256k1::identity();
upsilon_hat += upsilon;
let n = upsilon_L.len();
for i in 0..n {
upsilon_hat += upsilon_L[i] * challenges_sq[i].to_circuit_val()
+ upsilon_R[i] * challenges_inv_sq[i].to_circuit_val();
}
let mut a = &mut a.to_owned()[..];
let mut G = &mut G.to_owned()[..];
let mut n = G.len();
while n != 1 {
n /= 2;
let (a_L, a_R) = a.split_at_mut(n);
let (G_L, G_R) = G.split_at_mut(n);
for i in 0..n {
let u = challenges[challenges.len() - n / 2 - 1];
let u_inv = challenges_inv[challenges.len() - n / 2 - 1];
a_L[i] = a_L[i] * u_inv.to_circuit_val() + a_R[i] * u.to_circuit_val();
G_L[i] = G_L[i] * u_inv.to_circuit_val() + G_R[i] * u.to_circuit_val();
}
a = a_L;
G = G_L;
}
let a_hat = a[0];
let g_hat = G[0];
(upsilon_hat, a_hat, g_hat)
}

View File

@@ -0,0 +1,77 @@
use crate::{
circuit_vals::{CVBulletReductionProof, CVDotProductProofLog, FromCircuitVal, ToCircuitVal},
commitments::MultiCommitGens,
proof_bullet_reduce,
utils::to_fq,
Fq,
};
use libspartan::{
group::CompressedGroup,
nizk::DotProductProofLog,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
};
use secpq_curves::{group::Curve, Secq256k1};
// https://eprint.iacr.org/2017/1132.pdf
// P.19 proof_log-of-dot-prod
pub fn verify(
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
a: &[Fq],
Cx: &Secq256k1, // commitment to the vector being opened (Cx)
Cy: &Secq256k1, // commitment to the evaluation (Cy)
proof: &CVDotProductProofLog,
transcript: &mut Transcript,
) {
transcript.append_protocol_name(b"dot product proof (log)");
CompressedGroup::from_circuit_val(Cx).append_to_transcript(b"Cx", transcript);
CompressedGroup::from_circuit_val(Cy).append_to_transcript(b"Cy", transcript);
transcript.append_message(b"a", b"begin_append_vector");
for a_i in a {
transcript.append_message(b"a", &a_i.to_bytes());
}
transcript.append_message(b"a", b"end_append_vector");
// sample a random base and scale the generator used for
// the output of the inner product
let r = to_fq(&transcript.challenge_scalar(b"r"));
let gens_1_scaled = gens_1.scale(&r);
// Upsilon
let Gamma = Cx + Cy * r;
let L_vec = proof
.bullet_reduction_proof
.L_vec
.iter()
.map(|L_i| L_i.unwrap())
.collect::<Vec<Secq256k1>>();
let upsilon_L = L_vec.as_slice();
let R_vec = &proof
.bullet_reduction_proof
.R_vec
.iter()
.map(|R_i| R_i.unwrap())
.collect::<Vec<Secq256k1>>();
let upsilon_R = R_vec.as_slice();
let (Gamma_hat, a_hat, g_hat) =
proof_bullet_reduce::verify(&Gamma, &a, &gens_n.G, upsilon_L, upsilon_R, transcript);
CompressedGroup::from_circuit_val(&proof.delta.unwrap())
.append_to_transcript(b"delta", transcript);
CompressedGroup::from_circuit_val(&proof.beta.unwrap())
.append_to_transcript(b"beta", transcript);
let c = to_fq(&transcript.challenge_scalar(b"c"));
let lhs = (Gamma_hat * c + proof.beta.unwrap()) * a_hat + proof.delta.unwrap();
let rhs = (g_hat + gens_1_scaled.G[0] * a_hat) * proof.z1.unwrap()
+ gens_1_scaled.h * proof.z2.unwrap();
assert!(rhs == lhs, "Proof (log) of dot prod verification failed");
}

View File

@@ -0,0 +1,34 @@
use crate::{
circuit_vals::{CVEqualityProof, FromCircuitVal},
commitments::MultiCommitGens,
utils::to_fq,
Fq,
};
use libspartan::{
group::CompressedGroup,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
};
use secpq_curves::Secq256k1;
// https://eprint.iacr.org/2017/1132.pdf
// P.17 proof-of-equality
pub fn verify(
C1: &Secq256k1,
C2: &Secq256k1,
proof: &CVEqualityProof,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
transcript.append_protocol_name(b"equality proof");
CompressedGroup::from_circuit_val(C1).append_to_transcript(b"C1", transcript);
CompressedGroup::from_circuit_val(C2).append_to_transcript(b"C2", transcript);
CompressedGroup::from_circuit_val(&proof.alpha.unwrap())
.append_to_transcript(b"alpha", transcript);
let lhs = gens_n.h * proof.z.unwrap();
let c = to_fq(&transcript.challenge_scalar(b"c"));
let rhs = (C1 - C2) * c + proof.alpha.unwrap();
assert!(rhs == lhs, "Proof of equality verification failed");
}

View File

@@ -0,0 +1,32 @@
use crate::{
circuit_vals::{CVKnowledgeProof, FromCircuitVal},
commitments::{Commitments, MultiCommitGens},
utils::to_fq,
Fq,
};
use libspartan::{
group::CompressedGroup,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
};
use secpq_curves::Secq256k1;
// https://eprint.iacr.org/2017/1132.pdf
// P.17 Knowledge of opening
pub fn verify(
C: &Secq256k1,
proof: &CVKnowledgeProof,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
transcript.append_protocol_name(b"knowledge proof");
let alpha = proof.alpha.unwrap();
CompressedGroup::from_circuit_val(C).append_to_transcript(b"C", transcript);
CompressedGroup::from_circuit_val(&alpha).append_to_transcript(b"alpha", transcript);
let c = to_fq(&transcript.challenge_scalar(b"c"));
let lhs = proof.z1.unwrap().commit(&proof.z2.unwrap(), gens_n);
let rhs = C * c + alpha;
assert!(lhs == rhs, "proof of opening verification failed");
}

View File

@@ -0,0 +1,72 @@
use crate::{
circuit_vals::CVProductProof,
commitments::{Commitments, MultiCommitGens},
utils::to_fq,
Fq,
};
use libspartan::{
group::CompressedGroup,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
};
use secpq_curves::Secq256k1;
use crate::circuit_vals::FromCircuitVal;
// https://eprint.iacr.org/2017/1132.pdf
// P.17 Figure 5
pub fn verify(
proof: &CVProductProof,
X: Secq256k1,
Y: Secq256k1,
Z: Secq256k1,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
let alpha = proof.alpha.unwrap();
let beta = proof.beta.unwrap();
let delta = proof.delta.unwrap();
let z: [Fq; 5] = proof
.z
.iter()
.map(|z_i| z_i.unwrap())
.collect::<Vec<Fq>>()
.try_into()
.unwrap();
transcript.append_protocol_name(b"product proof");
CompressedGroup::from_circuit_val(&X).append_to_transcript(b"X", transcript);
CompressedGroup::from_circuit_val(&Y).append_to_transcript(b"Y", transcript);
CompressedGroup::from_circuit_val(&Z).append_to_transcript(b"Z", transcript);
CompressedGroup::from_circuit_val(&alpha).append_to_transcript(b"alpha", transcript);
CompressedGroup::from_circuit_val(&beta).append_to_transcript(b"beta", transcript);
CompressedGroup::from_circuit_val(&delta).append_to_transcript(b"delta", transcript);
let c = to_fq(&transcript.challenge_scalar(b"c"));
let z1 = z[0];
let z2 = z[1];
let z3 = z[2];
let z4 = z[3];
let z5 = z[4];
// (7)
let lhs = alpha + X * c;
let rhs = z1.commit(&z2, gens_n);
assert!(lhs == rhs, "prod proof verification failed (7)");
// (8)
let lhs = beta + Y * c;
let rhs = z3.commit(&z4, gens_n);
assert!(lhs == rhs, "prod proof verification failed (8)");
// (9)
let lhs = delta + Z * c;
let gens_x = MultiCommitGens {
G: vec![X],
h: gens_n.h,
};
let rhs = z3.commit(&z5, &gens_x);
assert!(lhs == rhs, "prod proof verification failed (9)");
}

View File

@@ -0,0 +1,97 @@
use crate::{
circuit_vals::{CVDotProdProof, CVSumCheckProof, FromCircuitVal},
dotprod,
utils::to_fq,
Fq, MultiCommitGens,
};
use libspartan::{
group::CompressedGroup,
transcript::{AppendToTranscript, ProofTranscript, Transcript},
};
use secpq_curves::{group::Curve, Secq256k1};
#[derive(Debug, Clone)]
pub struct RoundProof {
pub dotprod_proof: CVDotProdProof,
pub com_eval: Secq256k1,
}
// This function should be able to verify proofs generated by the above `prove` function
// and also the proofs generated by the original Spartan implementation
#[allow(dead_code)]
pub fn verify(
degree_bound: usize,
target_com: &Secq256k1,
proof: &CVSumCheckProof,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) -> (Secq256k1, Vec<Fq>) {
let mut r = vec![];
for (i, round_dotprod_proof) in proof.proofs.iter().enumerate() {
let com_poly = &proof.comm_polys[i].unwrap();
let com_poly_encoded = CompressedGroup::from_circuit_val(com_poly);
com_poly_encoded.append_to_transcript(b"comm_poly", transcript);
let com_eval = &proof.comm_evals[i].unwrap();
let r_i = to_fq(&transcript.challenge_scalar(b"challenge_nextround"));
r.push(r_i);
// The sum over (0, 1) is expected to be equal to the challenge evaluation of the prev round
let com_round_sum = if i == 0 {
*target_com
} else {
proof.comm_evals[i - 1].unwrap()
};
let com_round_sum_encoded = CompressedGroup::from_circuit_val(&com_round_sum);
com_round_sum_encoded.append_to_transcript(b"comm_claim_per_round", transcript);
CompressedGroup::from_circuit_val(&com_eval.clone())
.append_to_transcript(b"comm_eval", transcript);
let w_scalar = transcript.challenge_vector(b"combine_two_claims_to_one", 2);
let w = w_scalar.iter().map(|x| to_fq(x)).collect::<Vec<Fq>>();
let a = {
// the vector to use to decommit for sum-check test
let a_sc = {
let mut a = vec![Fq::one(); degree_bound + 1];
a[0] += Fq::one();
a
};
// the vector to use to decommit for evaluation
let a_eval = {
let mut a = vec![Fq::one(); degree_bound + 1];
for j in 1..a.len() {
a[j] = a[j - 1] * r_i;
}
a
};
// take weighted sum of the two vectors using w
assert_eq!(a_sc.len(), a_eval.len());
(0..a_sc.len())
.map(|i| w[0] * a_sc[i] + w[1] * a_eval[i])
.collect::<Vec<Fq>>()
};
let tau = com_round_sum * w[0] + com_eval * w[1];
// Check that the dot product proofs are valid
dotprod::verify(
&tau,
&a,
&round_dotprod_proof,
&com_poly,
&gens_1,
&gens_n,
transcript,
);
}
(proof.comm_evals[proof.comm_evals.len() - 1].unwrap(), r)
}

View File

@@ -0,0 +1,49 @@
use libspartan::math::Math;
use crate::{Fp, Fq};
pub fn hypercube(n: u32) -> Vec<Vec<u8>> {
let mut v = vec![];
for i in 0..(2u64.pow(n)) {
let mut row = vec![];
for j in 0..n {
row.push(((i >> j) & 1) as u8);
}
v.push(row);
}
v
}
pub fn to_fp(x: &libspartan::scalar::Scalar) -> Fp {
Fp::from_bytes(&x.to_bytes().into()).unwrap()
}
pub fn to_fq(x: &libspartan::scalar::Scalar) -> Fq {
Fq::from_bytes(&x.to_bytes().into()).unwrap()
}
fn compute_chi(e: &[Fq], x: &[Fq]) -> Fq {
let mut chi = Fq::one();
for i in 0..e.len() {
chi *= e[i] * x[i] + (Fq::one() - e[i]) * (Fq::one() - x[i]);
}
chi
}
pub fn eval_ml_poly(z: &[Fq], r: &[Fq]) -> Fq {
let mut eval = Fq::zero();
// compute chi
for i in 0..z.len() {
let i_bits: Vec<Fq> = i
.get_bits(r.len())
.iter()
.map(|b| if *b { Fq::one() } else { Fq::zero() })
.collect();
eval += compute_chi(&i_bits, r) * z[i];
}
eval
}

View File

@@ -0,0 +1,29 @@
[package]
name = "hoplite_circuit"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = ["dev-graph"]
dev-graph = ["halo2_proofs/dev-graph", "plotters"]
[dependencies]
halo2_proofs = { git = "https://github.com/privacy-scaling-explorations/halo2", tag = "v2023_01_20" }
halo2-base = { git = "https://github.com/axiom-crypto/halo2-lib.git", default-features = false, features = ["halo2-pse"] }
halo2-ecc = { git = "https://github.com/axiom-crypto/halo2-lib.git", default-features = false, features = ["halo2-pse"] }
num-bigint = { version = "0.4", features = ["rand"] }
secpq_curves = { git = "https://github.com/DanTehrani/secpq_curves.git" }
plotters = { version = "0.3.0", optional = true }
tabbycat = { version = "0.1", features = ["attributes"], optional = true }
spartan = { git = "https://github.com/DanTehrani/Spartan-secq", branch = "hoplite" }
secq256k1 = { git = "https://github.com/personaelabs/spartan-ecdsa", branch = "main" }
hoplite = { path = "../hoplite" }
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
circuit_reader = { path = "../circuit_reader" }
bincode = "1.2.1"
num-traits = "0.2.15"
[dev-dependencies]
ark-std = { version = "0.3.0", features = ["print-trace"] }

View File

@@ -0,0 +1,17 @@
{
"name": "node",
"version": "1.0.0",
"main": "node.bench.ts",
"license": "MIT",
"scripts": {
"prove": "ts-node ./src/prover.ts"
},
"dependencies": {
"@ethereumjs/util": "^8.0.3",
"@personaelabs/spartan-ecdsa": "*"
},
"devDependencies": {
"ts-node": "^10.9.1",
"typescript": "^4.9.4"
}
}

View File

@@ -0,0 +1,84 @@
import * as fs from "fs";
import {
MembershipProver,
Poseidon,
Tree,
MembershipVerifier
} from "@personaelabs/spartan-ecdsa";
import {
hashPersonalMessage,
ecsign,
ecrecover,
privateToPublic
} from "@ethereumjs/util";
import * as path from "path";
const prove = async () => {
const privKey = Buffer.from("".padStart(16, "🧙"), "utf16le");
const msg = Buffer.from("harry potter");
const msgHash = hashPersonalMessage(msg);
const { v, r, s } = ecsign(msgHash, privKey);
const pubKey = ecrecover(msgHash, v, r, s);
const sig = `0x${r.toString("hex")}${s.toString("hex")}${v.toString(16)}`;
// Init the Poseidon hash
const poseidon = new Poseidon();
await poseidon.initWasm();
const treeDepth = 20;
const tree = new Tree(treeDepth, poseidon);
// Get the prover public key hash
const proverPubkeyHash = poseidon.hashPubKey(pubKey);
// Insert prover public key hash into the tree
tree.insert(proverPubkeyHash);
// Insert other members into the tree
for (const member of ["🕵️", "🥷", "👩‍🔬"]) {
const pubKey = privateToPublic(
Buffer.from("".padStart(16, member), "utf16le")
);
tree.insert(poseidon.hashPubKey(pubKey));
}
// Compute the merkle proof
const index = tree.indexOf(proverPubkeyHash);
const merkleProof = tree.createProof(index);
const proverConfig = {
circuit: path.join(
__dirname,
"../../../circuits/build/pubkey_membership/pubkey_membership.circuit"
),
witnessGenWasm: path.join(
__dirname,
"../../../circuits/build/pubkey_membership/pubkey_membership_js/pubkey_membership.wasm"
),
enableProfiler: true
};
// Init the prover
const prover = new MembershipProver(proverConfig);
await prover.initWasm();
// Prove membership
const { proof, publicInput } = await prover.prove(sig, msgHash, merkleProof);
fs.writeFileSync("./proof.bin", proof);
fs.writeFileSync("./input.bin", publicInput.serialize());
const verifierConfig = {
circuit: proverConfig.circuit,
enableProfiler: true
};
// Init verifier
const verifier = new MembershipVerifier(verifierConfig);
await verifier.initWasm();
// Verify proof
await verifier.verify(proof, publicInput.serialize());
};
prove();
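This script is the `prove` entry of the package.json above, so it can be run with `yarn prove`; it writes the generated proof and the serialized public input to proof.bin and input.bin.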

View File

@@ -0,0 +1,21 @@
{
"include": [
"./src/**/*",
],
"exclude": [
"./node_modules",
"./build"
],
"compilerOptions": {
"target": "ES6",
"module": "CommonJS",
"rootDir": "./src",
"moduleResolution": "node",
"allowJs": true,
"outDir": "./build",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": true,
"skipLibCheck": true
}
}

View File

@@ -0,0 +1,515 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
"@chainsafe/as-sha256@^0.3.1":
version "0.3.1"
resolved "https://registry.yarnpkg.com/@chainsafe/as-sha256/-/as-sha256-0.3.1.tgz#3639df0e1435cab03f4d9870cc3ac079e57a6fc9"
integrity sha512-hldFFYuf49ed7DAakWVXSJODuq3pzJEguD8tQ7h+sGkM18vja+OFoJI9krnGmgzyuZC2ETX0NOIcCTy31v2Mtg==
"@chainsafe/persistent-merkle-tree@^0.4.2":
version "0.4.2"
resolved "https://registry.yarnpkg.com/@chainsafe/persistent-merkle-tree/-/persistent-merkle-tree-0.4.2.tgz#4c9ee80cc57cd3be7208d98c40014ad38f36f7ff"
integrity sha512-lLO3ihKPngXLTus/L7WHKaw9PnNJWizlOF1H9NNzHP6Xvh82vzg9F2bzkXhYIFshMZ2gTCEz8tq6STe7r5NDfQ==
dependencies:
"@chainsafe/as-sha256" "^0.3.1"
"@chainsafe/ssz@0.9.4":
version "0.9.4"
resolved "https://registry.yarnpkg.com/@chainsafe/ssz/-/ssz-0.9.4.tgz#696a8db46d6975b600f8309ad3a12f7c0e310497"
integrity sha512-77Qtg2N1ayqs4Bg/wvnWfg5Bta7iy7IRh8XqXh7oNMeP2HBbBwx8m6yTpA8p0EHItWPEBkgZd5S5/LSlp3GXuQ==
dependencies:
"@chainsafe/as-sha256" "^0.3.1"
"@chainsafe/persistent-merkle-tree" "^0.4.2"
case "^1.6.3"
"@cspotcode/source-map-support@^0.8.0":
version "0.8.1"
resolved "https://registry.yarnpkg.com/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz#00629c35a688e05a88b1cda684fb9d5e73f000a1"
integrity sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==
dependencies:
"@jridgewell/trace-mapping" "0.3.9"
"@ethereumjs/rlp@^4.0.1":
version "4.0.1"
resolved "https://registry.yarnpkg.com/@ethereumjs/rlp/-/rlp-4.0.1.tgz#626fabfd9081baab3d0a3074b0c7ecaf674aaa41"
integrity sha512-tqsQiBQDQdmPWE1xkkBq4rlSW5QZpLOUJ5RJh2/9fug+q9tnUhuZoVLk7s0scUIKTOzEtR72DFBXI4WiZcMpvw==
"@ethereumjs/util@^8.0.3":
version "8.0.5"
resolved "https://registry.yarnpkg.com/@ethereumjs/util/-/util-8.0.5.tgz#b9088fc687cc13f0c1243d6133d145dfcf3fe446"
integrity sha512-259rXKK3b3D8HRVdRmlOEi6QFvwxdt304hhrEAmpZhsj7ufXEOTIc9JRZPMnXatKjECokdLNBcDOFBeBSzAIaw==
dependencies:
"@chainsafe/ssz" "0.9.4"
"@ethereumjs/rlp" "^4.0.1"
ethereum-cryptography "^1.1.2"
"@iden3/bigarray@0.0.2":
version "0.0.2"
resolved "https://registry.yarnpkg.com/@iden3/bigarray/-/bigarray-0.0.2.tgz#6fc4ba5be18daf8a26ee393f2fb62b80d98c05e9"
integrity sha512-Xzdyxqm1bOFF6pdIsiHLLl3HkSLjbhqJHVyqaTxXt3RqXBEnmsUmEW47H7VOi/ak7TdkRpNkxjyK5Zbkm+y52g==
"@iden3/binfileutils@0.0.11":
version "0.0.11"
resolved "https://registry.yarnpkg.com/@iden3/binfileutils/-/binfileutils-0.0.11.tgz#9ffbbcc1279f2b2182bb6dcff4eee8a5b2167911"
integrity sha512-LylnJoZ0CTdgErnKY8OxohvW4K+p6UHD3sxt+3P9AmMyBQjYR4IpoqoYZZ+9aMj89cmCQ21UvdhndAx04er3NA==
dependencies:
fastfile "0.0.20"
ffjavascript "^0.2.48"
"@jridgewell/resolve-uri@^3.0.3":
version "3.1.0"
resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz#2203b118c157721addfe69d47b70465463066d78"
integrity sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==
"@jridgewell/sourcemap-codec@^1.4.10":
version "1.4.14"
resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24"
integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==
"@jridgewell/trace-mapping@0.3.9":
version "0.3.9"
resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz#6534fd5933a53ba7cbf3a17615e273a0d1273ff9"
integrity sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==
dependencies:
"@jridgewell/resolve-uri" "^3.0.3"
"@jridgewell/sourcemap-codec" "^1.4.10"
"@noble/hashes@1.2.0", "@noble/hashes@~1.2.0":
version "1.2.0"
resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.2.0.tgz#a3150eeb09cc7ab207ebf6d7b9ad311a9bdbed12"
integrity sha512-FZfhjEDbT5GRswV3C6uvLPHMiVD6lQBmpoX5+eSiPaMTXte/IKqI5dykDxzZB/WBeK/CDuQRBWarPdi3FNY2zQ==
"@noble/secp256k1@1.7.1", "@noble/secp256k1@~1.7.0":
version "1.7.1"
resolved "https://registry.yarnpkg.com/@noble/secp256k1/-/secp256k1-1.7.1.tgz#b251c70f824ce3ca7f8dc3df08d58f005cc0507c"
integrity sha512-hOUk6AyBFmqVrv7k5WAw/LpszxVbj9gGN4JRkIX52fdFAj1UA61KXmZDvqVEm+pOyec3+fIeZB02LYa/pWOArw==
"@personaelabs/spartan-ecdsa@*":
version "1.0.2"
resolved "https://registry.yarnpkg.com/@personaelabs/spartan-ecdsa/-/spartan-ecdsa-1.0.2.tgz#22a858e1d7d5729a7198873557d3845ebd2af4f2"
integrity sha512-Wi5NykpV2slwDOSqy4DQ2Q1RicPR4U1W1S0MRZ8XMSYhND10GKIsWVcRqIB5VDh+ijbu1FfRcE8eT8Ov6TR+DA==
dependencies:
"@ethereumjs/util" "^8.0.3"
"@zk-kit/incremental-merkle-tree" "^1.0.0"
elliptic "^6.5.4"
snarkjs "^0.5.0"
"@scure/base@~1.1.0":
version "1.1.1"
resolved "https://registry.yarnpkg.com/@scure/base/-/base-1.1.1.tgz#ebb651ee52ff84f420097055f4bf46cfba403938"
integrity sha512-ZxOhsSyxYwLJj3pLZCefNitxsj093tb2vq90mp2txoYeBqbcjDjqFhyM8eUjq/uFm6zJ+mUuqxlS2FkuSY1MTA==
"@scure/bip32@1.1.5":
version "1.1.5"
resolved "https://registry.yarnpkg.com/@scure/bip32/-/bip32-1.1.5.tgz#d2ccae16dcc2e75bc1d75f5ef3c66a338d1ba300"
integrity sha512-XyNh1rB0SkEqd3tXcXMi+Xe1fvg+kUIcoRIEujP1Jgv7DqW2r9lg3Ah0NkFaCs9sTkQAQA8kw7xiRXzENi9Rtw==
dependencies:
"@noble/hashes" "~1.2.0"
"@noble/secp256k1" "~1.7.0"
"@scure/base" "~1.1.0"
"@scure/bip39@1.1.1":
version "1.1.1"
resolved "https://registry.yarnpkg.com/@scure/bip39/-/bip39-1.1.1.tgz#b54557b2e86214319405db819c4b6a370cf340c5"
integrity sha512-t+wDck2rVkh65Hmv280fYdVdY25J9YeEUIgn2LG1WM6gxFkGzcksoDiUkWVpVp3Oex9xGC68JU2dSbUfwZ2jPg==
dependencies:
"@noble/hashes" "~1.2.0"
"@scure/base" "~1.1.0"
"@tsconfig/node10@^1.0.7":
version "1.0.9"
resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.9.tgz#df4907fc07a886922637b15e02d4cebc4c0021b2"
integrity sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==
"@tsconfig/node12@^1.0.7":
version "1.0.11"
resolved "https://registry.yarnpkg.com/@tsconfig/node12/-/node12-1.0.11.tgz#ee3def1f27d9ed66dac6e46a295cffb0152e058d"
integrity sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==
"@tsconfig/node14@^1.0.0":
version "1.0.3"
resolved "https://registry.yarnpkg.com/@tsconfig/node14/-/node14-1.0.3.tgz#e4386316284f00b98435bf40f72f75a09dabf6c1"
integrity sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==
"@tsconfig/node16@^1.0.2":
version "1.0.3"
resolved "https://registry.yarnpkg.com/@tsconfig/node16/-/node16-1.0.3.tgz#472eaab5f15c1ffdd7f8628bd4c4f753995ec79e"
integrity sha512-yOlFc+7UtL/89t2ZhjPvvB/DeAr3r+Dq58IgzsFkOAvVC6NMJXmCGjbptdXdR9qsX7pKcTL+s87FtYREi2dEEQ==
"@zk-kit/incremental-merkle-tree@^1.0.0":
version "1.0.0"
resolved "https://registry.yarnpkg.com/@zk-kit/incremental-merkle-tree/-/incremental-merkle-tree-1.0.0.tgz#5a9ec2a2ebcb00972035b175c58906651ef6aa39"
integrity sha512-2iRLZfHnZ6wKE+oZN2CnpkKYCE5f5dpv6YRIwLDCz0xwJZrIMQ81AamFBdxPesQSYMMP0GkC0iv1rm6gxAL2Ow==
acorn-walk@^8.1.1:
version "8.2.0"
resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.2.0.tgz#741210f2e2426454508853a2f44d0ab83b7f69c1"
integrity sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==
acorn@^8.4.1:
version "8.8.2"
resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.8.2.tgz#1b2f25db02af965399b9776b0c2c391276d37c4a"
integrity sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==
ansi-styles@^4.1.0:
version "4.3.0"
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937"
integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==
dependencies:
color-convert "^2.0.1"
arg@^4.1.0:
version "4.1.3"
resolved "https://registry.yarnpkg.com/arg/-/arg-4.1.3.tgz#269fc7ad5b8e42cb63c896d5666017261c144089"
integrity sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==
async@^3.2.3:
version "3.2.4"
resolved "https://registry.yarnpkg.com/async/-/async-3.2.4.tgz#2d22e00f8cddeb5fde5dd33522b56d1cf569a81c"
integrity sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ==
b4a@^1.0.1:
version "1.6.1"
resolved "https://registry.yarnpkg.com/b4a/-/b4a-1.6.1.tgz#9effac93a469a868d024e16fd77162c653544cbd"
integrity sha512-AsKjNhz72yxteo/0EtQEiwkMUgk/tGmycXlbG4g3Ard2/ULtNLUykGOkeK0egmN27h0xMAhb76jYccW+XTBExA==
balanced-match@^1.0.0:
version "1.0.2"
resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
bfj@^7.0.2:
version "7.0.2"
resolved "https://registry.yarnpkg.com/bfj/-/bfj-7.0.2.tgz#1988ce76f3add9ac2913fd8ba47aad9e651bfbb2"
integrity sha512-+e/UqUzwmzJamNF50tBV6tZPTORow7gQ96iFow+8b562OdMpEK0BcJEq2OSPEDmAbSMBQ7PKZ87ubFkgxpYWgw==
dependencies:
bluebird "^3.5.5"
check-types "^11.1.1"
hoopy "^0.1.4"
tryer "^1.0.1"
blake2b-wasm@^2.4.0:
version "2.4.0"
resolved "https://registry.yarnpkg.com/blake2b-wasm/-/blake2b-wasm-2.4.0.tgz#9115649111edbbd87eb24ce7c04b427e4e2be5be"
integrity sha512-S1kwmW2ZhZFFFOghcx73+ZajEfKBqhP82JMssxtLVMxlaPea1p9uoLiUZ5WYyHn0KddwbLc+0vh4wR0KBNoT5w==
dependencies:
b4a "^1.0.1"
nanoassert "^2.0.0"
bluebird@^3.5.5:
version "3.7.2"
resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f"
integrity sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==
bn.js@^4.11.9:
version "4.12.0"
resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.12.0.tgz#775b3f278efbb9718eec7361f483fb36fbbfea88"
integrity sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==
brace-expansion@^1.1.7:
version "1.1.11"
resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd"
integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==
dependencies:
balanced-match "^1.0.0"
concat-map "0.0.1"
brace-expansion@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-2.0.1.tgz#1edc459e0f0c548486ecf9fc99f2221364b9a0ae"
integrity sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==
dependencies:
balanced-match "^1.0.0"
brorand@^1.1.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f"
integrity sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w==
case@^1.6.3:
version "1.6.3"
resolved "https://registry.yarnpkg.com/case/-/case-1.6.3.tgz#0a4386e3e9825351ca2e6216c60467ff5f1ea1c9"
integrity sha512-mzDSXIPaFwVDvZAHqZ9VlbyF4yyXRuX6IvB06WvPYkqJVO24kX1PPhv9bfpKNFZyxYFmmgo03HUiD8iklmJYRQ==
chalk@^4.0.2:
version "4.1.2"
resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01"
integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==
dependencies:
ansi-styles "^4.1.0"
supports-color "^7.1.0"
check-types@^11.1.1:
version "11.2.2"
resolved "https://registry.yarnpkg.com/check-types/-/check-types-11.2.2.tgz#7afc0b6a860d686885062f2dba888ba5710335b4"
integrity sha512-HBiYvXvn9Z70Z88XKjz3AEKd4HJhBXsa3j7xFnITAzoS8+q6eIGi8qDB8FKPBAjtuxjI/zFpwuiCb8oDtKOYrA==
circom_runtime@0.1.21:
version "0.1.21"
resolved "https://registry.yarnpkg.com/circom_runtime/-/circom_runtime-0.1.21.tgz#0ee93bb798b5afb8ecec30725ed14d94587a999b"
integrity sha512-qTkud630B/GK8y76hnOaaS1aNuF6prfV0dTrkeRsiJKnlP1ryQbP2FWLgDOPqn6aKyaPlam+Z+DTbBhkEzh8dA==
dependencies:
ffjavascript "0.2.56"
color-convert@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3"
integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==
dependencies:
color-name "~1.1.4"
color-name@~1.1.4:
version "1.1.4"
resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2"
integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
concat-map@0.0.1:
version "0.0.1"
resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==
create-require@^1.1.0:
version "1.1.1"
resolved "https://registry.yarnpkg.com/create-require/-/create-require-1.1.1.tgz#c1d7e8f1e5f6cfc9ff65f9cd352d37348756c333"
integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==
diff@^4.0.1:
version "4.0.2"
resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d"
integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==
ejs@^3.1.6:
version "3.1.8"
resolved "https://registry.yarnpkg.com/ejs/-/ejs-3.1.8.tgz#758d32910c78047585c7ef1f92f9ee041c1c190b"
integrity sha512-/sXZeMlhS0ArkfX2Aw780gJzXSMPnKjtspYZv+f3NiKLlubezAHDU5+9xz6gd3/NhG3txQCo6xlglmTS+oTGEQ==
dependencies:
jake "^10.8.5"
elliptic@^6.5.4:
version "6.5.4"
resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.4.tgz#da37cebd31e79a1367e941b592ed1fbebd58abbb"
integrity sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ==
dependencies:
bn.js "^4.11.9"
brorand "^1.1.0"
hash.js "^1.0.0"
hmac-drbg "^1.0.1"
inherits "^2.0.4"
minimalistic-assert "^1.0.1"
minimalistic-crypto-utils "^1.0.1"
ethereum-cryptography@^1.1.2:
version "1.2.0"
resolved "https://registry.yarnpkg.com/ethereum-cryptography/-/ethereum-cryptography-1.2.0.tgz#5ccfa183e85fdaf9f9b299a79430c044268c9b3a"
integrity sha512-6yFQC9b5ug6/17CQpCyE3k9eKBMdhyVjzUy1WkiuY/E4vj/SXDBbCw8QEIaXqf0Mf2SnY6RmpDcwlUmBSS0EJw==
dependencies:
"@noble/hashes" "1.2.0"
"@noble/secp256k1" "1.7.1"
"@scure/bip32" "1.1.5"
"@scure/bip39" "1.1.1"
fastfile@0.0.20:
version "0.0.20"
resolved "https://registry.yarnpkg.com/fastfile/-/fastfile-0.0.20.tgz#794a143d58cfda2e24c298e5ef619c748c8a1879"
integrity sha512-r5ZDbgImvVWCP0lA/cGNgQcZqR+aYdFx3u+CtJqUE510pBUVGMn4ulL/iRTI4tACTYsNJ736uzFxEBXesPAktA==
ffjavascript@0.2.56:
version "0.2.56"
resolved "https://registry.yarnpkg.com/ffjavascript/-/ffjavascript-0.2.56.tgz#3509f98fcbd3e44ea93cd23519071b76d6eae433"
integrity sha512-em6G5Lrj7ucIqj4TYEgyoHs/j99Urwwqa4+YxEVY2hggnpRimVj+noX5pZQTxI1pvtiekZI4rG65JBf0xraXrg==
dependencies:
wasmbuilder "0.0.16"
wasmcurves "0.2.0"
web-worker "^1.2.0"
ffjavascript@^0.2.48:
version "0.2.57"
resolved "https://registry.yarnpkg.com/ffjavascript/-/ffjavascript-0.2.57.tgz#ba1be96015b2688192e49f2f4de2cc5150fd8594"
integrity sha512-V+vxZ/zPNcthrWmqfe/1YGgqdkTamJeXiED0tsk7B84g40DKlrTdx47IqZuiygqAVG6zMw4qYuvXftIJWsmfKQ==
dependencies:
wasmbuilder "0.0.16"
wasmcurves "0.2.0"
web-worker "^1.2.0"
filelist@^1.0.1:
version "1.0.4"
resolved "https://registry.yarnpkg.com/filelist/-/filelist-1.0.4.tgz#f78978a1e944775ff9e62e744424f215e58352b5"
integrity sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==
dependencies:
minimatch "^5.0.1"
has-flag@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b"
integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==
hash.js@^1.0.0, hash.js@^1.0.3:
version "1.1.7"
resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.7.tgz#0babca538e8d4ee4a0f8988d68866537a003cf42"
integrity sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==
dependencies:
inherits "^2.0.3"
minimalistic-assert "^1.0.1"
hmac-drbg@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz#d2745701025a6c775a6c545793ed502fc0c649a1"
integrity sha512-Tti3gMqLdZfhOQY1Mzf/AanLiqh1WTiJgEj26ZuYQ9fbkLomzGchCws4FyrSd4VkpBfiNhaE1On+lOz894jvXg==
dependencies:
hash.js "^1.0.3"
minimalistic-assert "^1.0.0"
minimalistic-crypto-utils "^1.0.1"
hoopy@^0.1.4:
version "0.1.4"
resolved "https://registry.yarnpkg.com/hoopy/-/hoopy-0.1.4.tgz#609207d661100033a9a9402ad3dea677381c1b1d"
integrity sha512-HRcs+2mr52W0K+x8RzcLzuPPmVIKMSv97RGHy0Ea9y/mpcaK+xTrjICA04KAHi4GRzxliNqNJEFYWHghy3rSfQ==
inherits@^2.0.3, inherits@^2.0.4:
version "2.0.4"
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c"
integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
jake@^10.8.5:
version "10.8.5"
resolved "https://registry.yarnpkg.com/jake/-/jake-10.8.5.tgz#f2183d2c59382cb274226034543b9c03b8164c46"
integrity sha512-sVpxYeuAhWt0OTWITwT98oyV0GsXyMlXCF+3L1SuafBVUIr/uILGRB+NqwkzhgXKvoJpDIpQvqkUALgdmQsQxw==
dependencies:
async "^3.2.3"
chalk "^4.0.2"
filelist "^1.0.1"
minimatch "^3.0.4"
js-sha3@^0.8.0:
version "0.8.0"
resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.8.0.tgz#b9b7a5da73afad7dedd0f8c463954cbde6818840"
integrity sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q==
logplease@^1.2.15:
version "1.2.15"
resolved "https://registry.yarnpkg.com/logplease/-/logplease-1.2.15.tgz#3da442e93751a5992cc19010a826b08d0293c48a"
integrity sha512-jLlHnlsPSJjpwUfcNyUxXCl33AYg2cHhIf9QhGL2T4iPT0XPB+xP1LRKFPgIg1M/sg9kAJvy94w9CzBNrfnstA==
make-error@^1.1.1:
version "1.3.6"
resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2"
integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==
minimalistic-assert@^1.0.0, minimalistic-assert@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7"
integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==
minimalistic-crypto-utils@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz#f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a"
integrity sha512-JIYlbt6g8i5jKfJ3xz7rF0LXmv2TkDxBLUkiBeZ7bAx4GnnNMr8xFpGnOxn6GhTEHx3SjRrZEoU+j04prX1ktg==
minimatch@^3.0.4:
version "3.1.2"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b"
integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==
dependencies:
brace-expansion "^1.1.7"
minimatch@^5.0.1:
version "5.1.6"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-5.1.6.tgz#1cfcb8cf5522ea69952cd2af95ae09477f122a96"
integrity sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==
dependencies:
brace-expansion "^2.0.1"
nanoassert@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/nanoassert/-/nanoassert-2.0.0.tgz#a05f86de6c7a51618038a620f88878ed1e490c09"
integrity sha512-7vO7n28+aYO4J+8w96AzhmU8G+Y/xpPDJz/se19ICsqj/momRbb9mh9ZUtkoJ5X3nTnPdhEJyc0qnM6yAsHBaA==
r1csfile@0.0.41:
version "0.0.41"
resolved "https://registry.yarnpkg.com/r1csfile/-/r1csfile-0.0.41.tgz#e3d2709d36923156dd1fc2db9858987b30c92948"
integrity sha512-Q1WDF3u1vYeAwjHo4YuddkA8Aq0TulbKjmGm99+Atn13Lf5fTsMZBnBV9T741w8iSyPFG6Uh6sapQby77sREqA==
dependencies:
"@iden3/bigarray" "0.0.2"
"@iden3/binfileutils" "0.0.11"
fastfile "0.0.20"
ffjavascript "0.2.56"
snarkjs@^0.5.0:
version "0.5.0"
resolved "https://registry.yarnpkg.com/snarkjs/-/snarkjs-0.5.0.tgz#cf26bf1d3835eb16b4b330a438bad9824837d6b0"
integrity sha512-KWz8mZ2Y+6wvn6GGkQo6/ZlKwETdAGohd40Lzpwp5TUZCn6N6O4Az1SuX1rw/qREGL6Im+ycb19suCFE8/xaKA==
dependencies:
"@iden3/binfileutils" "0.0.11"
bfj "^7.0.2"
blake2b-wasm "^2.4.0"
circom_runtime "0.1.21"
ejs "^3.1.6"
fastfile "0.0.20"
ffjavascript "0.2.56"
js-sha3 "^0.8.0"
logplease "^1.2.15"
r1csfile "0.0.41"
supports-color@^7.1.0:
version "7.2.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da"
integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==
dependencies:
has-flag "^4.0.0"
tryer@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/tryer/-/tryer-1.0.1.tgz#f2c85406800b9b0f74c9f7465b81eaad241252f8"
integrity sha512-c3zayb8/kWWpycWYg87P71E1S1ZL6b6IJxfb5fvsUgsf0S2MVGaDhDXXjDMpdCpfWXqptc+4mXwmiy1ypXqRAA==
ts-node@^10.9.1:
version "10.9.1"
resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.9.1.tgz#e73de9102958af9e1f0b168a6ff320e25adcff4b"
integrity sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==
dependencies:
"@cspotcode/source-map-support" "^0.8.0"
"@tsconfig/node10" "^1.0.7"
"@tsconfig/node12" "^1.0.7"
"@tsconfig/node14" "^1.0.0"
"@tsconfig/node16" "^1.0.2"
acorn "^8.4.1"
acorn-walk "^8.1.1"
arg "^4.1.0"
create-require "^1.1.0"
diff "^4.0.1"
make-error "^1.1.1"
v8-compile-cache-lib "^3.0.1"
yn "3.1.1"
typescript@^4.9.4:
version "4.9.5"
resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.9.5.tgz#095979f9bcc0d09da324d58d03ce8f8374cbe65a"
integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==
v8-compile-cache-lib@^3.0.1:
version "3.0.1"
resolved "https://registry.yarnpkg.com/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz#6336e8d71965cb3d35a1bbb7868445a7c05264bf"
integrity sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==
wasmbuilder@0.0.16:
version "0.0.16"
resolved "https://registry.yarnpkg.com/wasmbuilder/-/wasmbuilder-0.0.16.tgz#f34c1f2c047d2f6e1065cbfec5603988f16d8549"
integrity sha512-Qx3lEFqaVvp1cEYW7Bfi+ebRJrOiwz2Ieu7ZG2l7YyeSJIok/reEQCQCuicj/Y32ITIJuGIM9xZQppGx5LrQdA==
wasmcurves@0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/wasmcurves/-/wasmcurves-0.2.0.tgz#ccfc5a7d3778b6e0768b82a9336c80054f9bc0cf"
integrity sha512-3e2rbxdujOwaod657gxgmdhZNn+i1qKdHO3Y/bK+8E7bV8ttV/fu5FO4/WLBACF375cK0QDLOP+65Na63qYuWA==
dependencies:
wasmbuilder "0.0.16"
web-worker@^1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/web-worker/-/web-worker-1.2.0.tgz#5d85a04a7fbc1e7db58f66595d7a3ac7c9c180da"
integrity sha512-PgF341avzqyx60neE9DD+XS26MMNMoUQRz9NOZwW32nPQrF6p77f1htcnjBSEV8BGMKZ16choqUG4hyI0Hx7mA==
yn@3.1.1:
version "3.1.1"
resolved "https://registry.yarnpkg.com/yn/-/yn-3.1.1.tgz#1e87401a09d767c1d5eab26a6e4c185182d2eb50"
integrity sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==

View File

@@ -0,0 +1,185 @@
use crate::{
chips::pedersen_commit::PedersenCommitChip,
transcript::HopliteTranscript,
{FpChip, Fq, FqChip},
};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::bigint::CRTInteger;
use halo2_ecc::ecc::{EcPoint, EccChip};
use halo2_ecc::fields::FieldChip;
use halo2_proofs::circuit::Value;
use hoplite::{
circuit_vals::{CVDotProdProof, ToCircuitVal},
commitments::MultiCommitGens,
};
use libspartan::transcript::{ProofTranscript, Transcript};
use super::{
secq256k1::Secq256k1Chip,
utils::{Assign, AssignArray},
};
#[derive(Clone, Debug)]
pub struct AssignedZKDotProdProof<'v, const DIMENSION: usize, F: PrimeField> {
pub delta: EcPoint<F, CRTInteger<'v, F>>,
pub beta: EcPoint<F, CRTInteger<'v, F>>,
pub z: [CRTInteger<'v, F>; DIMENSION],
pub z_delta: CRTInteger<'v, F>,
pub z_beta: CRTInteger<'v, F>,
}
impl<'v, const DIMENSION: usize, F: PrimeField>
Assign<'v, F, AssignedZKDotProdProof<'v, DIMENSION, F>> for CVDotProdProof<DIMENSION>
{
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedZKDotProdProof<'v, DIMENSION, F> {
let beta = self.beta.assign(ctx, secq_chip);
let delta = self.delta.assign(ctx, secq_chip);
let z: [CRTInteger<'v, F>; DIMENSION] = self
.z
.iter()
.map(|z_i| z_i.assign(ctx, secq_chip))
.collect::<Vec<CRTInteger<'v, F>>>()
.try_into()
.unwrap();
let z_beta = self.z_beta.assign(ctx, secq_chip);
let z_delta = self.z_delta.assign(ctx, secq_chip);
AssignedZKDotProdProof {
beta,
delta,
z,
z_beta,
z_delta,
}
}
}
#[derive(Clone)]
pub struct ZKDotProdChip<const DIMENSION: usize, F: PrimeField> {
pub ecc_chip: EccChip<F, FpChip<F>>,
pub fq_chip: FqChip<F>,
pub pedersen_chip: PedersenCommitChip<F>,
window_bits: usize,
}
impl<const DIMENSION: usize, F: PrimeField> ZKDotProdChip<DIMENSION, F> {
pub fn construct(
ecc_chip: EccChip<F, FpChip<F>>,
fq_chip: FqChip<F>,
pedersen_chip: PedersenCommitChip<F>,
) -> Self {
Self {
ecc_chip,
fq_chip,
pedersen_chip,
window_bits: 4,
}
}
fn dot_prod<'v>(
&self,
ctx: &mut Context<'v, F>,
a: &[CRTInteger<'v, F>],
b: &[CRTInteger<'v, F>],
) -> CRTInteger<'v, F> {
let mut sum = self
.fq_chip
.load_private(ctx, FqChip::<F>::fe_to_witness(&Value::known(Fq::zero())));
// Accumulate sum += a[i] * b[i], carrying after each addition
for i in 0..a.len() {
let prod_no_carry = self.fq_chip.mul_no_carry(ctx, &a[i], &b[i]);
let sum_no_carry = self.fq_chip.add_no_carry(ctx, &sum, &prod_no_carry);
sum = self.fq_chip.carry_mod(ctx, &sum_no_carry);
}
sum
}
pub fn verify<'v>(
&self,
ctx: &mut Context<'v, F>,
tau: &EcPoint<F, CRTInteger<'v, F>>,
a: [CRTInteger<'v, F>; DIMENSION],
com_poly: &EcPoint<F, CRTInteger<'v, F>>,
proof: &AssignedZKDotProdProof<'v, DIMENSION, F>,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
transcript.append_protocol_name(b"dot product proof");
transcript.append_circuit_point(b"Cx", com_poly.clone());
transcript.append_circuit_point(b"Cy", tau.clone());
transcript.append_message(b"a", b"begin_append_vector");
// TODO: Implement this in a trait
for a_i_val in &a {
let mut a_i = [0u8; 32];
a_i_val.clone().value.and_then(|val| {
let mut a_i_bytes = val.to_bytes_be().1;
a_i_bytes.resize(32, 0);
a_i_bytes.reverse();
a_i = a_i_bytes.try_into().unwrap();
Value::known(val)
});
transcript.append_message(b"a", &a_i);
}
transcript.append_message(b"a", b"end_append_vector");
transcript.append_circuit_point(b"delta", (&proof.delta).clone());
transcript.append_circuit_point(b"beta", (&proof.beta).clone());
let max_bits = self.fq_chip.limb_bits;
let c = transcript.challenge_scalar(b"c");
let c = self.fq_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(c.to_circuit_val())),
);
// (13)
let epsilon_c = self.ecc_chip.scalar_mult(
ctx,
&com_poly,
&c.truncation.limbs,
max_bits,
self.window_bits,
);
// (epsilon * c) + delta
let lhs = self
.ecc_chip
.add_unequal(ctx, &epsilon_c, &proof.delta, true);
// com(z, z_delta)
let rhs = self
.pedersen_chip
.multi_commit(ctx, &proof.z, &proof.z_delta, &gens_n);
self.ecc_chip.assert_equal(ctx, &lhs, &rhs);
// (14)
let tau_c = self
.ecc_chip
.scalar_mult(ctx, &tau, &c.truncation.limbs, max_bits, 4);
// (tau * c) + beta
let lhs = self.ecc_chip.add_unequal(ctx, &tau_c, &proof.beta, true);
let a_dot_z = self.dot_prod(ctx, &a, &proof.z);
// com((a ・ z), z_beta)
let rhs = self
.pedersen_chip
.commit(ctx, &a_dot_z, &proof.z_beta, &gens_1);
self.ecc_chip.assert_equal(ctx, &lhs, &rhs);
}
}
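For reference, in non-circuit form the two checks enforced above are (with c the Fiat–Shamir challenge and Com the Pedersen commitment over the given generators):

c * com_poly + delta == Com_{gens_n}(z, z_delta)   // (13)
c * tau + beta == Com_{gens_1}(<a, z>, z_beta)     // (14)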

View File

@@ -0,0 +1,31 @@
use crate::FpChip;
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::{bigint::CRTInteger, fields::FieldChip};
use num_bigint::BigUint;
use num_traits::Zero;
pub struct EvalMLPolyChip<F: PrimeField, const N_VARS: usize> {
pub fp_chip: FpChip<F>,
}
impl<'v, F: PrimeField, const N_VARS: usize> EvalMLPolyChip<F, N_VARS> {
pub fn construct(fp_chip: FpChip<F>) -> Self {
Self { fp_chip }
}
pub fn eval(
&self,
ctx: &mut Context<'v, F>,
coeffs: &[CRTInteger<'v, F>; N_VARS],
vals: &[CRTInteger<'v, F>; N_VARS],
) -> CRTInteger<'v, F> {
let mut acc = self.fp_chip.load_constant(ctx, BigUint::zero());
for (coeff, val) in coeffs.iter().zip(vals.iter()) {
let term = self.fp_chip.mul(ctx, coeff, val);
acc = self.fp_chip.add_no_carry(ctx, &term, &acc);
acc = self.fp_chip.carry_mod(ctx, &acc);
}
acc
}
}

View File

@@ -0,0 +1,12 @@
pub mod dotprod;
pub mod eval_poly;
pub mod pedersen_commit;
pub mod poly_eval_proof;
pub mod proof_bullet_reduce;
pub mod proof_log_of_dotprod;
pub mod proof_of_eq;
pub mod proof_of_opening;
pub mod proof_of_prod;
pub mod secq256k1;
pub mod sumcheck;
pub mod utils;

View File

@@ -0,0 +1,97 @@
use crate::FpChip;
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::bigint::CRTInteger;
use halo2_ecc::ecc::{fixed_base, EcPoint, EccChip};
use hoplite::commitments::MultiCommitGens;
use secpq_curves::group::Curve;
#[derive(Clone)]
pub struct PedersenCommitChip<F: PrimeField> {
pub ecc_chip: EccChip<F, FpChip<F>>,
pub fp_chip: FpChip<F>,
window_bits: usize,
}
impl<F: PrimeField> PedersenCommitChip<F> {
pub fn construct(ecc_chip: EccChip<F, FpChip<F>>, fp_chip: FpChip<F>) -> Self {
Self {
ecc_chip,
fp_chip,
window_bits: 4,
}
}
pub fn commit<'v>(
&self,
ctx: &mut Context<'v, F>,
x: &CRTInteger<'v, F>,
blinder: &CRTInteger<'v, F>,
gens: &MultiCommitGens,
) -> EcPoint<F, CRTInteger<'v, F>> {
let max_bits = self.fp_chip.limb_bits;
let gx = fixed_base::scalar_multiply(
&self.fp_chip,
ctx,
&gens.G[0].to_affine(),
&x.truncation.limbs,
max_bits,
self.window_bits,
);
let hb = fixed_base::scalar_multiply(
&self.fp_chip,
ctx,
&gens.h.to_affine(),
&blinder.truncation.limbs,
max_bits,
self.window_bits,
);
let com = self.ecc_chip.add_unequal(ctx, &gx, &hb, true);
com
}
pub fn multi_commit<'v>(
&self,
ctx: &mut Context<'v, F>,
x: &[CRTInteger<'v, F>],
blinder: &CRTInteger<'v, F>,
gens: &MultiCommitGens,
) -> EcPoint<F, CRTInteger<'v, F>> {
let max_bits = self.fp_chip.limb_bits;
let mut g_sum = fixed_base::scalar_multiply(
&self.fp_chip,
ctx,
&gens.G[0].to_affine(),
&x[0].truncation.limbs,
max_bits,
self.window_bits,
);
for (i, x_i) in x[1..].iter().enumerate() {
let g = fixed_base::scalar_multiply(
&self.fp_chip,
ctx,
&gens.G[i + 1].to_affine(),
&x_i.truncation.limbs,
max_bits,
self.window_bits,
);
g_sum = self.ecc_chip.add_unequal(ctx, &g_sum, &g, true);
}
let hb = fixed_base::scalar_multiply(
&self.fp_chip,
ctx,
&gens.h.to_affine(),
&blinder.truncation.limbs,
max_bits,
self.window_bits,
);
let com = self.ecc_chip.add_unequal(ctx, &g_sum, &hb, true);
com
}
}
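For reference, the two functions above constrain the plain Pedersen relation over the fixed generators in MultiCommitGens:

commit:       Com(x; b) = x * G[0] + b * h
multi_commit: Com(x_0, ..., x_{n-1}; b) = sum_i x_i * G[i] + b * h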

View File

@@ -0,0 +1,140 @@
use super::{
proof_log_of_dotprod::{AssignedDotProductProofLog, ProofLogOfDotProdChip},
utils::{Assign, AssignArray},
};
use crate::chips::{proof_bullet_reduce::AssignedBulletReductionProof, secq256k1::Secq256k1Chip};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::bigint::CRTInteger;
use halo2_ecc::ecc::EcPoint;
use hoplite::{circuit_vals::CVPolyEvalProof, commitments::MultiCommitGens};
use libspartan::transcript::{ProofTranscript, Transcript};
use secpq_curves::{
group::{Curve, Group},
Secq256k1,
};
pub trait AssignN<'v, F: PrimeField, const N: usize> {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedPolyEvalProof<'v, F, N>;
}
pub struct AssignedPolyEvalProof<'v, F: PrimeField, const N: usize> {
pub proof: AssignedDotProductProofLog<'v, F, N>,
}
impl<'v, F: PrimeField, const N: usize> AssignN<'v, F, N> for CVPolyEvalProof<N> {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedPolyEvalProof<'v, F, N> {
let z1 = self.proof.z1.assign(ctx, secq_chip);
let z2 = self.proof.z2.assign(ctx, secq_chip);
let beta = self.proof.beta.assign(ctx, secq_chip);
let delta = self.proof.delta.assign(ctx, secq_chip);
let L_vec = self
.proof
.bullet_reduction_proof
.L_vec
.assign(ctx, secq_chip);
let R_vec = self
.proof
.bullet_reduction_proof
.R_vec
.assign(ctx, secq_chip);
let bullet_reduction_proof = AssignedBulletReductionProof { L_vec, R_vec };
let proof = AssignedDotProductProofLog {
bullet_reduction_proof,
delta,
beta,
z1,
z2,
};
AssignedPolyEvalProof { proof }
}
}
pub struct PolyEvalProofChip<F: PrimeField, const N: usize, const N_HALF: usize> {
pub secq_chip: Secq256k1Chip<F>,
pub proof_log_dotprod_chip: ProofLogOfDotProdChip<F, N, N_HALF>,
pub window_bits: usize,
}
impl<'v, F: PrimeField, const N: usize, const N_HALF: usize> PolyEvalProofChip<F, N, N_HALF> {
pub fn construct(
secq_chip: Secq256k1Chip<F>,
proof_log_dotprod_chip: ProofLogOfDotProdChip<F, N, N_HALF>,
window_bits: usize,
) -> Self {
Self {
secq_chip,
proof_log_dotprod_chip,
window_bits,
}
}
pub fn verify(
&self,
ctx: &mut Context<'v, F>,
r: &[CRTInteger<'v, F>; N],
C_Zr: &EcPoint<F, CRTInteger<'v, F>>,
comm_polys: &[EcPoint<F, CRTInteger<'v, F>>; N],
proof: AssignedPolyEvalProof<'v, F, N_HALF>,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
let limbs_bits = self.secq_chip.ecc_chip.field_chip.limb_bits;
transcript.append_protocol_name(b"polynomial evaluation proof");
// Evaluate the eq poly over the boolean hypercube bound to r
let r_left = &r[0..N / 2];
let r_right = &r[N / 2..];
// TODO: Implement the evals() constraint
// L = evals(r_left);
// R = evals(r_right);
let L = r_left;
let R = r_right;
// C_LZ = sum_i L[i] * comm_polys[i]
let mut C_LZ = self
.secq_chip
.ecc_chip
.assign_constant_point(ctx, Secq256k1::identity().to_affine());
for i in 0..comm_polys.len() {
let comm_poly_L = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&comm_polys[i],
&L[i].truncation.limbs,
limbs_bits,
self.window_bits,
);
C_LZ = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &comm_poly_L, &C_LZ, true);
}
self.proof_log_dotprod_chip.verify(
ctx,
R.try_into().unwrap(),
&C_LZ,
&C_Zr,
&proof.proof,
&gens_1,
&gens_n,
transcript,
);
}
}
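For reference, once evals() is implemented, L and R will be the eq-polynomial evaluations over the two halves of r; C_LZ = sum_i L[i] * comm_polys[i] then commits to the L-weighted combination of the committed polynomial rows, and the log-sized dot-product proof ties that combination, via the vector R, to the claimed evaluation committed in C_Zr.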

View File

@@ -0,0 +1,177 @@
use super::utils::{Assign, AssignArray};
use crate::{chips::secq256k1::Secq256k1Chip, transcript::HopliteTranscript, Fq};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::ecc::EcPoint;
use halo2_ecc::fields::FieldChip;
use halo2_ecc::{bigint::CRTInteger, ecc::fixed_base};
use hoplite::{
circuit_vals::{CVProductProof, FromCircuitVal, ToCircuitVal},
commitments::MultiCommitGens,
};
use libspartan::{
group::CompressedGroup,
transcript::{ProofTranscript, Transcript},
};
use num_bigint::BigUint;
use num_traits::identities::Zero;
use secpq_curves::group::{Curve, Group};
use secpq_curves::Secq256k1;
use super::pedersen_commit::PedersenCommitChip;
#[derive(Clone)]
pub struct AssignedBulletReductionProof<'v, F: PrimeField, const N: usize> {
pub L_vec: [EcPoint<F, CRTInteger<'v, F>>; N],
pub R_vec: [EcPoint<F, CRTInteger<'v, F>>; N],
}
#[derive(Clone)]
pub struct BulletReduceChip<F: PrimeField, const N: usize> {
pub secq_chip: Secq256k1Chip<F>,
pub pedersen_chip: PedersenCommitChip<F>,
pub window_bits: usize,
}
impl<'v, F: PrimeField, const N: usize> BulletReduceChip<F, N> {
pub fn construct(
secq_chip: Secq256k1Chip<F>,
pedersen_chip: PedersenCommitChip<F>,
window_bits: usize,
) -> Self {
Self {
secq_chip,
pedersen_chip,
window_bits,
}
}
fn batch_invert(&self, ctx: &mut Context<'v, F>, a: [CRTInteger<'v, F>; N]) {}
pub fn verify(
&self,
ctx: &mut Context<'v, F>,
upsilon: &EcPoint<F, CRTInteger<'v, F>>, // The upsilon computed in this function should equal this value
a_L: &[CRTInteger<'v, F>; N],
a_R: &[CRTInteger<'v, F>; N],
upsilon_L: &[EcPoint<F, CRTInteger<'v, F>>; N],
upsilon_R: &[EcPoint<F, CRTInteger<'v, F>>; N],
G_L: &[Secq256k1; N],
G_R: &[Secq256k1; N],
transcript: &mut Transcript,
) -> (
EcPoint<F, CRTInteger<'v, F>>,
CRTInteger<'v, F>,
EcPoint<F, CRTInteger<'v, F>>,
) {
let limb_bits = self.secq_chip.ecc_chip.field_chip.limb_bits;
// #####
// 1: Compute the verification scalars
// #####
// Compute challenges
let mut challenges = Vec::with_capacity(N);
for (L, R) in upsilon_L.iter().zip(upsilon_R.iter()) {
transcript.append_circuit_point(b"L", L.clone());
transcript.append_circuit_point(b"R", R.clone());
let c_i = transcript.challenge_scalar(b"u");
let c_i = Some(c_i.to_circuit_val()).assign(ctx, &self.secq_chip);
challenges.push(c_i);
}
let challenges_inv = challenges.clone();
// 2. Compute the inverses of the challenges
// TODO: Actually compute the inverses (challenges_inv is currently just a clone)!
// Scalar::batch_invert(&mut challenges_inv);
// 3. Compute the square of the challenges
let mut challenges_sq = vec![];
for c in challenges.clone() {
let c_i_squared = self.secq_chip.fq_chip.mul(ctx, &c, &c);
challenges_sq.push(c_i_squared.clone());
}
let mut challenges_inv_sq = vec![];
for c in challenges_inv.clone() {
let c_i_squared = self.secq_chip.fq_chip.mul(ctx, &c, &c);
challenges_inv_sq.push(c_i_squared.clone());
}
let mut upsilon_hat = self
.secq_chip
.ecc_chip
.assign_constant_point(ctx, Secq256k1::identity().to_affine());
for i in 0..N {
let p_i_l = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&upsilon_L[i],
&challenges_sq[i].truncation.limbs,
limb_bits,
4,
);
let p_i_r = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&upsilon_R[i],
&challenges_inv_sq[i].truncation.limbs,
limb_bits,
4,
);
let p_i = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &p_i_l, &p_i_r, true);
upsilon_hat = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &p_i, &upsilon_hat, true);
}
let mut a_hat = self.secq_chip.fq_chip.load_constant(ctx, BigUint::zero());
for i in 0..N {
let a_i_l = self.secq_chip.fq_chip.mul(ctx, &a_L[i], &challenges_inv[i]);
let a_i_r = self.secq_chip.fq_chip.mul(ctx, &a_R[i], &challenges[i]);
let a_i_no_carry = self.secq_chip.fq_chip.add_no_carry(ctx, &a_i_l, &a_i_r);
let a_i = self.secq_chip.fq_chip.carry_mod(ctx, &a_i_no_carry);
let a_hat_no_carry = self.secq_chip.fq_chip.add_no_carry(ctx, &a_i, &a_hat);
a_hat = self.secq_chip.fq_chip.carry_mod(ctx, &a_hat_no_carry);
}
let mut g_hat = self
.secq_chip
.ecc_chip
.assign_constant_point(ctx, Secq256k1::identity().to_affine());
for i in 0..N {
let g_i_l = fixed_base::scalar_multiply(
&self.secq_chip.ecc_chip.field_chip,
ctx,
&G_L[i].to_affine(),
&challenges_inv[i].truncation.limbs,
limb_bits,
self.window_bits,
);
let g_i_r = fixed_base::scalar_multiply(
&self.secq_chip.ecc_chip.field_chip,
ctx,
&G_R[i].to_affine(),
&challenges[i].truncation.limbs,
limb_bits,
self.window_bits,
);
let g_i = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &g_i_l, &g_i_r, true);
g_hat = self.secq_chip.ecc_chip.add_unequal(ctx, &g_i, &g_hat, true);
}
(upsilon_hat, a_hat, g_hat)
}
}
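In scalar form, the folded values returned above are (with c_i the round challenges and c_i^{-1} their inverses, which still need to be computed where the TODO notes):

upsilon_hat = sum_i c_i^2 * upsilon_L[i] + c_i^{-2} * upsilon_R[i]
a_hat       = sum_i c_i^{-1} * a_L[i] + c_i * a_R[i]
g_hat       = sum_i c_i^{-1} * G_L[i] + c_i * G_R[i]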

View File

@@ -0,0 +1,150 @@
use crate::{chips::proof_bullet_reduce::BulletReduceChip, transcript::HopliteTranscript};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::ecc::EcPoint;
use halo2_ecc::{bigint::CRTInteger, ecc::fixed_base};
use hoplite::{circuit_vals::ToCircuitVal, commitments::MultiCommitGens};
use libspartan::transcript::{ProofTranscript, Transcript};
use super::{
proof_bullet_reduce::AssignedBulletReductionProof, secq256k1::Secq256k1Chip, utils::Assign,
};
use secpq_curves::group::Curve;
pub struct AssignedDotProductProofLog<'v, F: PrimeField, const N: usize> {
pub bullet_reduction_proof: AssignedBulletReductionProof<'v, F, N>,
pub delta: EcPoint<F, CRTInteger<'v, F>>,
pub beta: EcPoint<F, CRTInteger<'v, F>>,
pub z1: CRTInteger<'v, F>,
pub z2: CRTInteger<'v, F>,
}
#[derive(Clone)]
pub struct ProofLogOfDotProdChip<F: PrimeField, const N: usize, const N_HALF: usize> {
pub secq_chip: Secq256k1Chip<F>,
pub bullet_reduce_chip: BulletReduceChip<F, N_HALF>,
pub window_bits: usize,
}
impl<'v, F: PrimeField, const N: usize, const N_HALF: usize> ProofLogOfDotProdChip<F, N, N_HALF> {
pub fn construct(
secq_chip: Secq256k1Chip<F>,
bullet_reduce_chip: BulletReduceChip<F, N_HALF>,
window_bits: usize,
) -> Self {
Self {
secq_chip,
bullet_reduce_chip,
window_bits,
}
}
pub fn verify(
&self,
ctx: &mut Context<'v, F>,
a: &[CRTInteger<'v, F>; N],
Cx: &EcPoint<F, CRTInteger<'v, F>>,
Cy: &EcPoint<F, CRTInteger<'v, F>>,
proof: &AssignedDotProductProofLog<'v, F, N_HALF>,
gens_1: &MultiCommitGens,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
let limb_bits = self.secq_chip.ecc_chip.field_chip.limb_bits;
transcript.append_protocol_name(b"dot product proof (log)");
transcript.append_circuit_point(b"Cx", Cx.clone());
transcript.append_circuit_point(b"Cy", Cy.clone());
transcript.append_message(b"a", b"begin_append_vector");
for a_i in a {
transcript.append_circuit_fq(b"a", a_i.clone());
}
transcript.append_message(b"a", b"end_append_vector");
// Upsilon
let Gamma = self.secq_chip.ecc_chip.add_unequal(ctx, &Cx, &Cy, true);
let a_L = a[0..N_HALF].try_into().unwrap();
let a_R = a[N_HALF..].try_into().unwrap();
let G_L = &gens_n.G[0..N_HALF].try_into().unwrap();
let G_R = &gens_n.G[N_HALF..].try_into().unwrap();
let bullet_reduction_proof = &proof.bullet_reduction_proof;
let upsilon_L = &bullet_reduction_proof.clone().L_vec.try_into().unwrap();
let upsilon_R = &bullet_reduction_proof.clone().R_vec.try_into().unwrap();
let (Gamma_hat, a_hat, g_hat) = self.bullet_reduce_chip.verify(
ctx, &Gamma, a_L, a_R, upsilon_L, upsilon_R, G_L, G_R, transcript,
);
transcript.append_circuit_point(b"delta", proof.delta.clone());
transcript.append_circuit_point(b"beta", proof.beta.clone());
let c = transcript.challenge_scalar(b"c");
let c = Some(c.to_circuit_val()).assign(ctx, &self.secq_chip);
let Gamma_hat_c = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&Gamma_hat,
&c.truncation.limbs,
limb_bits,
self.window_bits,
);
let Gamma_hat_c_beta =
self.secq_chip
.ecc_chip
.add_unequal(ctx, &Gamma_hat_c, &proof.beta, true);
let lhs_1 = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&Gamma_hat_c_beta,
&a_hat.truncation.limbs,
limb_bits,
self.window_bits,
);
let lhs = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &lhs_1, &proof.delta, true);
let G_a_hat = fixed_base::scalar_multiply(
self.secq_chip.ecc_chip.field_chip(),
ctx,
&gens_1.G[0].to_affine(),
&a_hat.truncation.limbs,
limb_bits,
self.window_bits,
);
let rhs_1 = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &G_a_hat, &g_hat, true);
let rhs_2 = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&rhs_1,
&proof.z1.truncation.limbs,
limb_bits,
self.window_bits,
);
let rhs_3 = fixed_base::scalar_multiply(
self.secq_chip.ecc_chip.field_chip(),
ctx,
&gens_1.h.to_affine(),
&proof.z2.truncation.limbs,
limb_bits,
self.window_bits,
);
let rhs = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &rhs_2, &rhs_3, true);
self.secq_chip.ecc_chip.assert_equal(ctx, &lhs, &rhs);
}
}
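The final constraint enforced above, written out, is a_hat * (c * Gamma_hat + beta) + delta == z1 * (a_hat * G + g_hat) + z2 * h, where Gamma_hat, a_hat and g_hat are the outputs of the bullet-reduction step and G, h come from gens_1.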

View File

@@ -0,0 +1,90 @@
use super::utils::Assign;
use crate::{chips::secq256k1::Secq256k1Chip, transcript::HopliteTranscript};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::ecc::EcPoint;
use halo2_ecc::{bigint::CRTInteger, ecc::fixed_base};
use hoplite::{
circuit_vals::{CVEqualityProof, ToCircuitVal},
commitments::MultiCommitGens,
};
use libspartan::transcript::{ProofTranscript, Transcript};
use secpq_curves::group::Curve;
pub struct AssignedProofOfEq<'v, F: PrimeField> {
pub alpha: EcPoint<F, CRTInteger<'v, F>>,
pub z: CRTInteger<'v, F>,
}
impl<'v, F: PrimeField> Assign<'v, F, AssignedProofOfEq<'v, F>> for CVEqualityProof {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedProofOfEq<'v, F> {
let alpha = self.alpha.assign(ctx, secq_chip);
let z = self.z.assign(ctx, secq_chip);
AssignedProofOfEq { alpha, z }
}
}
pub struct ProofOfEqChip<F: PrimeField> {
pub secq_chip: Secq256k1Chip<F>,
pub window_bits: usize,
}
impl<'v, F: PrimeField> ProofOfEqChip<F> {
pub fn construct(secq_chip: Secq256k1Chip<F>, window_bits: usize) -> Self {
Self {
secq_chip,
window_bits,
}
}
pub fn verify(
&self,
ctx: &mut Context<'v, F>,
C1: &EcPoint<F, CRTInteger<'v, F>>,
C2: &EcPoint<F, CRTInteger<'v, F>>,
proof: AssignedProofOfEq<'v, F>,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
let limb_bits = self.secq_chip.ecc_chip.field_chip.limb_bits;
let window_bits = self.window_bits;
transcript.append_protocol_name(b"equality proof");
transcript.append_circuit_point(b"C1", C1.clone());
transcript.append_circuit_point(b"C2", C2.clone());
transcript.append_circuit_point(b"alpha", (&proof.alpha).clone());
let lhs = fixed_base::scalar_multiply(
&self.secq_chip.ecc_chip.field_chip,
ctx,
&gens_n.h.to_affine(),
&proof.z.truncation.limbs,
limb_bits,
window_bits,
);
let c = transcript.challenge_scalar(b"c");
let c = Some(c.to_circuit_val()).assign(ctx, &self.secq_chip);
let C1_minus_C2 = self.secq_chip.ecc_chip.sub_unequal(ctx, &C1, &C2, true);
let C1_minus_C2_c = self.secq_chip.ecc_chip.scalar_mult(
ctx,
&C1_minus_C2,
&c.truncation.limbs,
limb_bits,
window_bits,
);
let rhs = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &C1_minus_C2_c, &proof.alpha, true);
self.secq_chip.ecc_chip.assert_equal(ctx, &lhs, &rhs);
}
}
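For reference, the single constraint enforced above is z * h == c * (C1 - C2) + alpha, i.e. it verifies that C1 - C2 is a commitment to zero, so the two commitments open to the same value.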

View File

@@ -0,0 +1,97 @@
use super::utils::Assign;
use crate::{
chips::secq256k1::Secq256k1Chip,
transcript::HopliteTranscript,
{FpChip, FqChip},
};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::bigint::CRTInteger;
use halo2_ecc::ecc::{EcPoint, EccChip};
use halo2_ecc::fields::FieldChip;
use halo2_proofs::circuit::Value;
use hoplite::{
circuit_vals::{CVKnowledgeProof, ToCircuitVal},
commitments::MultiCommitGens,
};
use libspartan::transcript::{ProofTranscript, Transcript};
use super::pedersen_commit::PedersenCommitChip;
pub struct AssignedProofOfOpening<'v, F: PrimeField> {
pub alpha: EcPoint<F, CRTInteger<'v, F>>,
pub z1: CRTInteger<'v, F>,
pub z2: CRTInteger<'v, F>,
}
impl<'v, F: PrimeField> Assign<'v, F, AssignedProofOfOpening<'v, F>> for CVKnowledgeProof {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedProofOfOpening<'v, F> {
let alpha = self.alpha.assign(ctx, secq_chip);
let z1 = self.z1.assign(ctx, secq_chip);
let z2 = self.z2.assign(ctx, secq_chip);
AssignedProofOfOpening { alpha, z1, z2 }
}
}
pub struct ZKKnowledgeProofChip<F: PrimeField> {
pub ecc_chip: EccChip<F, FpChip<F>>,
pub fp_chip: FpChip<F>,
pub fq_chip: FqChip<F>,
pub pedersen_chip: PedersenCommitChip<F>,
pub window_bits: usize,
}
impl<'v, F: PrimeField> ZKKnowledgeProofChip<F> {
pub fn construct(
ecc_chip: EccChip<F, FpChip<F>>,
fp_chip: FpChip<F>,
fq_chip: FqChip<F>,
pedersen_chip: PedersenCommitChip<F>,
window_bits: usize,
) -> Self {
Self {
ecc_chip,
fp_chip,
fq_chip,
pedersen_chip,
window_bits,
}
}
pub fn verify(
&self,
ctx: &mut Context<'v, F>,
C: &EcPoint<F, CRTInteger<'v, F>>,
proof: AssignedProofOfOpening<'v, F>,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
let limb_bits = self.fp_chip.limb_bits;
transcript.append_protocol_name(b"knowledge proof");
let alpha = &proof.alpha;
transcript.append_circuit_point(b"C", C.clone());
transcript.append_circuit_point(b"alpha", alpha.clone());
let c = &transcript.challenge_scalar(b"c");
let c = self.fq_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(c.to_circuit_val())),
);
let lhs = self.pedersen_chip.commit(ctx, &proof.z1, &proof.z2, gens_n);
let C_mult_c =
self.ecc_chip
.scalar_mult(ctx, C, &c.truncation.limbs, limb_bits, self.window_bits);
let rhs = self.ecc_chip.add_unequal(ctx, &C_mult_c, &alpha, true);
self.ecc_chip.assert_equal(ctx, &lhs, &rhs);
}
}
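For reference, the constraint enforced above is Com(z1; z2) == c * C + alpha, the proof of knowledge of an opening (value and blinder) of the Pedersen commitment C.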

View File

@@ -0,0 +1,162 @@
use super::utils::{Assign, AssignArray};
use crate::{chips::secq256k1::Secq256k1Chip, transcript::HopliteTranscript};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::ecc::EcPoint;
use halo2_ecc::{bigint::CRTInteger, ecc::fixed_base};
use hoplite::{
circuit_vals::{CVProductProof, ToCircuitVal},
commitments::MultiCommitGens,
};
use libspartan::transcript::{ProofTranscript, Transcript};
use secpq_curves::group::Curve;
use super::pedersen_commit::PedersenCommitChip;
pub struct AssignedProofOfProd<'v, F: PrimeField> {
pub alpha: EcPoint<F, CRTInteger<'v, F>>,
pub beta: EcPoint<F, CRTInteger<'v, F>>,
pub delta: EcPoint<F, CRTInteger<'v, F>>,
pub z: [CRTInteger<'v, F>; 5],
}
impl<'v, F: PrimeField> Assign<'v, F, AssignedProofOfProd<'v, F>> for CVProductProof {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedProofOfProd<'v, F> {
let alpha = self.alpha.assign(ctx, secq_chip);
let beta = self.beta.assign(ctx, secq_chip);
let delta = self.delta.assign(ctx, secq_chip);
let z = self.z.assign(ctx, secq_chip);
AssignedProofOfProd {
alpha,
beta,
delta,
z,
}
}
}
pub struct ProofOfProdChip<F: PrimeField> {
pub secq_chip: Secq256k1Chip<F>,
pub pedersen_chip: PedersenCommitChip<F>,
pub window_bits: usize,
}
impl<'v, F: PrimeField> ProofOfProdChip<F> {
pub fn construct(
secq_chip: Secq256k1Chip<F>,
pedersen_chip: PedersenCommitChip<F>,
window_bits: usize,
) -> Self {
Self {
secq_chip,
pedersen_chip,
window_bits,
}
}
pub fn verify(
&self,
ctx: &mut Context<'v, F>,
X: &EcPoint<F, CRTInteger<'v, F>>,
Y: &EcPoint<F, CRTInteger<'v, F>>,
Z: &EcPoint<F, CRTInteger<'v, F>>,
proof: AssignedProofOfProd<'v, F>,
gens_n: &MultiCommitGens,
transcript: &mut Transcript,
) {
let limb_bits = self.secq_chip.ecc_chip.field_chip.limb_bits;
let window_bits = self.window_bits;
transcript.append_protocol_name(b"product proof");
transcript.append_circuit_point(b"X", X.clone());
transcript.append_circuit_point(b"Y", Y.clone());
transcript.append_circuit_point(b"Z", Z.clone());
transcript.append_circuit_point(b"alpha", (&proof.alpha).clone());
transcript.append_circuit_point(b"beta", (&proof.beta).clone());
transcript.append_circuit_point(b"delta", (&proof.delta).clone());
let c = transcript.challenge_scalar(b"c");
let c = Some(c.to_circuit_val()).assign(ctx, &self.secq_chip);
let z1 = &proof.z[0];
let z2 = &proof.z[1];
let z3 = &proof.z[2];
let z4 = &proof.z[3];
let z5 = &proof.z[4];
// (7)
let X_c = self.secq_chip.ecc_chip.scalar_mult(
ctx,
X,
&c.truncation.limbs,
limb_bits,
window_bits,
);
let lhs_7 = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &X_c, &proof.alpha, true);
let rhs_7 = self.pedersen_chip.commit(ctx, &z1, &z2, gens_n);
self.secq_chip.ecc_chip.assert_equal(ctx, &lhs_7, &rhs_7);
// (8)
let Y_c = self.secq_chip.ecc_chip.scalar_mult(
ctx,
Y,
&c.truncation.limbs,
limb_bits,
window_bits,
);
let lhs_8 = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &Y_c, &proof.beta, true);
let rhs_8 = self.pedersen_chip.commit(ctx, &z3, &z4, gens_n);
self.secq_chip.ecc_chip.assert_equal(ctx, &lhs_8, &rhs_8);
// (9)
let Z_c = self.secq_chip.ecc_chip.scalar_mult(
ctx,
Z,
&c.truncation.limbs,
limb_bits,
window_bits,
);
let lhs_9 = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &Z_c, &proof.delta, true);
let rhs_9_gx = self.secq_chip.ecc_chip.scalar_mult(
ctx,
X,
&z3.truncation.limbs,
limb_bits,
window_bits,
);
let rhs_9_hb = fixed_base::scalar_multiply(
&self.secq_chip.ecc_chip.field_chip,
ctx,
&gens_n.h.to_affine(),
&z5.truncation.limbs,
limb_bits,
window_bits,
);
let rhs_9 = self
.secq_chip
.ecc_chip
.add_unequal(ctx, &rhs_9_gx, &rhs_9_hb, true);
self.secq_chip.ecc_chip.assert_equal(ctx, &lhs_9, &rhs_9);
}
}
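For reference, the three constraints (7)-(9) enforced above are:

c * X + alpha == Com(z1; z2)
c * Y + beta  == Com(z3; z4)
c * Z + delta == z3 * X + z5 * h

which together show that Z commits to the product of the values committed in X and Y.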

View File

@@ -0,0 +1,81 @@
use super::utils::{Assign, AssignArray};
use crate::{FpChip, Fq, FqChip};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::{
bigint::CRTInteger,
ecc::{EcPoint, EccChip},
fields::FieldChip,
};
use halo2_proofs::circuit::Value;
use secpq_curves::Secq256k1;
#[derive(Clone)]
pub struct Secq256k1Chip<F: PrimeField> {
pub ecc_chip: EccChip<F, FpChip<F>>,
pub fq_chip: FqChip<F>,
}
impl<F: PrimeField> Secq256k1Chip<F> {
pub fn construct(ecc_chip: EccChip<F, FpChip<F>>, fq_chip: FqChip<F>) -> Self {
Self { ecc_chip, fq_chip }
}
}
impl<'v, F: PrimeField> Assign<'v, F, EcPoint<F, CRTInteger<'v, F>>> for Option<Secq256k1> {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> EcPoint<F, CRTInteger<'v, F>> {
secq_chip.ecc_chip.load_private(
ctx,
(
self.map_or(Value::unknown(), |p| Value::known(p.x)),
self.map_or(Value::unknown(), |p| Value::known(p.y)),
),
)
}
}
impl<'v, F: PrimeField> Assign<'v, F, CRTInteger<'v, F>> for Option<Fq> {
fn assign(&self, ctx: &mut Context<'v, F>, secq_chip: &Secq256k1Chip<F>) -> CRTInteger<'v, F> {
secq_chip.fq_chip.load_private(
ctx,
self.map_or(Value::unknown(), |z| {
FqChip::<F>::fe_to_witness(&Value::known(z))
}),
)
}
}
impl<'v, F: PrimeField, const N: usize> AssignArray<'v, F, CRTInteger<'v, F>, N>
for [Option<Fq>; N]
{
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> [CRTInteger<'v, F>; N] {
self.iter()
.map(|v| v.assign(ctx, secq_chip))
.collect::<Vec<CRTInteger<'v, F>>>()
.try_into()
.unwrap()
}
}
impl<'v, F: PrimeField, const N: usize> AssignArray<'v, F, EcPoint<F, CRTInteger<'v, F>>, N>
for [Option<Secq256k1>; N]
{
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> [EcPoint<F, CRTInteger<'v, F>>; N] {
self.iter()
.map(|v| v.assign(ctx, secq_chip))
.collect::<Vec<EcPoint<F, CRTInteger<'v, F>>>>()
.try_into()
.unwrap()
}
}

View File

@@ -0,0 +1,270 @@
use crate::{
chips::{
dotprod::{AssignedZKDotProdProof, ZKDotProdChip},
pedersen_commit::PedersenCommitChip,
secq256k1::Secq256k1Chip,
},
transcript::HopliteTranscript,
{FpChip, Fq, FqChip},
};
use halo2_base::{utils::PrimeField, Context};
use halo2_ecc::bigint::CRTInteger;
use halo2_ecc::ecc::{fixed_base, EcPoint, EccChip};
use halo2_ecc::fields::FieldChip;
use halo2_proofs::circuit::Value;
use hoplite::{
circuit_vals::{CVSumCheckProof, ToCircuitVal},
commitments::MultiCommitGens,
};
use libspartan::transcript::{ProofTranscript, Transcript};
use secpq_curves::group::Group;
use secpq_curves::{group::Curve, Secq256k1};
use super::utils::{Assign, AssignArray};
#[derive(Clone)]
pub struct AssignedZKSumCheck<'v, const N_ROUNDS: usize, const DIMENSION: usize, F: PrimeField> {
pub comm_polys: [EcPoint<F, CRTInteger<'v, F>>; N_ROUNDS],
pub comm_evals: [EcPoint<F, CRTInteger<'v, F>>; N_ROUNDS],
pub proofs: [AssignedZKDotProdProof<'v, DIMENSION, F>; N_ROUNDS],
}
pub trait AssignZKSumCheckProof<'v, const N_ROUNDS: usize, const DIMENSION: usize, F: PrimeField> {
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedZKSumCheck<'v, N_ROUNDS, DIMENSION, F>;
}
impl<'v, const N_ROUNDS: usize, const DIMENSION: usize, F: PrimeField>
AssignZKSumCheckProof<'v, N_ROUNDS, DIMENSION, F> for CVSumCheckProof<N_ROUNDS, DIMENSION>
{
fn assign(
&self,
ctx: &mut Context<'v, F>,
secq_chip: &Secq256k1Chip<F>,
) -> AssignedZKSumCheck<'v, N_ROUNDS, DIMENSION, F> {
let comm_evals = self.comm_evals.assign(ctx, secq_chip);
let comm_polys = self.comm_polys.assign(ctx, secq_chip);
let proofs = self
.proofs
.iter()
.map(|proof| proof.assign(ctx, secq_chip))
.collect::<Vec<AssignedZKDotProdProof<'v, DIMENSION, F>>>()
.try_into()
.unwrap();
AssignedZKSumCheck {
comm_evals,
comm_polys,
proofs,
}
}
}
pub struct ZKSumCheckChip<const N_ROUNDS: usize, const DIMENSION: usize, F: PrimeField> {
pub ecc_chip: EccChip<F, FpChip<F>>,
pub fp_chip: FpChip<F>,
pub fq_chip: FqChip<F>,
pub pedersen_chip: PedersenCommitChip<F>,
pub zkdotprod_chip: ZKDotProdChip<DIMENSION, F>,
pub window_bits: usize,
}
impl<const N_ROUNDS: usize, const DIMENSION: usize, F: PrimeField>
ZKSumCheckChip<N_ROUNDS, DIMENSION, F>
{
pub fn construct(
ecc_chip: EccChip<F, FpChip<F>>,
fp_chip: FpChip<F>,
fq_chip: FqChip<F>,
pedersen_chip: PedersenCommitChip<F>,
zkdotprod_chip: ZKDotProdChip<DIMENSION, F>,
) -> Self {
Self {
ecc_chip,
fp_chip,
fq_chip,
pedersen_chip,
zkdotprod_chip,
window_bits: 4,
}
}
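/// Verifies a zk-sumcheck proof round by round: each round absorbs the round-polynomial
/// commitment into the transcript, derives the challenge r_i, combines the previous round's
/// claim with the claimed evaluation, and checks the combination with a zk dot-product proof.
/// Returns the commitment to the final claimed evaluation and the challenge vector r.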
pub fn verify<'v>(
&self,
ctx: &mut Context<'v, F>,
proof: &AssignedZKSumCheck<'v, N_ROUNDS, DIMENSION, F>,
gens_n: &MultiCommitGens,
gens_1: &MultiCommitGens,
target_sum: EcPoint<F, CRTInteger<'v, F>>,
target_sum_identity: bool,
transcript: &mut Transcript,
) -> (EcPoint<F, CRTInteger<'v, F>>, [CRTInteger<'v, F>; N_ROUNDS]) {
let limb_bits = self.fp_chip.limb_bits;
let num_limbs = self.fp_chip.num_limbs;
let mut r = vec![];
for i in 0..N_ROUNDS {
// Commitments to this round's polynomial and claimed evaluation
let com_eval = &proof.comm_evals[i];
let com_poly = &proof.comm_polys[i];
transcript.append_circuit_point(b"comm_poly", com_poly.clone());
let r_i = &transcript.challenge_scalar(b"challenge_nextround");
let r_i = self.fp_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(r_i.to_circuit_val())),
);
r.push(r_i.clone());
let com_round_sum = if i == 0 {
&target_sum
} else {
&proof.comm_evals[i - 1]
};
transcript.append_circuit_point(b"comm_claim_per_round", com_round_sum.clone());
transcript.append_circuit_point(b"comm_eval", com_eval.clone());
// Convert the CRT integer back into its native representation.
// It might be easier to use a CRT integer in the original implementation as well.
// A number of hashes still need to be appended to the transcript,
// and the point should be SEC1-encoded as well.
let w_scalar = transcript.challenge_vector(b"combine_two_claims_to_one", 2);
let w_0: CRTInteger<F> = self.fq_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(w_scalar[0].to_circuit_val())),
);
let w_1: CRTInteger<F> = self.fq_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(w_scalar[1].to_circuit_val())),
);
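// Reduce two claims to one: combine the previous round's claim (com_round_sum) and the
// claimed evaluation (com_eval) with random weights, tau = w_0 * com_round_sum + w_1 * com_eval.
// When the target sum is the identity point, the w_0 term contributes nothing, so only
// the w_1 term is used below.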
let tau_0 = if target_sum_identity {
fixed_base::scalar_multiply(
&self.fp_chip,
ctx,
&Secq256k1::identity().to_affine(),
&w_0.truncation.limbs,
limb_bits,
self.window_bits,
)
} else {
self.ecc_chip.scalar_mult(
ctx,
&com_round_sum,
&w_0.truncation.limbs,
limb_bits,
self.window_bits,
)
};
let tau_1 = self.ecc_chip.scalar_mult(
ctx,
&com_eval,
&w_1.truncation.limbs,
limb_bits,
self.window_bits,
);
let tau = if target_sum_identity {
tau_1
} else {
self.ecc_chip.add_unequal(ctx, &tau_0, &tau_1, true)
};
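// Decommitment vectors for the dot-product check (the round polynomial is committed in
// coefficient form): a_sc = (2, 1, ..., 1) evaluates g(0) + g(1) and a_eval = (1, r_i, r_i^2, ...)
// evaluates g(r_i); the weighted combination a = w_0 * a_sc + w_1 * a_eval is checked
// against tau via the zk dot-product proof.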
let mut a_sc = vec![];
let mut a_eval_base = vec![]; // All ones
let mut a_eval = vec![];
for i in 0..DIMENSION {
// TODO These should be instance column values?
if i == 0 {
a_sc.push(
self.fq_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(Fq::from(2))),
),
);
} else {
a_sc.push(
self.fq_chip.load_private(
ctx,
FqChip::<F>::fe_to_witness(&Value::known(Fq::from(1))),
),
);
}
}
for _ in 0..DIMENSION {
// TODO These should be instance column values?
a_eval_base.push(
self.fq_chip
.load_private(ctx, FqChip::<F>::fe_to_witness(&Value::known(Fq::from(1)))),
);
}
a_eval.push(
self.fq_chip
.load_private(ctx, FqChip::<F>::fe_to_witness(&Value::known(Fq::from(1)))),
);
for i in 1..DIMENSION {
// TODO These should be instance column values?
if i == 1 {
let a_eval_i_no_carry = self.fq_chip.mul_no_carry(ctx, &a_eval_base[i], &r_i);
let a_eval_i = self.fq_chip.carry_mod(ctx, &a_eval_i_no_carry);
a_eval.push(a_eval_i);
} else {
let a_eval_i_no_carry = self.fq_chip.mul_no_carry(ctx, &a_eval[i - 1], &r_i);
let a_eval_i = self.fq_chip.carry_mod(ctx, &a_eval_i_no_carry);
a_eval.push(a_eval_i);
}
}
let mut a = vec![];
for i in 0..DIMENSION {
let a_i_lhs = self.fq_chip.mul_no_carry(ctx, &a_sc[i], &w_0);
let a_i_rhs = self.fq_chip.mul_no_carry(ctx, &a_eval[i], &w_1);
let a_i_no_carry = self.fq_chip.add_no_carry(ctx, &a_i_lhs, &a_i_rhs);
let a_i = self.fq_chip.carry_mod(ctx, &a_i_no_carry);
a.push(a_i);
}
let zk_dot_prod_chip = ZKDotProdChip::construct(
self.ecc_chip.clone(),
self.fq_chip.clone(),
self.pedersen_chip.clone(),
);
let round_proof: &AssignedZKDotProdProof<DIMENSION, F> = &proof.proofs[i];
zk_dot_prod_chip.verify(
ctx,
&tau,
a.try_into().unwrap(),
com_poly,
round_proof,
gens_1,
gens_n,
transcript,
);
}
self.fp_chip.finalize(ctx);
(
proof.comm_evals[proof.comm_evals.len() - 1].clone(),
r.try_into().unwrap(),
)
}
}

View File

@@ -0,0 +1,10 @@
use super::secq256k1::Secq256k1Chip;
use halo2_base::{utils::PrimeField, Context};
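// Helpers for loading (possibly absent) native values into the halo2 context as assigned
// in-circuit values.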
pub trait Assign<'v, F: PrimeField, A> {
fn assign(&self, ctx: &mut Context<'v, F>, secq_chip: &Secq256k1Chip<F>) -> A;
}
pub trait AssignArray<'v, F: PrimeField, A, const N: usize> {
fn assign(&self, ctx: &mut Context<'v, F>, secq_chip: &Secq256k1Chip<F>) -> [A; N];
}

View File

@@ -0,0 +1,765 @@
#![allow(non_snake_case)]
mod chips;
mod transcript;
use chips::{
dotprod::ZKDotProdChip,
eval_poly::EvalMLPolyChip,
pedersen_commit::PedersenCommitChip,
poly_eval_proof::{AssignN, PolyEvalProofChip},
proof_bullet_reduce::BulletReduceChip,
proof_log_of_dotprod::ProofLogOfDotProdChip,
proof_of_eq::ProofOfEqChip,
proof_of_opening::ZKKnowledgeProofChip,
proof_of_prod::ProofOfProdChip,
secq256k1::Secq256k1Chip,
sumcheck::{AssignZKSumCheckProof, ZKSumCheckChip},
utils::{Assign, AssignArray},
};
use halo2_base::utils::{modulus, PrimeField};
use halo2_ecc::fields::FieldChip;
use halo2_ecc::{
ecc::{fixed_base::FixedEcPoint, EccChip},
fields::fp::{FpConfig, FpStrategy},
};
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner},
plonk,
plonk::{Circuit, Column, ConstraintSystem, Instance},
};
use hoplite::circuit_vals::{FromCircuitVal, ToCircuitVal};
use hoplite::{
circuit_vals::{
CVEqualityProof, CVKnowledgeProof, CVPolyCommitment, CVPolyEvalProof, CVProductProof,
CVSumCheckProof,
},
commitments::{Commitments, MultiCommitGens},
};
use libspartan::{
group::CompressedGroup,
transcript::{ProofTranscript, Transcript},
};
use num_bigint::BigUint;
use num_traits::{One, Zero};
use secpq_curves::{group::Curve, Secq256k1};
use transcript::HopliteTranscript;
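// Field aliases: in this crate `Fp` denotes the base (coordinate) field of Secq256k1 and
// `Fq` its scalar field; note the names are swapped relative to the secpq_curves types.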
pub type Fp = secpq_curves::Fq;
pub type Fq = secpq_curves::Fp;
pub type FqChip<F> = FpConfig<F, secpq_curves::Fp>;
pub type FpChip<F> = FpConfig<F, secpq_curves::Fq>;
#[derive(Clone, Debug)]
pub struct HopliteCircuitConfig<F: PrimeField> {
field_config: FpChip<F>,
/// Public inputs
instance: Column<Instance>,
window_bits: usize,
}
// Spartan NIZK verification circuit: verifies a libspartan NIZK proof of R1CS satisfiability
pub struct HopliteCircuit<
const NUM_INPUTS: usize,
const NUM_CONSTRAINTS: usize,
const NUM_VARS: usize,
const NUM_VARS_H: usize,
> {
pub inst: Vec<u8>,
pub input: Vec<Fq>,
pub comm_vars: CVPolyCommitment<NUM_VARS>,
pub sc_proof_phase1: CVSumCheckProof<NUM_CONSTRAINTS, 4>,
pub claims_phase2: (
Option<Secq256k1>,
Option<Secq256k1>,
Option<Secq256k1>,
Option<Secq256k1>,
),
pub pok_claims_phase2: (CVKnowledgeProof, CVProductProof),
pub proof_eq_sc_phase1: CVEqualityProof,
pub sc_proof_phase2: CVSumCheckProof<14, 3>,
pub comm_vars_at_ry: Option<Secq256k1>,
pub proof_eval_vars_at_ry: CVPolyEvalProof<NUM_VARS_H>,
pub proof_eq_sc_phase2: CVEqualityProof,
pub gens_sc_1: MultiCommitGens,
pub gens_sc_3: MultiCommitGens,
pub gens_sc_4: MultiCommitGens,
pub gens_pc_1: MultiCommitGens,
pub gens_pc_n: MultiCommitGens,
}
pub struct CircuitParams {
strategy: FpStrategy,
degree: u32,
num_advice: usize,
num_lookup_advice: usize,
num_fixed: usize,
lookup_bits: usize,
limb_bits: usize,
num_limbs: usize,
}
impl<
const NUM_INPUTS: usize,
const NUM_CONSTRAINTS: usize,
const NUM_VARS: usize,
const NUM_VARS_H: usize,
F: PrimeField,
> Circuit<F> for HopliteCircuit<NUM_INPUTS, NUM_CONSTRAINTS, NUM_VARS, NUM_VARS_H>
{
type Config = HopliteCircuitConfig<F>;
type FloorPlanner = SimpleFloorPlanner;
fn configure(meta: &mut ConstraintSystem<F>) -> Self::Config {
let params = CircuitParams {
strategy: FpStrategy::Simple,
degree: 21,
num_advice: 20,
num_lookup_advice: 6,
num_fixed: 1,
lookup_bits: 17,
limb_bits: 88,
num_limbs: 3,
};
let field_config = FpChip::<F>::configure(
meta,
params.strategy,
&[params.num_advice],
&[params.num_lookup_advice],
params.num_fixed,
params.lookup_bits,
params.limb_bits,
params.num_limbs,
modulus::<Fp>(),
0,
params.degree as usize,
);
let instance = meta.instance_column();
meta.enable_equality(instance);
HopliteCircuitConfig {
instance,
field_config,
window_bits: 4,
}
}
fn without_witnesses(&self) -> Self {
HopliteCircuit::<NUM_INPUTS, NUM_CONSTRAINTS, NUM_VARS, NUM_VARS_H> {
comm_vars: CVPolyCommitment::<NUM_VARS>::default(),
inst: vec![],
input: vec![Fq::zero(); NUM_INPUTS],
sc_proof_phase1: CVSumCheckProof::<NUM_CONSTRAINTS, 4>::default(),
claims_phase2: (None, None, None, None),
pok_claims_phase2: (CVKnowledgeProof::default(), CVProductProof::default()),
proof_eq_sc_phase1: CVEqualityProof::default(),
sc_proof_phase2: CVSumCheckProof::<14, 3>::default(),
comm_vars_at_ry: None,
proof_eval_vars_at_ry: CVPolyEvalProof::<NUM_VARS_H>::default(),
proof_eq_sc_phase2: CVEqualityProof::default(),
gens_sc_1: MultiCommitGens::default(),
gens_sc_3: MultiCommitGens::default(),
gens_sc_4: MultiCommitGens::default(),
gens_pc_1: MultiCommitGens::default(),
gens_pc_n: MultiCommitGens::default(),
}
}
fn synthesize(
&self,
config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), plonk::Error> {
let n_rounds = 1;
let fp_chip = config.field_config;
fp_chip.range.load_lookup_table(&mut layouter)?;
// Read the chip configuration parameters
let limb_bits = fp_chip.limb_bits;
let num_limbs = fp_chip.num_limbs;
let _num_fixed = fp_chip.range.gate.constants.len();
let _lookup_bits = fp_chip.range.lookup_bits;
let _num_advice = fp_chip.range.gate.num_advice;
// We can construct the fq_chip from the config of the fp_chip
// (the fq_chip can use the same columns as the fp_chip)
let fq_chip =
FqChip::<F>::construct(fp_chip.range.clone(), limb_bits, num_limbs, modulus::<Fq>());
let ecc_chip = EccChip::construct(fp_chip.clone());
let secq_chip = Secq256k1Chip::construct(ecc_chip.clone(), fq_chip.clone());
let pedersen_chip = PedersenCommitChip::construct(ecc_chip.clone(), fp_chip.clone());
let phase_1_zkdotprod_chip: ZKDotProdChip<4, F> =
ZKDotProdChip::construct(ecc_chip.clone(), fq_chip.clone(), pedersen_chip.clone());
let phase_1_zksumcheck_chip = ZKSumCheckChip::construct(
ecc_chip.clone(),
fp_chip.clone(),
fq_chip.clone(),
pedersen_chip.clone(),
phase_1_zkdotprod_chip.clone(),
);
let knowledge_proof_chip = ZKKnowledgeProofChip::construct(
ecc_chip.clone(),
fp_chip.clone(),
fq_chip.clone(),
pedersen_chip.clone(),
4,
);
let proof_of_prod_chip =
ProofOfProdChip::construct(secq_chip.clone(), pedersen_chip.clone(), 4);
let proof_of_eq_chip = ProofOfEqChip::construct(secq_chip.clone(), 4);
let eval_poly_chip = EvalMLPolyChip::<F, NUM_INPUTS>::construct(fp_chip.clone());
// let mut results = Vec::new();
layouter.assign_region(
|| "",
|region| {
let mut ctx = fp_chip.new_context(region);
let mut transcript = Transcript::new(b"test_verify");
transcript.append_protocol_name(b"Spartan NIZK proof");
transcript.append_message(b"R1CSInstanceDigest", &self.inst);
transcript.append_protocol_name(b"R1CS proof");
// Append input to the transcript
transcript.append_message(b"input", b"begin_append_vector");
for item in &self.input {
transcript.append_message(b"input", &item.to_bytes());
}
transcript.append_message(b"input", b"end_append_vector");
// Append poly commitment to the transcript
transcript.append_message(b"poly_commitment", b"poly_commitment_begin");
for comm_var in self.comm_vars.C {
transcript.append_point(
b"poly_commitment_share",
&CompressedGroup::from_circuit_val(&comm_var.unwrap()),
);
}
transcript.append_message(b"poly_commitment", b"poly_commitment_end");
let phase1_expected_sum = Fq::zero().commit(&Fq::zero(), &self.gens_sc_1);
let phase1_expected_sum =
FixedEcPoint::from_curve(phase1_expected_sum.to_affine(), num_limbs, limb_bits);
let phase1_expected_sum = FixedEcPoint::assign(
phase1_expected_sum,
&fp_chip,
&mut ctx,
fp_chip.native_modulus(),
);
let _tau: Vec<Fq> = transcript
.challenge_vector(b"challenge_tau", n_rounds)
.iter()
.map(|tau_i| tau_i.to_circuit_val())
.collect();
let phase1_sc_proof = self.sc_proof_phase1.assign(&mut ctx, &secq_chip);
let (comm_claim_post_phase1, ry) = phase_1_zksumcheck_chip.verify(
&mut ctx,
&phase1_sc_proof,
&self.gens_sc_4,
&self.gens_sc_1,
phase1_expected_sum,
true,
&mut transcript,
);
// Verify Az * Bz = Cz
let (comm_Az_claim, comm_Bz_claim, comm_Cz_claim, comm_prod_Az_Bz_claims) =
&self.claims_phase2;
let (pok_Cz_claim, proof_prod) = &self.pok_claims_phase2;
let pok_Cz_claim = pok_Cz_claim.assign(&mut ctx, &secq_chip);
let proof_prod = proof_prod.assign(&mut ctx, &secq_chip);
let comm_Cz_claim = comm_Cz_claim.assign(&mut ctx, &secq_chip);
// Assign points
let comm_Az_claim = comm_Az_claim.assign(&mut ctx, &secq_chip);
let comm_Bz_claim = comm_Bz_claim.assign(&mut ctx, &secq_chip);
let comm_prod_Az_Bz_claims = comm_prod_Az_Bz_claims.assign(&mut ctx, &secq_chip);
knowledge_proof_chip.verify(
&mut ctx,
&comm_Cz_claim,
pok_Cz_claim,
&self.gens_sc_1,
&mut transcript,
);
proof_of_prod_chip.verify(
&mut ctx,
&comm_Az_claim,
&comm_Bz_claim,
&comm_Cz_claim,
proof_prod,
&self.gens_sc_1,
&mut transcript,
);
transcript.append_circuit_point(b"comm_Az_claim", comm_Az_claim.clone());
transcript.append_circuit_point(b"comm_Bz_claim", comm_Bz_claim.clone());
transcript.append_circuit_point(b"comm_Cz_claim", comm_Cz_claim.clone());
transcript.append_circuit_point(
b"comm_prod_Az_Bz_claims",
comm_prod_Az_Bz_claims.clone(),
);
// eq_eval: the reference verifier also scales this difference by eq(tau, rx)
// (taus_bound_rx); that scalar multiplication is not applied here yet.
let expected_claim_post_phase1 =
ecc_chip.sub_unequal(&mut ctx, &comm_prod_Az_Bz_claims, &comm_Cz_claim, true);
// eq_tau_rx;
let proof_eq_sc_phase1 = self.proof_eq_sc_phase1.assign(&mut ctx, &secq_chip);
proof_of_eq_chip.verify(
&mut ctx,
&expected_claim_post_phase1,
&comm_claim_post_phase1,
proof_eq_sc_phase1,
&self.gens_sc_1,
&mut transcript,
);
let r_A = transcript.challenge_scalar(b"challenege_Az");
let r_B = transcript.challenge_scalar(b"challenege_Bz");
let r_C = transcript.challenge_scalar(b"challenege_Cz");
let r_A = Some(r_A.to_circuit_val()).assign(&mut ctx, &secq_chip);
let r_B = Some(r_B.to_circuit_val()).assign(&mut ctx, &secq_chip);
let r_C = Some(r_C.to_circuit_val()).assign(&mut ctx, &secq_chip);
// M(r_y) = r_A * comm_Az_claim + r_B * comm_Bz_claim + r_C * comm_Cz_claim;
let r_A_comm_Az = ecc_chip.scalar_mult(
&mut ctx,
&comm_Az_claim,
&r_A.truncation.limbs,
limb_bits,
4,
);
let r_B_comm_Bz = ecc_chip.scalar_mult(
&mut ctx,
&comm_Bz_claim,
&r_B.truncation.limbs,
limb_bits,
4,
);
let r_C_comm_Cz = ecc_chip.scalar_mult(
&mut ctx,
&comm_Cz_claim,
&r_C.truncation.limbs,
limb_bits,
4,
);
let r_AB_comm_ABz =
ecc_chip.add_unequal(&mut ctx, &r_A_comm_Az, &r_B_comm_Bz, true);
let comm_claim_phase2 =
ecc_chip.add_unequal(&mut ctx, &r_AB_comm_ABz, &r_C_comm_Cz, true);
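// comm_claim_phase2 = r_A * comm_Az + r_B * comm_Bz + r_C * comm_Cz is the committed
// target sum for the phase-2 sum-check.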
let phase_2_zkdotprod_chip: ZKDotProdChip<3, F> = ZKDotProdChip::construct(
ecc_chip.clone(),
fq_chip.clone(),
pedersen_chip.clone(),
);
let phase_2_zksumcheck_chip = ZKSumCheckChip::construct(
ecc_chip.clone(),
fp_chip.clone(),
fq_chip.clone(),
pedersen_chip.clone(),
phase_2_zkdotprod_chip.clone(),
);
let sc_proof_phase2 = self.sc_proof_phase2.assign(&mut ctx, &secq_chip);
let (comm_claim_post_phase2, ry) = phase_2_zksumcheck_chip.verify(
&mut ctx,
&sc_proof_phase2,
&self.gens_sc_3,
&self.gens_sc_1,
comm_claim_phase2,
false,
&mut transcript,
);
let comm_vars = self.comm_vars.C.assign(&mut ctx, &secq_chip);
let bullet_reduce_chip =
BulletReduceChip::construct(secq_chip.clone(), pedersen_chip.clone(), 4);
let proof_of_log_dotprod_chip = ProofLogOfDotProdChip::construct(
secq_chip.clone(),
bullet_reduce_chip.clone(),
4,
);
let poly_eval_proof_chip = PolyEvalProofChip::construct(
secq_chip.clone(),
proof_of_log_dotprod_chip.clone(),
4,
);
let poly_eval_proof = self.proof_eval_vars_at_ry.assign(&mut ctx, &secq_chip);
let comm_vars_at_ry = self.comm_vars_at_ry.assign(&mut ctx, &secq_chip);
poly_eval_proof_chip.verify(
&mut ctx,
(&ry[1..]).try_into().unwrap(),
&comm_vars_at_ry,
&comm_vars.try_into().unwrap(),
poly_eval_proof,
&self.gens_pc_1,
&self.gens_pc_n,
&mut transcript,
);
// Interpolate the input as a multilinear polynomial and evaluate at ry[1..]
let mut input_with_one: Vec<Fq> = vec![Fq::one()];
input_with_one.extend_from_slice(&self.input);
let mut input_with_one = vec![fp_chip.load_constant(&mut ctx, BigUint::one())];
for i in 1..self.input.len() {
input_with_one.push(fp_chip.load_constant(
&mut ctx,
BigUint::from_bytes_le(&self.input[i].to_bytes()),
));
}
let poly_input_eval = eval_poly_chip.eval(
&mut ctx,
input_with_one.as_slice().try_into().unwrap(),
ry[1..].try_into().unwrap(),
);
let blinder = fp_chip.load_constant(&mut ctx, BigUint::zero());
pedersen_chip.commit(&mut ctx, &poly_input_eval, &blinder, &self.gens_pc_1);
// TODO: TBD
Ok(())
},
)?;
Ok(())
}
}
#[cfg(test)]
#[allow(non_camel_case_types)]
mod tests {
use super::*;
use ark_std::{end_timer, start_timer};
use bincode;
use circuit_reader::load_as_spartan_inst;
use halo2_base::utils::{decompose_biguint, fs::gen_srs};
use halo2_proofs::{dev::MockProver, halo2curves::bn256::Fr};
use halo2_proofs::{
halo2curves::bn256::{Bn256, G1Affine},
plonk::{create_proof, keygen_pk, keygen_vk, verify_proof},
poly::{
commitment::ParamsProver,
kzg::{
commitment::{KZGCommitmentScheme, ParamsVerifierKZG},
multiopen::{ProverSHPLONK, VerifierSHPLONK},
strategy::SingleStrategy,
},
},
transcript::{
Blake2bRead, Blake2bWrite, Challenge255, TranscriptReadBuffer, TranscriptWriterBuffer,
},
};
use hoplite::{circuit_vals::ToCircuitVal, verify_nizk};
use libspartan::{
transcript::Transcript, InputsAssignment, Instance, NIZKGens, VarsAssignment, NIZK,
};
use rand_core::OsRng;
use secpq_curves::group::cofactor::CofactorCurveAffine;
use secpq_curves::Secq256k1Affine;
use std::fs::File;
use std::io::Read;
const NUM_INPUTS: usize = 5;
const NUM_CONSTRAINTS: usize = 8076;
const NUM_VARS: usize = 8097;
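// Generic parameters <NUM_INPUTS, NUM_CONSTRAINTS, NUM_VARS, NUM_VARS_H> sized for the
// pubkey_membership instance: 5 public inputs and 13 phase-1 sum-check rounds (log2 of the
// padded constraint count); the 64 and 7 presumably correspond to the number of
// polynomial-commitment shares and the log-size of the evaluation proof.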
type SPARTAN_ECDSA_CIRCUIT = HopliteCircuit<5, 13, 64, 7>;
fn spartan_ecdsa_circuit() -> SPARTAN_ECDSA_CIRCUIT {
let mut proof_file = File::open("./prover/proof.bin").expect("Proof file not found.");
let mut input_file = File::open("./prover/input.bin").expect("Input file not found.");
let mut proof = vec![];
let mut input = vec![];
proof_file.read_to_end(&mut proof).unwrap();
input_file.read_to_end(&mut input).unwrap();
let proof: NIZK = bincode::deserialize(&proof).unwrap();
let inst = load_as_spartan_inst(
"../circuits/build/pubkey_membership/pubkey_membership.r1cs".into(),
5,
);
let sc_proof_phase1: CVSumCheckProof<13, 4> =
proof.r1cs_sat_proof.sc_proof_phase1.to_circuit_val();
let r1cs_sat_proof = &proof.r1cs_sat_proof;
let claims_phase2 = &r1cs_sat_proof.claims_phase2;
let mut inputs = Vec::new();
for i in 0..NUM_INPUTS {
inputs.push(input[(i * 32)..((i + 1) * 32)].try_into().unwrap());
}
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
let input = assignment_inputs
.assignment
.iter()
.map(|x| x.to_circuit_val())
.collect();
let gens = NIZKGens::new(NUM_CONSTRAINTS, NUM_VARS, NUM_INPUTS);
/*
verify_nizk(
&inst,
num_cons,
num_vars,
&assignment_inputs.assignment,
&proof,
&gens,
);
*/
let circuit = SPARTAN_ECDSA_CIRCUIT {
inst: inst.digest,
input,
comm_vars: r1cs_sat_proof.comm_vars.to_circuit_val(),
sc_proof_phase1: sc_proof_phase1,
sc_proof_phase2: r1cs_sat_proof.sc_proof_phase2.to_circuit_val(),
claims_phase2: (
Some(claims_phase2.0.to_circuit_val()),
Some(claims_phase2.1.to_circuit_val()),
Some(claims_phase2.2.to_circuit_val()),
Some(claims_phase2.3.to_circuit_val()),
),
pok_claims_phase2: (
r1cs_sat_proof.pok_claims_phase2.0.to_circuit_val(),
r1cs_sat_proof.pok_claims_phase2.1.to_circuit_val(),
),
proof_eq_sc_phase1: r1cs_sat_proof.proof_eq_sc_phase1.to_circuit_val(),
proof_eq_sc_phase2: r1cs_sat_proof.proof_eq_sc_phase2.to_circuit_val(),
comm_vars_at_ry: Some(r1cs_sat_proof.comm_vars_at_ry.to_circuit_val()),
proof_eval_vars_at_ry: r1cs_sat_proof.proof_eval_vars_at_ry.to_circuit_val(),
gens_pc_1: gens.gens_r1cs_sat.gens_pc.gens.gens_1.into(),
gens_pc_n: gens.gens_r1cs_sat.gens_pc.gens.gens_n.into(),
gens_sc_1: gens.gens_r1cs_sat.gens_sc.gens_1.into(),
gens_sc_3: gens.gens_r1cs_sat.gens_sc.gens_3.into(),
gens_sc_4: gens.gens_r1cs_sat.gens_sc.gens_4.into(),
};
circuit
}
fn tiny_circuit() -> HopliteCircuit<4, 1, 2, 1> {
// parameters of the R1CS instance
let num_cons = 1;
let num_vars = 0;
let num_inputs = 3;
// We encode the constraint below into three matrices, where
// the coefficients in the matrices are in little-endian byte order
let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new(); // <row, column, value>
let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
// Encode the constraint a * a = z - 13 - b, i.e. z = a^2 + b + 13
A.push((0, num_vars + 2, Fq::one().to_bytes())); // 1*a
B.push((0, num_vars + 2, Fq::one().to_bytes())); // 1*a
C.push((0, num_vars + 1, Fq::one().to_bytes())); // 1*z
C.push((0, num_vars, (-Fq::from(13u64)).to_bytes())); // -13*1
C.push((0, num_vars + 3, (-Fq::one()).to_bytes())); // -1*b
// Variable assignment (this instance has no witness variables)
let vars = vec![Fq::zero().to_bytes(); num_vars];
// Create an InputsAssignment (z = 16, a = 1, b = 2)
let mut inputs = vec![Fq::zero().to_bytes(); num_inputs];
inputs[0] = Fq::from(16u64).to_bytes();
inputs[1] = Fq::from(1u64).to_bytes();
inputs[2] = Fq::from(2u64).to_bytes();
let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
let assignment_vars = VarsAssignment::new(&vars).unwrap();
// Check if instance is satisfiable
let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
let res = inst.is_sat(&assignment_vars, &assignment_inputs);
assert!(res.unwrap(), "should be satisfied");
let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
let mut prover_transcript = Transcript::new(b"test_verify");
let proof = NIZK::prove(
&inst,
assignment_vars,
&assignment_inputs,
&gens,
&mut prover_transcript,
);
verify_nizk::<1, 3>(&inst, &assignment_inputs.assignment, &proof, &gens);
// Convert the phase-1 zk-sumcheck proof into circuit values
let sc_proof_phase1: CVSumCheckProof<1, 4> =
proof.r1cs_sat_proof.sc_proof_phase1.to_circuit_val();
let r1cs_sat_proof = &proof.r1cs_sat_proof;
let claims_phase2 = &r1cs_sat_proof.claims_phase2;
let input = assignment_inputs
.assignment
.iter()
.map(|x| x.to_circuit_val())
.collect();
let circuit = HopliteCircuit::<4, 1, 2, 1> {
inst: inst.digest,
input,
comm_vars: r1cs_sat_proof.comm_vars.to_circuit_val(),
sc_proof_phase1: sc_proof_phase1,
sc_proof_phase2: r1cs_sat_proof.sc_proof_phase2.to_circuit_val(),
claims_phase2: (
Some(claims_phase2.0.to_circuit_val()),
Some(claims_phase2.1.to_circuit_val()),
Some(claims_phase2.2.to_circuit_val()),
Some(claims_phase2.3.to_circuit_val()),
),
pok_claims_phase2: (
r1cs_sat_proof.pok_claims_phase2.0.to_circuit_val(),
r1cs_sat_proof.pok_claims_phase2.1.to_circuit_val(),
),
proof_eq_sc_phase1: r1cs_sat_proof.proof_eq_sc_phase1.to_circuit_val(),
proof_eq_sc_phase2: r1cs_sat_proof.proof_eq_sc_phase2.to_circuit_val(),
comm_vars_at_ry: Some(r1cs_sat_proof.comm_vars_at_ry.to_circuit_val()),
proof_eval_vars_at_ry: r1cs_sat_proof.proof_eval_vars_at_ry.to_circuit_val(),
gens_pc_1: gens.gens_r1cs_sat.gens_pc.gens.gens_1.into(),
gens_pc_n: gens.gens_r1cs_sat.gens_pc.gens.gens_n.into(),
gens_sc_1: gens.gens_r1cs_sat.gens_sc.gens_1.into(),
gens_sc_3: gens.gens_r1cs_sat.gens_sc.gens_3.into(),
gens_sc_4: gens.gens_r1cs_sat.gens_sc.gens_4.into(),
};
circuit
}
#[test]
fn test_tiny_prove() {
// Build a HopliteCircuit from a tiny R1CS instance and check it with the mock prover
let circuit = tiny_circuit();
let k = 12;
let prover = MockProver::<Fr>::run(k, &circuit, vec![vec![]]).unwrap();
assert_eq!(prover.verify(), Ok(()));
}
#[test]
fn test_spartan_ecdsa_mock_prove() {
let circuit = spartan_ecdsa_circuit();
let k = 21;
let prover = MockProver::<Fr>::run(k, &circuit, vec![vec![]]).unwrap();
assert_eq!(prover.verify(), Ok(()));
}
#[test]
fn test_spartan_ecdsa_prove() -> Result<(), Box<dyn std::error::Error>> {
let circuit = spartan_ecdsa_circuit();
let params_gen_timer = start_timer!(|| "Parameters generation");
let params = gen_srs(21);
end_timer!(params_gen_timer);
let vkey_gen_timer = start_timer!(|| "Verification key generation");
let vk = keygen_vk(&params, &circuit)?;
end_timer!(vkey_gen_timer);
let pkey_gen_timer = start_timer!(|| "Proving key generation");
let pk = keygen_pk(&params, vk, &circuit)?;
end_timer!(pkey_gen_timer);
let mut rng = OsRng;
let target = Secq256k1Affine::generator() * secpq_curves::Fp::one();
let x_limbs: Vec<Fr> =
decompose_biguint(&BigUint::from_bytes_le(&target.x.to_bytes()), 3, 88);
let y_limbs: Vec<Fr> =
decompose_biguint(&BigUint::from_bytes_le(&target.y.to_bytes()), 3, 88);
let instances = vec![x_limbs, y_limbs].concat();
let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]);
let proving_timer = start_timer!(|| "Proving");
create_proof::<
KZGCommitmentScheme<Bn256>,
ProverSHPLONK<'_, Bn256>,
Challenge255<G1Affine>,
_,
Blake2bWrite<Vec<u8>, G1Affine, Challenge255<_>>,
_,
>(
&params,
&pk,
&[circuit],
&[&[instances.as_slice()]],
&mut rng,
&mut transcript,
)
.expect("prover should not fail");
let proof = transcript.finalize();
end_timer!(proving_timer);
println!("proof size: {}", proof.len());
let mut verifier_transcript = Blake2bRead::<_, G1Affine, Challenge255<_>>::init(&proof[..]);
let strategy = SingleStrategy::new(&params);
let verifier_params: ParamsVerifierKZG<Bn256> = params.verifier_params().clone();
verify_proof::<
KZGCommitmentScheme<Bn256>,
VerifierSHPLONK<'_, Bn256>,
Challenge255<G1Affine>,
Blake2bRead<&[u8], G1Affine, Challenge255<G1Affine>>,
SingleStrategy<'_, Bn256>,
>(
&verifier_params,
pk.get_vk(),
strategy,
&[&[instances.as_slice()]],
&mut verifier_transcript,
)
.expect("failed to verify bench circuit");
Ok(())
}
}

View File

@@ -0,0 +1,59 @@
use halo2_base::utils::PrimeField;
use halo2_ecc::bigint::CRTInteger;
use halo2_ecc::ecc::EcPoint;
use halo2_proofs::circuit::Value;
use libspartan::{
group::CompressedGroup,
transcript::{ProofTranscript, Transcript},
};
// TODO: Turn this into a transcript chip
pub trait HopliteTranscript<'v, F: PrimeField> {
fn append_circuit_point(&mut self, label: &'static [u8], point: EcPoint<F, CRTInteger<'v, F>>);
fn append_circuit_fq(&mut self, label: &'static [u8], fe: CRTInteger<'v, F>);
}
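// The assigned point / field element is reconstructed from its (unconstrained) witness value
// and absorbed into the native Merlin transcript, so challenges are derived outside the
// circuit rather than being constrained in-circuit.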
impl<'v, F: PrimeField> HopliteTranscript<'v, F> for Transcript {
fn append_circuit_point(
&mut self,
label: &'static [u8],
circuit_point: EcPoint<F, CRTInteger<'v, F>>,
) {
let mut x = [0u8; 32];
let _x = circuit_point.x.value.and_then(|val| {
let mut x_bytes = val.to_bytes_be().1;
x_bytes.resize(32, 0);
x = x_bytes.try_into().unwrap();
Value::known(val)
});
let mut y = [0u8; 32];
let _y = circuit_point.y.value.and_then(|val| {
let mut y_bytes = val.to_bytes_be().1;
y_bytes.resize(32, 0);
y = y_bytes.try_into().unwrap();
Value::known(val)
});
let point = if (x == [0u8; 32]) && (y == [0u8; 32]) {
CompressedGroup::identity()
} else {
CompressedGroup::from_affine_coordinates(&x.into(), &y.into(), true)
};
self.append_point(label, &point);
}
fn append_circuit_fq(&mut self, label: &'static [u8], fe: CRTInteger<'v, F>) {
// TODO: Not sure if this works!
let mut bytes = [0u8; 32];
let _ = fe.value.and_then(|val| {
let mut bytes_be = val.to_bytes_be().1;
bytes_be.resize(32, 0);
bytes = bytes_be.try_into().unwrap();
Value::known(val)
});
self.append_message(label, &bytes);
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "@personaelabs/spartan-ecdsa",
"version": "2.1.1",
"version": "2.0.0",
"main": "./build/lib.js",
"types": "./build/lib.d.ts",
"license": "MIT",

View File

@@ -1,62 +1,33 @@
/* tslint:disable */
/* eslint-disable */
/**
*/
*/
export function init_panic_hook(): void;
/**
* @param {Uint8Array} circuit
* @param {Uint8Array} vars
* @param {Uint8Array} public_inputs
* @returns {Uint8Array}
*/
export function prove(
circuit: Uint8Array,
vars: Uint8Array,
public_inputs: Uint8Array
): Uint8Array;
* @param {Uint8Array} circuit
* @param {Uint8Array} vars
* @param {Uint8Array} public_inputs
* @returns {Uint8Array}
*/
export function prove(circuit: Uint8Array, vars: Uint8Array, public_inputs: Uint8Array): Uint8Array;
/**
* @param {Uint8Array} circuit
* @param {Uint8Array} proof
* @param {Uint8Array} public_input
* @returns {boolean}
*/
export function verify(
circuit: Uint8Array,
proof: Uint8Array,
public_input: Uint8Array
): boolean;
* @param {Uint8Array} circuit
* @param {Uint8Array} proof
* @param {Uint8Array} public_input
* @returns {boolean}
*/
export function verify(circuit: Uint8Array, proof: Uint8Array, public_input: Uint8Array): boolean;
/**
* @param {Uint8Array} input_bytes
* @returns {Uint8Array}
*/
* @param {Uint8Array} input_bytes
* @returns {Uint8Array}
*/
export function poseidon(input_bytes: Uint8Array): Uint8Array;
export type InitInput =
| RequestInfo
| URL
| Response
| BufferSource
| WebAssembly.Module;
export type InitInput = RequestInfo | URL | Response | BufferSource | WebAssembly.Module;
export interface InitOutput {
readonly prove: (
a: number,
b: number,
c: number,
d: number,
e: number,
f: number,
g: number
) => void;
readonly verify: (
a: number,
b: number,
c: number,
d: number,
e: number,
f: number,
g: number
) => void;
readonly prove: (a: number, b: number, c: number, d: number, e: number, f: number, g: number) => void;
readonly verify: (a: number, b: number, c: number, d: number, e: number, f: number, g: number) => void;
readonly poseidon: (a: number, b: number, c: number) => void;
readonly init_panic_hook: () => void;
readonly memory: WebAssembly.Memory;
@@ -65,35 +36,29 @@ export interface InitOutput {
readonly __wbindgen_free: (a: number, b: number) => void;
readonly __wbindgen_exn_store: (a: number) => void;
readonly __wbindgen_realloc: (a: number, b: number, c: number) => number;
readonly __wbindgen_thread_destroy: (a: number, b: number) => void;
readonly __wbindgen_thread_destroy: () => void;
readonly __wbindgen_start: () => void;
}
export type SyncInitInput = BufferSource | WebAssembly.Module;
/**
* Instantiates the given `module`, which can either be bytes or
* a precompiled `WebAssembly.Module`.
*
* @param {SyncInitInput} module
* @param {WebAssembly.Memory} maybe_memory
*
* @returns {InitOutput}
*/
export function initSync(
module: SyncInitInput,
maybe_memory?: WebAssembly.Memory
): InitOutput;
* Instantiates the given `module`, which can either be bytes or
* a precompiled `WebAssembly.Module`.
*
* @param {SyncInitInput} module
* @param {WebAssembly.Memory} maybe_memory
*
* @returns {InitOutput}
*/
export function initSync(module: SyncInitInput, maybe_memory?: WebAssembly.Memory): InitOutput;
/**
* If `module_or_path` is {RequestInfo} or {URL}, makes a request and
* for everything else, calls `WebAssembly.instantiate` directly.
*
* @param {InitInput | Promise<InitInput>} module_or_path
* @param {WebAssembly.Memory} maybe_memory
*
* @returns {Promise<InitOutput>}
*/
export default function init(
module_or_path?: InitInput | Promise<InitInput>,
maybe_memory?: WebAssembly.Memory
): Promise<InitOutput>;
* If `module_or_path` is {RequestInfo} or {URL}, makes a request and
* for everything else, calls `WebAssembly.instantiate` directly.
*
* @param {InitInput | Promise<InitInput>} module_or_path
* @param {WebAssembly.Memory} maybe_memory
*
* @returns {Promise<InitOutput>}
*/
export default function init (module_or_path?: InitInput | Promise<InitInput>, maybe_memory?: WebAssembly.Memory): Promise<InitOutput>;

View File

@@ -247,16 +247,16 @@ async function load(module, imports) {
function getImports() {
const imports = {};
imports.wbg = {};
imports.wbg.__wbg_getRandomValues_3774744e221a22ad = function() { return handleError(function (arg0, arg1) {
getObject(arg0).getRandomValues(getObject(arg1));
imports.wbg.__wbg_randomFillSync_6894564c2c334c42 = function() { return handleError(function (arg0, arg1, arg2) {
getObject(arg0).randomFillSync(getArrayU8FromWasm0(arg1, arg2));
}, arguments) };
imports.wbg.__wbindgen_object_drop_ref = function(arg0) {
takeObject(arg0);
};
imports.wbg.__wbg_randomFillSync_e950366c42764a07 = function() { return handleError(function (arg0, arg1) {
getObject(arg0).randomFillSync(takeObject(arg1));
imports.wbg.__wbg_getRandomValues_805f1c3d65988a5a = function() { return handleError(function (arg0, arg1) {
getObject(arg0).getRandomValues(getObject(arg1));
}, arguments) };
imports.wbg.__wbg_crypto_70a96de3b6b73dac = function(arg0) {
imports.wbg.__wbg_crypto_e1d53a1d73fb10b8 = function(arg0) {
const ret = getObject(arg0).crypto;
return addHeapObject(ret);
};
@@ -265,15 +265,15 @@ function getImports() {
const ret = typeof(val) === 'object' && val !== null;
return ret;
};
imports.wbg.__wbg_process_dd1577445152112e = function(arg0) {
imports.wbg.__wbg_process_038c26bf42b093f8 = function(arg0) {
const ret = getObject(arg0).process;
return addHeapObject(ret);
};
imports.wbg.__wbg_versions_58036bec3add9e6f = function(arg0) {
imports.wbg.__wbg_versions_ab37218d2f0b24a8 = function(arg0) {
const ret = getObject(arg0).versions;
return addHeapObject(ret);
};
imports.wbg.__wbg_node_6a9d28205ed5b0d8 = function(arg0) {
imports.wbg.__wbg_node_080f4b19d15bc1fe = function(arg0) {
const ret = getObject(arg0).node;
return addHeapObject(ret);
};
@@ -281,11 +281,11 @@ function getImports() {
const ret = typeof(getObject(arg0)) === 'string';
return ret;
};
imports.wbg.__wbg_msCrypto_adbc770ec9eca9c7 = function(arg0) {
imports.wbg.__wbg_msCrypto_6e7d3e1f92610cbb = function(arg0) {
const ret = getObject(arg0).msCrypto;
return addHeapObject(ret);
};
imports.wbg.__wbg_require_f05d779769764e82 = function() { return handleError(function () {
imports.wbg.__wbg_require_78a3dcfbdba9cbce = function() { return handleError(function () {
const ret = module.require;
return addHeapObject(ret);
}, arguments) };
@@ -337,10 +337,6 @@ function getImports() {
const ret = getObject(arg0).buffer;
return addHeapObject(ret);
};
imports.wbg.__wbg_newwithbyteoffsetandlength_9fb2f11355ecadf5 = function(arg0, arg1, arg2) {
const ret = new Uint8Array(getObject(arg0), arg1 >>> 0, arg2 >>> 0);
return addHeapObject(ret);
};
imports.wbg.__wbg_new_537b7341ce90bb31 = function(arg0) {
const ret = new Uint8Array(getObject(arg0));
return addHeapObject(ret);
@@ -348,6 +344,10 @@ function getImports() {
imports.wbg.__wbg_set_17499e8aa4003ebd = function(arg0, arg1, arg2) {
getObject(arg0).set(getObject(arg1), arg2 >>> 0);
};
imports.wbg.__wbg_length_27a2afe8ab42b09f = function(arg0) {
const ret = getObject(arg0).length;
return ret;
};
imports.wbg.__wbg_newwithlength_b56c882b57805732 = function(arg0) {
const ret = new Uint8Array(arg0 >>> 0);
return addHeapObject(ret);
@@ -399,18 +399,23 @@ function finalizeInit(instance, module) {
return wasm;
}
function initSync(module, maybe_memory) {
const imports = getImports();
async function initSync(module, maybe_memory) {
if (!wasm) {
const imports = getImports();
initMemory(imports, maybe_memory);
if (!(module instanceof WebAssembly.Module)) {
module = new WebAssembly.Module(module);
initMemory(imports, maybe_memory);
/*
if (!(module instanceof WebAssembly.Module)) {
module = new WebAssembly.Module(module);
}
*/
const compiled = WebAssembly.compile(module);
const instance = await WebAssembly.instantiate(await compiled, imports);
return finalizeInit(instance, module);
}
const instance = new WebAssembly.Instance(module, imports);
return finalizeInit(instance, module);
}
async function init(input, maybe_memory) {

View File

@@ -11,7 +11,7 @@ crate-type = ["cdylib", "rlib"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
spartan = { path = "../Spartan-secq" }
spartan = { git = "https://github.com/DanTehrani/Spartan-secq.git", branch="hoplite" }
wasm-bindgen = { version = "0.2.81", features = ["serde-serialize"]}
console_error_panic_hook = "0.1.7"
merlin = "3.0.0"