Compare commits

...

25 Commits

Author  SHA1  Message  Date
Mengran Lan  dbaf8531b1  upgrade snark-verifier-sdk  2024-05-30 22:35:56 +08:00
Mengran Lan  0ca302a654  upgrade to rc4  2024-05-30 22:09:11 +08:00
Mengran Lan  124d10820a  enable gzip when sending http request  2024-05-30 14:44:18 +08:00
Mengran Lan  614a894aa1  add sleep logic when failed to fetch task from coordinator  2024-05-28 15:19:54 +08:00
Mengran Lan  c4f54da7ca  upgrade zk-circuits to v0.11.0rc2  2024-05-28 10:31:58 +08:00
Mengran Lan  46f5849ae0  comment types for next  2024-05-27 17:24:31 +08:00
Mengran Lan  45d8f66864  update cargo depends  2024-05-27 17:19:02 +08:00
Mengran Lan  94e1ea3a08  change prover_next version to fit the e2e test  2024-05-27 16:53:44 +08:00
Mengran Lan  62c1f00d3b  copy libzktrie.so to lib dir  2024-05-27 15:54:49 +08:00
Mengran Lan  038d7a5bbf  tmp commit, test next handler wrapper logic (set next handler as default)  2024-05-26 23:13:11 +08:00
Mengran Lan  112e9ac42b  add task_cache logic  2024-05-24 13:07:10 +08:00
Mengran Lan  728266ebad  add info logs for circuits handler  2024-05-23 11:31:09 +08:00
Mengran Lan  7b8f30d230  add second zkevm-handler && add proof_check when proving batch  2024-05-22 18:47:06 +08:00
Mengran Lan  69ca648c83  utilize proof_status logic  2024-05-22 15:50:06 +08:00
Mengran Lan  00a07a8258  build using --rlease && fix bug in proof status  2024-05-22 11:41:28 +08:00
Mengran Lan  f87e5b5ca7  fix bug, action not taken if re-login to coordinator  2024-05-21 23:48:29 +08:00
Mengran Lan  7b848f971b  fmt code  2024-05-21 12:08:31 +08:00
Mengran Lan  49166ec8d0  change l2geth config to option  2024-05-21 12:06:55 +08:00
Mengran Lan  2d0c36eb5a  geth client add tokio runtime  2024-05-20 22:47:46 +08:00
Mengran Lan  445a8d592a  unify coordinator client api, add logs  2024-05-20 19:03:18 +08:00
Mengran Lan  eadc51d33b  set vk in get task request  2024-05-20 16:18:00 +08:00
Mengran Lan  254a7faf58  init the log; add tokio runtime  2024-05-20 16:15:06 +08:00
Mengran Lan  173cbc4dc4  first compile-ready version  2024-05-16 11:17:16 +08:00
Mengran Lan  94bd5917ba  finish most logic, leaving some rust-style compiler issue to be solved  2024-05-15 14:28:01 +08:00
Mengran Lan  107aa5792b  tmp save  2024-05-13 15:59:06 +08:00
27 changed files with 7025 additions and 0 deletions

.gitignore (vendored, +2)

@@ -20,3 +20,5 @@ coverage.txt
# misc
sftp-config.json
*~
target

prover_rust/Cargo.lock (generated, new file, +5134)
File diff suppressed because it is too large.

prover_rust/Cargo.toml (new file, +46)

@@ -0,0 +1,46 @@
[package]
name = "prover_rust"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[patch.crates-io]
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }

[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }

[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }

[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }

[dependencies]
anyhow = "1.0"
log = "0.4"
serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.0rc4", default-features = false, features = ["parallel_syn", "scroll"] }
eth-types = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3" }
base64 = "0.13.1"
reqwest = { version = "0.12.4", features = ["gzip"] }
once_cell = "1.19.0"
hex = "0.4.3"
tiny-keccak = { version = "2.0.0", features = ["sha3", "keccak"] }
rand = "0.8.5"
eth-keystore = "0.5.0"
rlp = "0.5.2"
tokio = "1.37.0"
env_logger = "0.11.3"
sled = "0.34.7"
http = "1.1.0"

prover_rust/Makefile (new file, +27)

@@ -0,0 +1,27 @@
.PHONY: prover

ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif

HALO2_GPU_VERSION=$(shell ./print_halo2gpu_version.sh | sed -n '2p')
GIT_REV=$(shell git rev-parse --short HEAD)

ifeq (${HALO2_GPU_VERSION},)
# use halo2_proofs with CPU
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_GPU_VERSION}
endif

prover:
	GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release
	rm -rf ./lib && mkdir ./lib
	find target/ -name "libzktrie.so" | xargs -I{} cp {} ./lib

prover_rust/config.json (new file, +22)

@@ -0,0 +1,22 @@
{
    "prover_name": "prover-1",
    "hard_fork_name": "homestead",
    "keystore_path": "keystore.json",
    "keystore_password": "prover-pwd",
    "db_path": "unique-db-path-for-prover-1",
    "core": {
        "params_path": "params",
        "assets_path": "assets",
        "proof_type": 2
    },
    "coordinator": {
        "base_url": "http://localhost:8555",
        "retry_count": 10,
        "retry_wait_time_sec": 10,
        "connection_timeout_sec": 30
    },
    "l2geth": {
        "endpoint": "http://localhost:9999",
        "confirmations": "0x1"
    }
}

prover_rust/print_halo2gpu_version.sh (new file, +21)

@@ -0,0 +1,21 @@
#!/bin/bash

config_file="$HOME/.cargo/config"

if [ ! -e "$config_file" ]; then
    exit 0
fi

if [[ $(head -n 1 "$config_file") == "#"* ]]; then
    exit 0
fi

halo2gpu_path=$(grep -Po '(?<=paths = \[")([^"]*)' "$config_file")

# pushd prints the directory stack; the Makefile reads the commit hash
# from the second output line via `sed -n '2p'`.
pushd "$halo2gpu_path"
commit_hash=$(git log --pretty=format:%h -n 1)
echo "${commit_hash:0:7}"
popd

prover_rust/rust-toolchain (new file, +1)

@@ -0,0 +1 @@
nightly-2023-12-03

prover_rust/rustfmt.toml (new file, +9)

@@ -0,0 +1,9 @@
edition = "2021"
comment_width = 100
imports_granularity = "Crate"
max_width = 100
newline_style = "Unix"
# normalize_comments = true
reorder_imports = true
wrap_comments = true

prover_rust/src/config.rs (new file, +57)

@@ -0,0 +1,57 @@
use ethers_core::types::BlockNumber;
use serde::{Deserialize, Serialize};
// use serde_json::Error;
use std::{error::Error, fs::File};

use crate::types::ProofType;

#[derive(Debug, Serialize, Deserialize)]
pub struct ProverCoreConfig {
    pub params_path: String,
    pub assets_path: String,
    #[serde(default)]
    pub proof_type: ProofType,
    #[serde(default)]
    pub dump_dir: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct CoordinatorConfig {
    pub base_url: String,
    pub retry_count: u16,
    pub retry_wait_time_sec: u32,
    pub connection_timeout_sec: u32,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct L2GethConfig {
    pub endpoint: String,
    pub confirmations: BlockNumber,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Config {
    pub prover_name: String,
    pub hard_fork_name: String,
    pub keystore_path: String,
    pub keystore_password: String,
    pub db_path: String,
    pub core: ProverCoreConfig,
    pub coordinator: CoordinatorConfig,
    pub l2geth: Option<L2GethConfig>,
}

impl Config {
    pub fn from_reader<R>(reader: R) -> Result<Self, Box<dyn Error>>
    where
        R: std::io::Read,
    {
        serde_json::from_reader(reader).map_err(|e| Box::new(e) as Box<dyn Error>)
    }

    pub fn from_file(file_name: String) -> Result<Self, Box<dyn Error>> {
        let file = File::open(file_name)?;
        Config::from_reader(&file)
    }
}
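A minimal usage sketch (not part of the diff): loading the config.json shown above through Config::from_file, as main.rs does further down.

fn load() -> Result<(), Box<dyn std::error::Error>> {
    // "config.json" is the file added earlier in this diff.
    let config = Config::from_file("config.json".to_string())?;
    // proof_type follows types::ProofType: 1 = chunk, 2 = batch.
    println!("{} proves {:?}", config.prover_name, config.core.proof_type);
    Ok(())
}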

prover_rust/src/coordinator_client.rs (new file, +137)

@@ -0,0 +1,137 @@
mod api;
mod errors;
pub mod listener;
pub mod types;

use anyhow::{bail, Context, Ok, Result};
use std::rc::Rc;

use api::API;
use errors::*;
use listener::Listener;
use log;
use tokio::runtime::Runtime;
use types::*;

use crate::key_signer::KeySigner;

pub struct Config {
    pub endpoint: String,
    pub prover_name: String,
    pub prover_version: String,
    pub hard_fork_name: String,
}

pub struct CoordinatorClient {
    api: API,
    token: Option<String>,
    config: Config,
    key_signer: Rc<KeySigner>,
    rt: Runtime,
    listener: Box<dyn Listener>,
}

impl CoordinatorClient {
    pub fn new(
        config: Config,
        key_signer: Rc<KeySigner>,
        listener: Box<dyn Listener>,
    ) -> Result<Self> {
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()?;
        let mut client = Self {
            api: API::new(&config.endpoint)?,
            token: None,
            config,
            key_signer,
            rt,
            listener,
        };
        client.login()?;
        Ok(client)
    }

    fn login(&mut self) -> Result<()> {
        let api = &self.api;
        let challenge_response = self.rt.block_on(api.challenge())?;
        if challenge_response.errcode != Success {
            bail!("challenge failed: {}", challenge_response.errmsg)
        }
        let mut token: String;
        if let Some(r) = challenge_response.data {
            token = r.token;
        } else {
            bail!("challenge failed: got empty token")
        }

        let login_message = LoginMessage {
            challenge: token.clone(),
            prover_name: self.config.prover_name.clone(),
            prover_version: self.config.prover_version.clone(),
            hard_fork_name: self.config.hard_fork_name.clone(),
        };

        let buffer = login_message.rlp();
        let signature = self.key_signer.sign_buffer(&buffer)?;
        let login_request = LoginRequest {
            message: login_message,
            signature,
        };
        let login_response = self.rt.block_on(api.login(&login_request, &token))?;
        if login_response.errcode != Success {
            bail!("login failed: {}", login_response.errmsg)
        }
        if let Some(r) = login_response.data {
            token = r.token;
        } else {
            bail!("login failed: got empty token")
        }
        self.token = Some(token);
        Ok(())
    }

    fn action_with_re_login<T, F, R>(&mut self, req: &R, mut f: F) -> Result<Response<T>>
    where
        F: FnMut(&mut Self, &R) -> Result<Response<T>>,
    {
        let response = f(self, req)?;
        if response.errcode == ErrJWTTokenExpired {
            log::info!("JWT expired, attempting to re-login");
            self.login().context("JWT expired, re-login failed")?;
            log::info!("re-login success");
            return f(self, req);
        } else if response.errcode != Success {
            bail!("action failed: {}", response.errmsg)
        }
        Ok(response)
    }

    fn do_get_task(&mut self, req: &GetTaskRequest) -> Result<Response<GetTaskResponseData>> {
        self.rt
            .block_on(self.api.get_task(req, self.token.as_ref().unwrap()))
    }

    pub fn get_task(&mut self, req: &GetTaskRequest) -> Result<Response<GetTaskResponseData>> {
        self.action_with_re_login(req, |s, req| s.do_get_task(req))
    }

    fn do_submit_proof(
        &mut self,
        req: &SubmitProofRequest,
    ) -> Result<Response<SubmitProofResponseData>> {
        let response = self
            .rt
            .block_on(self.api.submit_proof(req, self.token.as_ref().unwrap()))?;
        self.listener.on_proof_submitted(req);
        Ok(response)
    }

    pub fn submit_proof(
        &mut self,
        req: &SubmitProofRequest,
    ) -> Result<Response<SubmitProofResponseData>> {
        self.action_with_re_login(req, |s, req| s.do_submit_proof(req))
    }
}
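The retry flow above is worth isolating. A standalone sketch of the same pattern, with illustrative names (ApiResponse and the two constants mirror Response and errors.rs below): run the request once, re-login if the coordinator reports an expired JWT, then retry exactly once.

use anyhow::{bail, Result};

const SUCCESS: i32 = 0;
const ERR_JWT_TOKEN_EXPIRED: i32 = 50001;

struct ApiResponse<T> {
    errcode: i32,
    errmsg: String,
    data: Option<T>,
}

fn with_re_login<T>(
    mut call: impl FnMut() -> Result<ApiResponse<T>>,
    mut login: impl FnMut() -> Result<()>,
) -> Result<ApiResponse<T>> {
    let resp = call()?;
    if resp.errcode == ERR_JWT_TOKEN_EXPIRED {
        login()?; // refresh the token, then retry the action once
        return call();
    }
    if resp.errcode != SUCCESS {
        bail!("action failed: {}", resp.errmsg);
    }
    Ok(resp)
}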

prover_rust/src/coordinator_client/api.rs (new file, +105)

@@ -0,0 +1,105 @@
use super::types::*;
use anyhow::{bail, Result};
use reqwest::{header::CONTENT_TYPE, Url};
use serde::Serialize;

pub struct API {
    url_base: Url,
    pub client: reqwest::Client,
}

impl API {
    pub fn new(url_base: &String) -> Result<Self> {
        Ok(Self {
            url_base: Url::parse(url_base)?,
            client: reqwest::Client::new(),
        })
    }

    pub async fn challenge(&self) -> Result<Response<ChallengeResponseData>> {
        let method = "/coordinator/v1/challenge";
        let url = self.build_url(method)?;
        let response = self
            .client
            .get(url)
            .header(CONTENT_TYPE, "application/json")
            .send()
            .await?;

        let response_body = response.text().await?;

        serde_json::from_str(&response_body).map_err(|e| anyhow::anyhow!(e))
    }

    pub async fn login(
        &self,
        req: &LoginRequest,
        token: &String,
    ) -> Result<Response<LoginResponseData>> {
        let method = "/coordinator/v1/login";
        self.post_with_token(method, req, token).await
    }

    pub async fn get_task(
        &self,
        req: &GetTaskRequest,
        token: &String,
    ) -> Result<Response<GetTaskResponseData>> {
        let method = "/coordinator/v1/get_task";
        self.post_with_token(method, req, token).await
    }

    pub async fn submit_proof(
        &self,
        req: &SubmitProofRequest,
        token: &String,
    ) -> Result<Response<SubmitProofResponseData>> {
        let method = "/coordinator/v1/submit_proof";
        self.post_with_token(method, req, token).await
    }

    async fn post_with_token<Req, Resp>(
        &self,
        method: &str,
        req: &Req,
        token: &String,
    ) -> Result<Resp>
    where
        Req: ?Sized + Serialize,
        Resp: serde::de::DeserializeOwned,
    {
        let url = self.build_url(method)?;
        let request_body = serde_json::to_string(req)?;

        log::info!("[coordinator client], {method}, request: {request_body}");

        let response = self
            .client
            .post(url)
            .header(CONTENT_TYPE, "application/json")
            .bearer_auth(token)
            .body(request_body)
            .send()
            .await?;

        if response.status() != http::status::StatusCode::OK {
            log::error!(
                "[coordinator client], {method}, status not ok: {}",
                response.status()
            );
            bail!(
                "[coordinator client], {method}, status not ok: {}",
                response.status()
            )
        }

        let response_body = response.text().await?;

        log::info!("[coordinator client], {method}, response: {response_body}");
        serde_json::from_str(&response_body).map_err(|e| anyhow::anyhow!(e))
    }

    fn build_url(&self, method: &str) -> Result<Url> {
        self.url_base.join(method).map_err(|e| anyhow::anyhow!(e))
    }
}

prover_rust/src/coordinator_client/errors.rs (new file, +17)

@@ -0,0 +1,17 @@
// TODO: refactor using enum
pub type ErrorCode = i32;
pub const Success: ErrorCode = 0;
pub const InternalServerError: ErrorCode = 500;
pub const ErrJWTCommonErr: ErrorCode = 50000;
pub const ErrJWTTokenExpired: ErrorCode = 50001;
pub const ErrProverStatsAPIParameterInvalidNo: ErrorCode = 10001;
pub const ErrProverStatsAPIProverTaskFailure: ErrorCode = 10002;
pub const ErrProverStatsAPIProverTotalRewardFailure: ErrorCode = 10003;
pub const ErrCoordinatorParameterInvalidNo: ErrorCode = 20001;
pub const ErrCoordinatorGetTaskFailure: ErrorCode = 20002;
pub const ErrCoordinatorHandleZkProofFailure: ErrorCode = 20003;
pub const ErrCoordinatorEmptyProofData: ErrorCode = 20004;
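One possible shape for the TODO above, purely a sketch: keep the wire format as a plain i32 but give the codes a typed enum (only a few variants shown).

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ErrorCodeEnum {
    Success,
    InternalServerError,
    JwtTokenExpired,
    Other(i32), // catch-all for codes this prover does not interpret
}

impl From<i32> for ErrorCodeEnum {
    fn from(v: i32) -> Self {
        match v {
            0 => Self::Success,
            500 => Self::InternalServerError,
            50001 => Self::JwtTokenExpired,
            other => Self::Other(other),
        }
    }
}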

prover_rust/src/coordinator_client/listener.rs (new file, +5)

@@ -0,0 +1,5 @@
use super::SubmitProofRequest;

pub trait Listener {
    fn on_proof_submitted(&self, req: &SubmitProofRequest);
}

prover_rust/src/coordinator_client/types.rs (new file, +76)

@@ -0,0 +1,76 @@
use crate::types::{ProofFailureType, ProofStatus};
use rlp::RlpStream;
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
pub struct Response<T> {
    pub errcode: i32,
    pub errmsg: String,
    pub data: Option<T>,
}

#[derive(Serialize, Deserialize)]
pub struct LoginMessage {
    pub challenge: String,
    pub prover_name: String,
    pub prover_version: String,
    pub hard_fork_name: String,
}

impl LoginMessage {
    pub fn rlp(&self) -> Vec<u8> {
        let mut rlp = RlpStream::new();
        let num_fields = 4;
        rlp.begin_list(num_fields);
        rlp.append(&self.prover_name);
        rlp.append(&self.prover_version);
        rlp.append(&self.challenge);
        rlp.append(&self.hard_fork_name);
        rlp.out().freeze().into()
    }
}

#[derive(Serialize, Deserialize)]
pub struct LoginRequest {
    pub message: LoginMessage,
    pub signature: String,
}

#[derive(Serialize, Deserialize)]
pub struct LoginResponseData {
    pub time: String,
    pub token: String,
}

pub type ChallengeResponseData = LoginResponseData;

#[derive(Default, Serialize, Deserialize)]
pub struct GetTaskRequest {
    pub task_type: crate::types::ProofType,
    pub prover_height: Option<u64>,
    pub vks: Vec<String>,
    pub vk: String,
}

#[derive(Serialize, Deserialize)]
pub struct GetTaskResponseData {
    pub uuid: String,
    pub task_id: String,
    pub task_type: crate::types::ProofType,
    pub task_data: String,
    pub hard_fork_name: Option<String>,
}

#[derive(Serialize, Deserialize, Default)]
pub struct SubmitProofRequest {
    pub uuid: String,
    pub task_id: String,
    pub task_type: crate::types::ProofType,
    pub status: ProofStatus,
    pub proof: String,
    pub failure_type: Option<ProofFailureType>,
    pub failure_msg: Option<String>,
}

#[derive(Serialize, Deserialize)]
pub struct SubmitProofResponseData {}
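How these types are used during login (see login() in coordinator_client.rs above, and KeySigner below); a sketch with placeholder values. Note that rlp() fixes the field order (name, version, challenge, fork), so the coordinator must hash the same ordering when it verifies the signature.

use crate::key_signer::KeySigner;
use anyhow::Result;

fn build_login_request(key_signer: &KeySigner, challenge: String) -> Result<LoginRequest> {
    let message = LoginMessage {
        challenge,
        prover_name: "prover-1".to_string(),                // placeholder
        prover_version: "tag-commit-zkversion".to_string(), // placeholder
        hard_fork_name: "homestead".to_string(),            // placeholder
    };
    // The signature covers keccak256 of the RLP-encoded message.
    let signature = key_signer.sign_buffer(&message.rlp())?;
    Ok(LoginRequest { message, signature })
}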

prover_rust/src/geth_client.rs (new file, +81)

@@ -0,0 +1,81 @@
pub mod types;

use crate::types::CommonHash;
use anyhow::Result;
use ethers_core::types::BlockNumber;
use tokio::runtime::Runtime;
use types::{BlockTrace, Header};

use ethers_providers::{Http, Provider};

/// Serialize a type.
///
/// # Panics
///
/// If the type returns an error during serialization.
pub fn serialize<T: serde::Serialize>(t: &T) -> serde_json::Value {
    serde_json::to_value(t).expect("Types never fail to serialize.")
}

pub struct GethClient {
    id: String,
    provider: Provider<Http>,
    rt: Runtime,
}

impl GethClient {
    pub fn new(id: &str, api_url: &str) -> Result<Self> {
        let provider = Provider::<Http>::try_from(api_url)?;
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()?;
        Ok(Self {
            id: id.to_string(),
            provider,
            rt,
        })
    }

    pub fn get_block_trace_by_hash(&mut self, hash: &CommonHash) -> Result<BlockTrace> {
        log::info!(
            "{}: calling get_block_trace_by_hash, hash: {}",
            self.id,
            hash
        );
        let trace_future = self
            .provider
            .request("scroll_getBlockTraceByNumberOrHash", [format!("{hash:#x}")]);
        let trace = self.rt.block_on(trace_future)?;
        Ok(trace)
    }

    pub fn header_by_number(&mut self, block_number: &BlockNumber) -> Result<Header> {
        log::info!(
            "{}: calling header_by_number, block number: {}",
            self.id,
            block_number
        );
        let hash = serialize(block_number);
        let include_txs = serialize(&false);
        let trace_future = self
            .provider
            .request("eth_getBlockByNumber", [hash, include_txs]);
        let trace = self.rt.block_on(trace_future)?;
        Ok(trace)
    }

    pub fn block_number(&mut self) -> Result<BlockNumber> {
        log::info!("{}: calling block_number", self.id);
        let trace_future = self.provider.request("eth_blockNumber", ());
        let trace = self.rt.block_on(trace_future)?;
        Ok(trace)
    }
}
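A design note: GethClient owns a current-thread tokio Runtime and calls block_on internally, so callers stay fully synchronous. A minimal usage sketch (the endpoint echoes config.json above):

fn latest_l2_block() -> anyhow::Result<()> {
    // Endpoint mirrors the l2geth entry in config.json above.
    let mut client = GethClient::new("prover-1", "http://localhost:9999")?;
    let number = client.block_number()?;
    log::info!("latest l2 block: {number}");
    Ok(())
}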

prover_rust/src/geth_client/types.rs (new file, +40)

@@ -0,0 +1,40 @@
use eth_types::{H256, U64};
use serde::{Deserialize, Serialize};

use crate::types::CommonHash;
use prover::BlockTrace as ProverBlockTrace;

/// l2 block full trace
#[derive(Deserialize, Serialize, Default, Debug, Clone)]
pub struct BlockTrace {
    #[serde(flatten)]
    pub block_trace: ProverBlockTrace,
    pub version: String,
    pub withdraw_trie_root: Option<CommonHash>,
    #[serde(rename = "mptwitness", default)]
    pub mpt_witness: Vec<u8>,
}

pub fn get_block_number(block_trace: &ProverBlockTrace) -> Option<u64> {
    block_trace.header.number.map(|n| n.as_u64())
}

pub type TxHash = H256;

/// This struct tracks
/// https://github.com/scroll-tech/go-ethereum/blob/0f0cd99f7a2e/core/types/block.go#Header.
/// Its fields are not yet a 100% match for eth_types::Block, so it may need to change at
/// some point; currently only the `number` field is required.
#[derive(Debug, Deserialize, Serialize, Default)]
pub struct Header {
    #[serde(flatten)]
    block: eth_types::Block<TxHash>,
}

impl Header {
    pub fn get_number(&self) -> Option<U64> {
        self.block.number
    }
}

prover_rust/src/key_signer.rs (new file, +105)

@@ -0,0 +1,105 @@
use std::path::Path;

use anyhow::Result;
use ethers_core::{
    k256::{
        ecdsa::{signature::hazmat::PrehashSigner, RecoveryId, Signature, SigningKey},
        elliptic_curve::{sec1::ToEncodedPoint, FieldBytes},
        PublicKey, Secp256k1, SecretKey,
    },
    types::Signature as EthSignature,
};

use eth_types::{H256, U256};
use hex::ToHex;
use tiny_keccak::{Hasher, Keccak};

pub struct KeySigner {
    public_key: PublicKey,
    signer: SigningKey,
}

impl KeySigner {
    pub fn new(key_path: &str, passwd: &str) -> Result<Self> {
        let p = Path::new(key_path);
        let secret = if !p.exists() {
            // Create a new keystore on first run.
            let dir = p.parent().unwrap();
            let name = p.file_name().and_then(|s| s.to_str());
            let mut rng = rand::thread_rng();
            let (secret, _) = eth_keystore::new(dir, &mut rng, passwd, name)?;
            secret
        } else {
            eth_keystore::decrypt_key(key_path, passwd).map_err(|e| anyhow::anyhow!(e))?
        };
        let secret_key = SecretKey::from_bytes(secret.as_slice().into())?;
        let signer = SigningKey::from(secret_key.clone());
        Ok(Self {
            public_key: secret_key.public_key(),
            signer,
        })
    }

    pub fn get_public_key(&self) -> String {
        let v: Vec<u8> = Vec::from(self.public_key.to_encoded_point(true).as_bytes());
        buffer_to_hex(&v, false)
    }

    /// Signs the provided hash.
    pub fn sign_hash(&self, hash: H256) -> Result<EthSignature> {
        let signer = &self.signer as &dyn PrehashSigner<(Signature, RecoveryId)>;
        let (recoverable_sig, recovery_id) = signer.sign_prehash(hash.as_ref())?;

        let v = u8::from(recovery_id) as u64;

        let r_bytes: FieldBytes<Secp256k1> = recoverable_sig.r().into();
        let s_bytes: FieldBytes<Secp256k1> = recoverable_sig.s().into();
        let r = U256::from_big_endian(r_bytes.as_slice());
        let s = U256::from_big_endian(s_bytes.as_slice());

        Ok(EthSignature { r, s, v })
    }

    pub fn sign_buffer<T>(&self, buffer: &T) -> Result<String>
    where
        T: AsRef<[u8]>,
    {
        let pre_hash = keccak256(buffer);
        let hash_str = buffer_to_hex(&pre_hash, true);
        println!("hash is {hash_str}");

        let hash = H256::from(pre_hash);
        let sig = self.sign_hash(hash)?;
        Ok(buffer_to_hex(&sig.to_vec(), true))
    }
}

fn buffer_to_hex<T>(buffer: &T, has_prefix: bool) -> String
where
    T: AsRef<[u8]>,
{
    if has_prefix {
        format!("0x{}", buffer.encode_hex::<String>())
    } else {
        buffer.encode_hex::<String>()
    }
}

/// Compute the Keccak-256 hash of input bytes.
///
/// Note that strings are interpreted as UTF-8 bytes.
// TODO: Add Solidity Keccak256 packing support
pub fn keccak256<T: AsRef<[u8]>>(bytes: T) -> [u8; 32] {
    let mut output = [0u8; 32];
    let mut hasher = Keccak::v256();
    hasher.update(bytes.as_ref());
    hasher.finalize(&mut output);
    output
}
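A sketch of the signing round-trip, assuming a keystore at the path from config.json (the file is created on first use if missing):

fn sign_demo() -> anyhow::Result<()> {
    let signer = KeySigner::new("keystore.json", "prover-pwd")?;
    // 65-byte (r, s, v) signature over keccak256(b"hello"), 0x-prefixed hex.
    let sig = signer.sign_buffer(b"hello")?;
    assert!(sig.starts_with("0x"));
    Ok(())
}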

prover_rust/src/main.rs (new file, +142)

@@ -0,0 +1,142 @@
mod config;
mod coordinator_client;
mod geth_client;
mod key_signer;
mod prover;
mod task_cache;
mod types;
mod utils_log;
mod version;
mod zk_circuits_handler;

use anyhow::{Context, Result};
use config::Config;
use coordinator_client::listener::Listener;
use log;
use prover::Prover;

use core::time;
use std::rc::Rc;
use task_cache::TaskCache;
use types::TaskWrapper;

struct ClearCacheCoordinatorListener {
    pub task_cache: Rc<TaskCache>,
}

impl Listener for ClearCacheCoordinatorListener {
    fn on_proof_submitted(&self, req: &coordinator_client::types::SubmitProofRequest) {
        let result = self.task_cache.delete_task(req.task_id.clone());
        if let Err(e) = result {
            log::error!("delete task from embed db failed, {}", e.to_string());
        } else {
            log::info!(
                "delete task from embed db successfully, task_id: {}",
                &req.task_id
            );
        }
    }
}

struct TaskProcessor<'a> {
    prover: &'a Prover<'a>,
    task_cache: Rc<TaskCache>,
}

impl<'a> TaskProcessor<'a> {
    pub fn new(prover: &'a Prover, task_cache: Rc<TaskCache>) -> Self {
        TaskProcessor { prover, task_cache }
    }

    pub fn start(&self) {
        loop {
            log::info!("start a new round.");
            if let Err(err) = self.prove_and_submit() {
                log::error!("encounter error: {err}");
            } else {
                log::info!("prove & submit succeeded.");
            }
        }
    }

    fn prove_and_submit(&self) -> Result<()> {
        let task_from_cache = self
            .task_cache
            .get_last_task()
            .context("failed to peek from stack")?;

        let mut task_wrapper = match task_from_cache {
            Some(t) => t,
            None => {
                let fetch_result = self.prover.fetch_task();
                if let Err(err) = fetch_result {
                    std::thread::sleep(time::Duration::from_secs(10));
                    return Err(err).context("failed to fetch task from coordinator");
                }
                let task_wrapper: TaskWrapper = fetch_result.unwrap().into();
                self.task_cache
                    .put_task(&task_wrapper)
                    .context("failed to push task into stack")?;
                task_wrapper
            }
        };

        if task_wrapper.get_count() <= 2 {
            task_wrapper.increment_count();
            self.task_cache
                .put_task(&task_wrapper)
                .context("failed to push task into stack, updating count")?;
            log::info!(
                "start to prove task, task_type: {:?}, task_id: {}",
                task_wrapper.task.task_type,
                task_wrapper.task.id
            );
            let result = match self.prover.prove_task(&task_wrapper.task) {
                Ok(proof_detail) => self
                    .prover
                    .submit_proof(&proof_detail, task_wrapper.task.uuid.clone()),
                Err(error) => self.prover.submit_error(
                    &task_wrapper.task,
                    types::ProofFailureType::NoPanic,
                    error,
                ),
            };
            return result;
        }

        // if tried times >= 3, it's probably due to a circuit proving panic
        log::error!(
            "zk proving panic for task, task_type: {:?}, task_id: {}",
            task_wrapper.task.task_type,
            task_wrapper.task.id
        );
        self.prover.submit_error(
            &task_wrapper.task,
            types::ProofFailureType::Panic,
            anyhow::anyhow!("zk proving panic for task"),
        )
    }
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    utils_log::log_init();

    let file_name = "config.json";
    let config: Config = Config::from_file(file_name.to_string())?;
    println!("{:?}", config);

    let task_cache = Rc::new(TaskCache::new(&config.db_path)?);

    let coordinator_listener = Box::new(ClearCacheCoordinatorListener {
        task_cache: task_cache.clone(),
    });

    let prover = Prover::new(&config, coordinator_listener)?;
    let task_processor = TaskProcessor::new(&prover, task_cache);

    task_processor.start();

    Ok(())
}
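Worth noting about the loop above: a fetched task is persisted to the cache before proving, so a crash mid-proof is retried from the cache on restart. After three attempts (counts 0 through 2) the task is assumed to trigger a proving panic and is reported back with ProofFailureType::Panic instead of being retried again; the coordinator listener then clears it from the cache once the submission succeeds.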

prover_rust/src/prover.rs (new file, +324)

@@ -0,0 +1,324 @@
use anyhow::{bail, Error, Ok, Result};
use eth_types::U64;
use once_cell::sync::Lazy;
use std::{cell::RefCell, env, rc::Rc};

use crate::{
    config::Config,
    coordinator_client::{
        listener::Listener, types::*, Config as CoordinatorConfig, CoordinatorClient,
    },
    geth_client::{types::get_block_number, GethClient},
    key_signer::KeySigner,
    types::{CommonHash, ProofFailureType, ProofStatus, ProofType},
    zk_circuits_handler::{CircuitsHandler, CircuitsHandlerProvider},
};

use super::types::{ProofDetail, Task};
use prover::{BlockTrace, ChunkHash, ChunkProof};

// Only used for debugging.
pub(crate) static OUTPUT_DIR: Lazy<Option<String>> =
    Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());

pub struct Prover<'a> {
    config: &'a Config,
    key_signer: Rc<KeySigner>,
    circuits_handler_provider: CircuitsHandlerProvider,
    coordinator_client: RefCell<CoordinatorClient>,
    geth_client: Option<RefCell<GethClient>>,
}

// a U64 is positive when its 63rd (sign) bit is not set
fn is_positive(n: &U64) -> bool {
    !n.bit(63)
}

impl<'a> Prover<'a> {
    pub fn new(config: &'a Config, coordinator_listener: Box<dyn Listener>) -> Result<Self> {
        let proof_type = config.core.proof_type;
        let params_path = &config.core.params_path;
        let assets_path = &config.core.assets_path;
        let keystore_path = &config.keystore_path;
        let keystore_password = &config.keystore_password;

        let coordinator_config = CoordinatorConfig {
            endpoint: config.coordinator.base_url.clone(),
            prover_name: config.prover_name.clone(),
            prover_version: crate::version::get_version(),
            hard_fork_name: config.hard_fork_name.clone(),
        };

        let key_signer = Rc::new(KeySigner::new(keystore_path, keystore_password)?);
        let coordinator_client = CoordinatorClient::new(
            coordinator_config,
            Rc::clone(&key_signer),
            coordinator_listener,
        )?;

        let mut prover = Prover {
            config,
            key_signer: Rc::clone(&key_signer),
            circuits_handler_provider: CircuitsHandlerProvider::new(
                proof_type,
                params_path,
                assets_path,
            )?,
            coordinator_client: RefCell::new(coordinator_client),
            geth_client: None,
        };

        if config.core.proof_type == ProofType::ProofTypeChunk {
            prover.geth_client = Some(RefCell::new(GethClient::new(
                "test",
                &config.l2geth.as_ref().unwrap().endpoint,
            )?));
        }

        Ok(prover)
    }

    pub fn get_proof_type(&self) -> ProofType {
        self.config.core.proof_type
    }

    pub fn get_public_key(&self) -> String {
        self.key_signer.get_public_key()
    }

    pub fn fetch_task(&self) -> Result<Task> {
        let vks = self.circuits_handler_provider.get_vks();
        let vk = vks[0].clone();
        let mut req = GetTaskRequest {
            task_type: self.get_proof_type(),
            prover_height: None,
            vks,
            vk,
        };

        if self.get_proof_type() == ProofType::ProofTypeChunk {
            let latest_block_number = self.get_latest_block_number_value()?;
            if let Some(v) = latest_block_number {
                if v.as_u64() == 0 {
                    bail!("omit to prove task of the genesis block")
                }
                req.prover_height = Some(v.as_u64());
            } else {
                bail!("failed to fetch latest confirmed block number, got None")
            }
        }
        let resp = self.coordinator_client.borrow_mut().get_task(&req)?;

        Task::try_from(&resp.data.unwrap()).map_err(|e| anyhow::anyhow!(e))
    }

    pub fn prove_task(&self, task: &Task) -> Result<ProofDetail> {
        let version = task.get_version();
        if let Some(handler) = self.circuits_handler_provider.get_circuits_client(version) {
            self.do_prove(task, handler)
        } else {
            bail!("failed to get a circuit handler")
        }
    }

    fn do_prove(&self, task: &Task, handler: &Box<dyn CircuitsHandler>) -> Result<ProofDetail> {
        let mut proof_detail = ProofDetail {
            id: task.id.clone(),
            proof_type: task.task_type,
            ..Default::default()
        };

        match task.task_type {
            ProofType::ProofTypeBatch => {
                let chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)> =
                    self.gen_chunk_hashes_proofs(task)?;
                let chunk_proofs: Vec<ChunkProof> =
                    chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();

                let is_valid = handler.aggregator_check_chunk_proofs(&chunk_proofs)?;
                if !is_valid {
                    bail!("non-match chunk protocol, task-id: {}", &task.id)
                }

                let batch_proof = handler.aggregator_gen_agg_evm_proof(
                    chunk_hashes_proofs,
                    None,
                    self.get_output_dir(),
                )?;
                proof_detail.batch_proof = Some(batch_proof);
                Ok(proof_detail)
            }
            ProofType::ProofTypeChunk => {
                let chunk_trace = self.gen_chunk_traces(task)?;
                let chunk_proof = handler.prover_gen_chunk_proof(
                    chunk_trace,
                    None,
                    None,
                    self.get_output_dir(),
                )?;
                proof_detail.chunk_proof = Some(chunk_proof);
                Ok(proof_detail)
            }
            _ => bail!("task type invalid"),
        }
    }

    pub fn submit_proof(&self, proof_detail: &ProofDetail, uuid: String) -> Result<()> {
        let proof_data = match proof_detail.proof_type {
            ProofType::ProofTypeBatch => {
                serde_json::to_string(proof_detail.batch_proof.as_ref().unwrap())?
            }
            ProofType::ProofTypeChunk => {
                serde_json::to_string(proof_detail.chunk_proof.as_ref().unwrap())?
            }
            _ => unreachable!(),
        };

        let request = SubmitProofRequest {
            uuid,
            task_id: proof_detail.id.clone(),
            task_type: proof_detail.proof_type,
            status: ProofStatus::Ok,
            proof: proof_data,
            ..Default::default()
        };

        self.do_submit(&request)
    }

    pub fn submit_error(
        &self,
        task: &Task,
        failure_type: ProofFailureType,
        error: Error,
    ) -> Result<()> {
        let request = SubmitProofRequest {
            uuid: task.uuid.clone(),
            task_id: task.id.clone(),
            task_type: task.task_type,
            status: ProofStatus::Error,
            failure_type: Some(failure_type),
            failure_msg: Some(error.to_string()),
            ..Default::default()
        };
        self.do_submit(&request)
    }

    fn do_submit(&self, request: &SubmitProofRequest) -> Result<()> {
        self.coordinator_client.borrow_mut().submit_proof(request)?;
        Ok(())
    }

    fn get_latest_block_number_value(&self) -> Result<Option<U64>> {
        let number = self
            .geth_client
            .as_ref()
            .unwrap()
            .borrow_mut()
            .block_number()?;
        Ok(number.as_number())
    }

    // fn get_configured_block_number_value(&self) -> Result<Option<U64>> {
    //     self.get_block_number_value(&self.config.l2geth.as_ref().unwrap().confirmations)
    // }

    // fn get_block_number_value(&self, block_number: &BlockNumber) -> Result<Option<U64>> {
    //     match block_number {
    //         BlockNumber::Safe | BlockNumber::Finalized => {
    //             let header =
    //                 self.geth_client.as_ref().unwrap().borrow_mut().header_by_number(block_number)?;
    //             Ok(header.get_number())
    //         },
    //         BlockNumber::Latest => {
    //             let number = self.geth_client.as_ref().unwrap().borrow_mut().block_number()?;
    //             Ok(number.as_number())
    //         },
    //         BlockNumber::Number(n) if is_positive(n) => {
    //             let number = self.geth_client.as_ref().unwrap().borrow_mut().block_number()?;
    //             let diff = number.as_number()
    //                 .filter(|m| m.as_u64() >= n.as_u64())
    //                 .map(|m| U64::from(m.as_u64() - n.as_u64()));
    //             Ok(diff)
    //         },
    //         _ => bail!("unknown confirmation type"),
    //     }
    // }

    fn get_output_dir(&self) -> Option<&str> {
        OUTPUT_DIR.as_deref()
    }

    fn gen_chunk_traces(&self, task: &Task) -> Result<Vec<BlockTrace>> {
        if let Some(chunk_detail) = task.chunk_task_detail.as_ref() {
            self.get_sorted_traces_by_hashes(&chunk_detail.block_hashes)
        } else {
            bail!("invalid task")
        }
    }

    fn gen_chunk_hashes_proofs(&self, task: &Task) -> Result<Vec<(ChunkHash, ChunkProof)>> {
        if let Some(batch_detail) = task.batch_task_detail.as_ref() {
            Ok(batch_detail
                .chunk_infos
                .clone()
                .into_iter()
                .zip(batch_detail.chunk_proofs.clone())
                .collect())
        } else {
            bail!("invalid task")
        }
    }

    fn get_sorted_traces_by_hashes(
        &self,
        block_hashes: &Vec<CommonHash>,
    ) -> Result<Vec<BlockTrace>> {
        if block_hashes.is_empty() {
            bail!("blockHashes is empty")
        }

        let mut block_traces = Vec::new();
        for hash in block_hashes {
            let trace = self
                .geth_client
                .as_ref()
                .unwrap()
                .borrow_mut()
                .get_block_trace_by_hash(hash)?;
            block_traces.push(trace.block_trace);
        }

        // `None` orders before any `Some`, which also keeps the comparator total.
        block_traces.sort_by_key(get_block_number);

        let block_numbers: Vec<u64> = block_traces
            .iter()
            .map(|trace| get_block_number(trace).unwrap_or(0))
            .collect();
        let mut i = 0;
        while i < block_numbers.len() - 1 {
            if block_numbers[i] + 1 != block_numbers[i + 1] {
                bail!(
                    "block numbers are not continuous, got {} and {}",
                    block_numbers[i],
                    block_numbers[i + 1]
                )
            }
            i += 1;
        }

        Ok(block_traces)
    }
}

prover_rust/src/task_cache.rs (new file, +40)

@@ -0,0 +1,40 @@
use anyhow::{Ok, Result};

use crate::types::TaskWrapper;
use sled::{Config, Db};

pub struct TaskCache {
    db: Db,
}

impl TaskCache {
    pub fn new(db_path: &String) -> Result<Self> {
        let config = Config::new().path(db_path);
        let db = config.open()?;
        Ok(Self { db })
    }

    pub fn put_task(&self, task_wrapper: &TaskWrapper) -> Result<()> {
        let k = task_wrapper.task.id.clone().into_bytes();
        let v = serde_json::to_vec(task_wrapper)?;
        self.db.insert(k, v)?;
        Ok(())
    }

    pub fn get_last_task(&self) -> Result<Option<TaskWrapper>> {
        let last = self.db.last()?;
        if let Some((k, v)) = last {
            let kk = std::str::from_utf8(k.as_ref())?;
            log::info!("get last task, task_id: {kk}");
            let task_wrapper: TaskWrapper = serde_json::from_slice(v.as_ref())?;
            return Ok(Some(task_wrapper));
        }
        Ok(None)
    }

    pub fn delete_task(&self, task_id: String) -> Result<()> {
        let k = task_id.into_bytes();
        self.db.remove(k)?;
        Ok(())
    }
}
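A round-trip sketch for the cache. Because sled keeps keys ordered and get_last_task reads db.last(), the cache behaves like the "stack" that main.rs peeks, keyed by the task_id bytes:

fn cache_demo(cache: &TaskCache) -> anyhow::Result<()> {
    let mut wrapper: crate::types::TaskWrapper = crate::types::Task::default().into();
    wrapper.task.id = "task-1".to_string();
    cache.put_task(&wrapper)?;
    assert!(cache.get_last_task()?.is_some());
    cache.delete_task("task-1".to_string())?; // what the listener does on submit
    Ok(())
}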

prover_rust/src/types.rs (new file, +237)

@@ -0,0 +1,237 @@
use core::fmt;
use eth_types::H256;
use prover::{BatchProof, ChunkHash, ChunkProof};
use serde::{Deserialize, Deserializer, Serialize, Serializer};

use crate::coordinator_client::types::GetTaskResponseData;

pub type CommonHash = H256;
pub type Bytes = Vec<u8>;

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofType {
    ProofTypeUndefined,
    ProofTypeChunk,
    ProofTypeBatch,
}

impl ProofType {
    fn from_u8(v: u8) -> Self {
        match v {
            1 => ProofType::ProofTypeChunk,
            2 => ProofType::ProofTypeBatch,
            _ => ProofType::ProofTypeUndefined,
        }
    }
}

impl Serialize for ProofType {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            ProofType::ProofTypeUndefined => serializer.serialize_i8(0),
            ProofType::ProofTypeChunk => serializer.serialize_i8(1),
            ProofType::ProofTypeBatch => serializer.serialize_i8(2),
        }
    }
}

impl<'de> Deserialize<'de> for ProofType {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: u8 = u8::deserialize(deserializer)?;
        Ok(ProofType::from_u8(v))
    }
}

impl Default for ProofType {
    fn default() -> Self {
        Self::ProofTypeUndefined
    }
}

#[derive(Serialize, Deserialize)]
pub struct BatchTaskDetail {
    pub chunk_infos: Vec<ChunkHash>,
    pub chunk_proofs: Vec<ChunkProof>,
}

#[derive(Serialize, Deserialize)]
pub struct ChunkTaskDetail {
    pub block_hashes: Vec<CommonHash>,
}

#[derive(Serialize, Deserialize, Default)]
pub struct Task {
    pub uuid: String,
    pub id: String,
    #[serde(rename = "type", default)]
    pub task_type: ProofType,
    #[serde(default)]
    pub batch_task_detail: Option<BatchTaskDetail>,
    #[serde(default)]
    pub chunk_task_detail: Option<ChunkTaskDetail>,
    #[serde(default)]
    pub hard_fork_name: Option<String>,
}

impl Task {
    pub fn get_version(&self) -> String {
        match self.hard_fork_name.as_ref() {
            Some(v) => v.clone(),
            None => "".to_string(),
        }
    }
}

impl TryFrom<&GetTaskResponseData> for Task {
    type Error = serde_json::Error;

    fn try_from(value: &GetTaskResponseData) -> Result<Self, Self::Error> {
        let mut task = Task {
            uuid: value.uuid.clone(),
            id: value.task_id.clone(),
            task_type: value.task_type,
            chunk_task_detail: None,
            batch_task_detail: None,
            hard_fork_name: value.hard_fork_name.clone(),
        };
        match task.task_type {
            ProofType::ProofTypeBatch => {
                task.batch_task_detail = Some(serde_json::from_str(&value.task_data)?);
            }
            ProofType::ProofTypeChunk => {
                task.chunk_task_detail = Some(serde_json::from_str(&value.task_data)?);
            }
            _ => unreachable!(),
        }
        Ok(task)
    }
}

#[derive(Serialize, Deserialize, Default)]
pub struct TaskWrapper {
    pub task: Task,
    count: usize,
}

impl TaskWrapper {
    pub fn increment_count(&mut self) {
        self.count += 1;
    }

    pub fn get_count(&self) -> usize {
        self.count
    }
}

impl From<Task> for TaskWrapper {
    fn from(task: Task) -> Self {
        TaskWrapper { task, count: 0 }
    }
}

#[derive(Serialize, Deserialize, Default)]
pub struct ProofDetail {
    pub id: String,
    #[serde(rename = "type", default)]
    pub proof_type: ProofType,
    pub chunk_proof: Option<ChunkProof>,
    pub batch_proof: Option<BatchProof>,
    pub error: String,
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofFailureType {
    Undefined,
    Panic,
    NoPanic,
}

impl ProofFailureType {
    fn from_u8(v: u8) -> Self {
        match v {
            1 => ProofFailureType::Panic,
            2 => ProofFailureType::NoPanic,
            _ => ProofFailureType::Undefined,
        }
    }
}

impl Serialize for ProofFailureType {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            ProofFailureType::Undefined => serializer.serialize_u8(0),
            ProofFailureType::Panic => serializer.serialize_u8(1),
            ProofFailureType::NoPanic => serializer.serialize_u8(2),
        }
    }
}

impl<'de> Deserialize<'de> for ProofFailureType {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: u8 = u8::deserialize(deserializer)?;
        Ok(ProofFailureType::from_u8(v))
    }
}

impl Default for ProofFailureType {
    fn default() -> Self {
        Self::Undefined
    }
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofStatus {
    Ok,
    Error,
}

impl ProofStatus {
    fn from_u8(v: u8) -> Self {
        match v {
            0 => ProofStatus::Ok,
            _ => ProofStatus::Error,
        }
    }
}

impl Serialize for ProofStatus {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            ProofStatus::Ok => serializer.serialize_u8(0),
            ProofStatus::Error => serializer.serialize_u8(1),
        }
    }
}

impl<'de> Deserialize<'de> for ProofStatus {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: u8 = u8::deserialize(deserializer)?;
        Ok(ProofStatus::from_u8(v))
    }
}

impl Default for ProofStatus {
    fn default() -> Self {
        Self::Ok
    }
}

prover_rust/src/utils_log.rs (new file, +11)

@@ -0,0 +1,11 @@
use env_logger::Env;
use std::sync::Once;

static LOG_INIT: Once = Once::new();

/// Initialize log
pub fn log_init() {
    LOG_INIT.call_once(|| {
        env_logger::Builder::from_env(Env::default().default_filter_or("info")).init();
    });
}

prover_rust/src/version.rs (new file, +17)

@@ -0,0 +1,17 @@
use once_cell::sync::Lazy;

static DEFAULT_COMMIT: &str = "unknown";
pub const TAG: &str = "v4.4.3";
pub const DEFAULT_ZK_VERSION: &str = "000000-000000";

// Computed once on first access; a `static mut` cell with `unsafe` access
// is not needed here since once_cell is already a dependency.
static VERSION: Lazy<String> = Lazy::new(init_version);

fn init_version() -> String {
    let commit = option_env!("GIT_REV").unwrap_or(DEFAULT_COMMIT);
    let zk_version = option_env!("ZK_VERSION").unwrap_or(DEFAULT_ZK_VERSION);
    format!("{TAG}-{commit}-{zk_version}")
}

pub fn get_version() -> String {
    VERSION.clone()
}
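With the Makefile above injecting GIT_REV and ZK_VERSION at compile time, get_version() yields strings of the form TAG, short git revision, then the two pinned circuit commits (zkevm and halo2, or halo2_gpu when present); if the variables are unset, it falls back to "unknown" and "000000-000000". This is the prover_version the coordinator sees at login.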

prover_rust/src/zk_circuits_handler.rs (new file, +89)

@@ -0,0 +1,89 @@
mod base;
// mod next;
mod types;

use anyhow::Result;
use base::BaseCircuitsHandler;
use std::collections::HashMap;
use types::{BatchProof, BlockTrace, ChunkHash, ChunkProof};

use crate::types::ProofType;

// use self::next::NextCircuitsHandler;

type CircuitsVersion = String;

pub mod utils {
    pub fn encode_vk(vk: Vec<u8>) -> String {
        base64::encode(vk)
    }
}

pub trait CircuitsHandler {
    // api of zkevm::Prover
    fn prover_get_vk(&self) -> Option<Vec<u8>>;

    fn prover_gen_chunk_proof(
        &self,
        chunk_trace: Vec<BlockTrace>,
        name: Option<&str>,
        inner_id: Option<&str>,
        output_dir: Option<&str>,
    ) -> Result<ChunkProof>;

    // api of aggregator::Prover
    fn aggregator_get_vk(&self) -> Option<Vec<u8>>;

    fn aggregator_gen_agg_evm_proof(
        &self,
        chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)>,
        name: Option<&str>,
        output_dir: Option<&str>,
    ) -> Result<BatchProof>;

    fn aggregator_check_chunk_proofs(&self, chunk_proofs: &[ChunkProof]) -> Result<bool>;
}

pub struct CircuitsHandlerProvider {
    proof_type: ProofType,
    circuits_handler_map: HashMap<CircuitsVersion, Box<dyn CircuitsHandler>>,
}

impl CircuitsHandlerProvider {
    pub fn new(proof_type: ProofType, params_dir: &str, assets_dir: &str) -> Result<Self> {
        let mut m: HashMap<CircuitsVersion, Box<dyn CircuitsHandler>> = HashMap::new();
        let handler = BaseCircuitsHandler::new(proof_type, params_dir, assets_dir)?;
        m.insert("".to_string(), Box::new(handler));

        // let next_handler = NextCircuitsHandler::new(proof_type, params_dir, assets_dir)?;
        // m.insert("".to_string(), Box::new(next_handler));

        Ok(CircuitsHandlerProvider {
            proof_type,
            circuits_handler_map: m,
        })
    }

    pub fn get_circuits_client(&self, version: String) -> Option<&Box<dyn CircuitsHandler>> {
        self.circuits_handler_map.get(&version)
    }

    pub fn get_vks(&self) -> Vec<String> {
        match self.proof_type {
            ProofType::ProofTypeBatch => self
                .circuits_handler_map
                .values()
                .map(|h| {
                    h.aggregator_get_vk()
                        .map_or("".to_string(), |vk| utils::encode_vk(vk))
                })
                .collect::<Vec<String>>(),
            ProofType::ProofTypeChunk => self
                .circuits_handler_map
                .values()
                .map(|h| {
                    h.prover_get_vk()
                        .map_or("".to_string(), |vk| utils::encode_vk(vk))
                })
                .collect::<Vec<String>>(),
            _ => unreachable!(),
        }
    }
}
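A sketch of how a task's hard fork name selects a handler once more than one version is registered. Today only the empty-string default exists, and Task::get_version in src/types.rs maps a missing hard_fork_name to "":

use std::collections::HashMap;

fn select_handler<'a>(
    handlers: &'a HashMap<String, Box<dyn CircuitsHandler>>,
    hard_fork_name: Option<&str>,
) -> Option<&'a Box<dyn CircuitsHandler>> {
    // Fall back to the default ("") entry when the task carries no fork name.
    handlers.get(hard_fork_name.unwrap_or(""))
}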

prover_rust/src/zk_circuits_handler/base.rs (new file, +91)

@@ -0,0 +1,91 @@
use super::{
    types::{BatchProof, BlockTrace, ChunkHash, ChunkProof},
    CircuitsHandler,
};
use crate::types::ProofType;
use anyhow::{bail, Ok, Result};
use prover::{aggregator::Prover as BatchProver, zkevm::Prover as ChunkProver};
use std::cell::RefCell;

#[derive(Default)]
pub struct BaseCircuitsHandler {
    chunk_prover: Option<RefCell<ChunkProver>>,
    batch_prover: Option<RefCell<BatchProver>>,
}

impl BaseCircuitsHandler {
    pub fn new(proof_type: ProofType, params_dir: &str, assets_dir: &str) -> Result<Self> {
        match proof_type {
            ProofType::ProofTypeChunk => Ok(Self {
                chunk_prover: Some(RefCell::new(ChunkProver::from_dirs(params_dir, assets_dir))),
                ..Default::default()
            }),
            ProofType::ProofTypeBatch => Ok(Self {
                batch_prover: Some(RefCell::new(BatchProver::from_dirs(params_dir, assets_dir))),
                ..Default::default()
            }),
            _ => bail!("proof type invalid"),
        }
    }
}

impl CircuitsHandler for BaseCircuitsHandler {
    // api of zkevm::Prover
    fn prover_get_vk(&self) -> Option<Vec<u8>> {
        log::info!("[circuit handler], [base], [chunk] get_vk");
        self.chunk_prover
            .as_ref()
            .and_then(|prover| prover.borrow().get_vk())
    }

    fn prover_gen_chunk_proof(
        &self,
        chunk_trace: Vec<BlockTrace>,
        name: Option<&str>,
        inner_id: Option<&str>,
        output_dir: Option<&str>,
    ) -> Result<ChunkProof> {
        log::info!("[circuit handler], [base], [chunk] gen_chunk_proof");
        if let Some(prover) = self.chunk_prover.as_ref() {
            return prover
                .borrow_mut()
                .gen_chunk_proof(chunk_trace, name, inner_id, output_dir);
        }
        unreachable!("please check errors in proof_type logic")
    }

    // api of aggregator::Prover
    fn aggregator_get_vk(&self) -> Option<Vec<u8>> {
        log::info!("[circuit handler], [base], [batch] get_vk");
        self.batch_prover
            .as_ref()
            .and_then(|prover| prover.borrow().get_vk())
    }

    fn aggregator_gen_agg_evm_proof(
        &self,
        chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)>,
        name: Option<&str>,
        output_dir: Option<&str>,
    ) -> Result<BatchProof> {
        log::info!("[circuit handler], [base], [batch] gen_agg_evm_proof");
        if let Some(prover) = self.batch_prover.as_ref() {
            return prover
                .borrow_mut()
                .gen_agg_evm_proof(chunk_hashes_proofs, name, output_dir);
        }
        unreachable!("please check errors in proof_type logic")
    }

    fn aggregator_check_chunk_proofs(&self, chunk_proofs: &[ChunkProof]) -> Result<bool> {
        log::info!("[circuit handler], [base], [batch] check_chunk_proofs");
        if let Some(prover) = self.batch_prover.as_ref() {
            return Ok(prover.borrow_mut().check_chunk_proofs(chunk_proofs));
        }
        unreachable!("please check errors in proof_type logic")
    }
}

prover_rust/src/zk_circuits_handler/next.rs (new file, +120)

@@ -0,0 +1,120 @@
use super::{types::*, CircuitsHandler};
use crate::types::ProofType;
use anyhow::{bail, Ok, Result};
use prover_next::{aggregator::Prover as NextBatchProver, zkevm::Prover as NextChunkProver};
use std::cell::RefCell;

#[derive(Default)]
pub struct NextCircuitsHandler {
    chunk_prover: Option<RefCell<NextChunkProver>>,
    batch_prover: Option<RefCell<NextBatchProver>>,
}

impl NextCircuitsHandler {
    pub fn new(proof_type: ProofType, params_dir: &str, assets_dir: &str) -> Result<Self> {
        match proof_type {
            ProofType::ProofTypeChunk => Ok(Self {
                chunk_prover: Some(RefCell::new(NextChunkProver::from_dirs(
                    params_dir, assets_dir,
                ))),
                ..Default::default()
            }),
            ProofType::ProofTypeBatch => Ok(Self {
                batch_prover: Some(RefCell::new(NextBatchProver::from_dirs(
                    params_dir, assets_dir,
                ))),
                ..Default::default()
            }),
            _ => bail!("proof type invalid"),
        }
    }
}

impl CircuitsHandler for NextCircuitsHandler {
    // api of zkevm::Prover
    fn prover_get_vk(&self) -> Option<Vec<u8>> {
        log::info!("[circuit handler], [next], [chunk] get_vk");
        self.chunk_prover
            .as_ref()
            .and_then(|prover| prover.borrow().get_vk())
    }

    fn prover_gen_chunk_proof(
        &self,
        chunk_trace: Vec<BlockTrace>,
        name: Option<&str>,
        inner_id: Option<&str>,
        output_dir: Option<&str>,
    ) -> Result<ChunkProof> {
        log::info!("[circuit handler], [next], [chunk] gen_chunk_proof");
        if let Some(prover) = self.chunk_prover.as_ref() {
            let next_chunk_trace = chunk_trace
                .into_iter()
                .map(|block_trace| block_trace_base_to_next(block_trace))
                .collect::<Result<Vec<NextBlockTrace>>>()?;
            let next_chunk_proof = prover.borrow_mut().gen_chunk_proof(
                next_chunk_trace,
                name,
                inner_id,
                output_dir,
            )?;
            return chunk_proof_next_to_base(next_chunk_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    // api of aggregator::Prover
    fn aggregator_get_vk(&self) -> Option<Vec<u8>> {
        log::info!("[circuit handler], [next], [batch] get_vk");
        self.batch_prover
            .as_ref()
            .and_then(|prover| prover.borrow().get_vk())
    }

    fn aggregator_gen_agg_evm_proof(
        &self,
        chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)>,
        name: Option<&str>,
        output_dir: Option<&str>,
    ) -> Result<BatchProof> {
        log::info!("[circuit handler], [next], [batch] gen_agg_evm_proof");
        if let Some(prover) = self.batch_prover.as_ref() {
            let next_chunk_hashes_proofs = chunk_hashes_proofs
                .into_iter()
                .map(|t| {
                    let next_chunk_hash = chunk_hash_base_to_next(t.0);
                    let next_chunk_proof = chunk_proof_base_to_next(&t.1);
                    match next_chunk_proof {
                        Result::Ok(proof) => Ok((next_chunk_hash, proof)),
                        Err(err) => Err(err),
                    }
                })
                .collect::<Result<Vec<(NextChunkHash, NextChunkProof)>>>()?;
            let next_batch_proof = prover.borrow_mut().gen_agg_evm_proof(
                next_chunk_hashes_proofs,
                name,
                output_dir,
            )?;
            return batch_proof_next_to_base(next_batch_proof);
        }
        unreachable!("please check errors in proof_type logic")
    }

    fn aggregator_check_chunk_proofs(&self, chunk_proofs: &[ChunkProof]) -> Result<bool> {
        log::info!("[circuit handler], [next], [batch] check_chunk_proofs");
        if let Some(prover) = self.batch_prover.as_ref() {
            let next_chunk_proofs = chunk_proofs
                .into_iter()
                .map(|chunk_proof| chunk_proof_base_to_next(chunk_proof))
                .collect::<Result<Vec<NextChunkProof>>>()?;
            return Ok(prover.borrow_mut().check_chunk_proofs(&next_chunk_proofs));
        }
        unreachable!("please check errors in proof_type logic")
    }
}

prover_rust/src/zk_circuits_handler/types.rs (new file, +69)

@@ -0,0 +1,69 @@
use anyhow::Result;
pub use prover::{BatchProof, BlockTrace, ChunkHash, ChunkProof, Proof};

// pub use prover_next::{
//     BatchProof as NextBatchProof, BlockTrace as NextBlockTrace, ChunkHash as NextChunkHash,
//     ChunkProof as NextChunkProof, Proof as NextProof,
// };

// pub fn chunk_proof_next_to_base(next: NextChunkProof) -> Result<ChunkProof> {
//     let proof_bytes = serde_json::to_string(&next.proof)?;
//     let proof: Proof = serde_json::from_str(&proof_bytes)?;
//     let chunk_hash = next.chunk_hash.map(|hash| ChunkHash {
//         chain_id: hash.chain_id,
//         prev_state_root: hash.prev_state_root,
//         post_state_root: hash.post_state_root,
//         withdraw_root: hash.withdraw_root,
//         data_hash: hash.data_hash,
//         tx_bytes: hash.tx_bytes,
//         is_padding: hash.is_padding,
//     });
//     Ok(ChunkProof {
//         protocol: next.protocol,
//         proof,
//         chunk_hash,
//     })
// }

// pub fn batch_proof_next_to_base(next: NextBatchProof) -> Result<BatchProof> {
//     let proof_bytes = serde_json::to_string(&next)?;
//     serde_json::from_str(&proof_bytes).map_err(|err| anyhow::anyhow!(err))
// }

// pub fn chunk_proof_base_to_next(base: &ChunkProof) -> Result<NextChunkProof> {
//     let proof_bytes = serde_json::to_string(&base.proof)?;
//     let proof: NextProof = serde_json::from_str(&proof_bytes)?;
//     let chunk_hash = base.chunk_hash.clone().map(|hash| NextChunkHash {
//         chain_id: hash.chain_id,
//         prev_state_root: hash.prev_state_root,
//         post_state_root: hash.post_state_root,
//         withdraw_root: hash.withdraw_root,
//         data_hash: hash.data_hash,
//         tx_bytes: hash.tx_bytes,
//         is_padding: hash.is_padding,
//     });
//     Ok(NextChunkProof {
//         protocol: base.protocol.clone(),
//         proof,
//         chunk_hash,
//     })
// }

// pub fn chunk_hash_base_to_next(base: ChunkHash) -> NextChunkHash {
//     NextChunkHash {
//         chain_id: base.chain_id,
//         prev_state_root: base.prev_state_root,
//         post_state_root: base.post_state_root,
//         withdraw_root: base.withdraw_root,
//         data_hash: base.data_hash,
//         tx_bytes: base.tx_bytes,
//         is_padding: base.is_padding,
//     }
// }

// pub fn block_trace_base_to_next(base: BlockTrace) -> Result<NextBlockTrace> {
//     let trace_bytes = serde_json::to_string(&base)?;
//     serde_json::from_str(&trace_bytes).map_err(|err| anyhow::anyhow!(err))
// }