Refactor/zkvm 3 (#1684)

Author: Ho
Date: 2025-07-01 07:39:27 +09:00
Committed by: GitHub
parent 9dc57c6126
commit ea38ae7e96
60 changed files with 1495 additions and 592 deletions

View File

@@ -26,10 +26,12 @@ pub fn checkout_chunk_task(
}
/// Generate required stuff for proving tasks
+/// return (pi_hash, metadata, task)
pub fn gen_universal_task(
task_type: i32,
task_json: &str,
fork_name: &str,
+expected_vk: &[u8],
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, String, String)> {
use proofs::*;
@@ -44,7 +46,7 @@ pub fn gen_universal_task(
Bundle(BundleProofMetadata),
}
-let (pi_hash, metadata, u_task) = match task_type {
+let (pi_hash, metadata, mut u_task) = match task_type {
x if x == TaskType::Chunk as i32 => {
let task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
let (pi_hash, metadata, u_task) =
@@ -64,6 +66,8 @@ pub fn gen_universal_task(
_ => return Err(eyre::eyre!("unrecognized task type {task_type}")),
};
+u_task.vk = Vec::from(expected_vk);
Ok((
pi_hash,
serde_json::to_string(&metadata)?,
@@ -106,7 +110,24 @@ pub fn verifier_init(config: &str) -> eyre::Result<()> {
pub fn verify_proof(proof: Vec<u8>, fork_name: &str, task_type: TaskType) -> eyre::Result<bool> {
let verifier = verifier::get_verifier(fork_name)?;
-let ret = verifier.verify(task_type, proof)?;
+let ret = verifier.lock().unwrap().verify(task_type, &proof)?;
+if let Ok(debug_value) = std::env::var("ZKVM_DEBUG_PROOF") {
+use std::time::{SystemTime, UNIX_EPOCH};
+if !ret && debug_value.to_lowercase() == "true" {
+// Dump the failed proof to a temporary file
+let timestamp = SystemTime::now()
+.duration_since(UNIX_EPOCH)
+.unwrap_or_default()
+.as_secs();
+let filename = format!("/tmp/proof_{}.json", timestamp);
+if let Err(e) = std::fs::write(&filename, &proof) {
+eprintln!("Failed to write proof to file {}: {}", filename, e);
+} else {
+println!("Dumped failed proof to {}", filename);
+}
+}
+}
Ok(ret)
}
@@ -115,7 +136,7 @@ pub fn verify_proof(proof: Vec<u8>, fork_name: &str, task_type: TaskType) -> eyr
pub fn dump_vk(fork_name: &str, file: &str) -> eyre::Result<()> {
let verifier = verifier::get_verifier(fork_name)?;
-verifier.dump_vk(Path::new(file));
+verifier.lock().unwrap().dump_vk(Path::new(file));
Ok(())
}
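A minimal caller-side sketch of the new debug hook. `verify_proof` and `TaskType` come from this diff; the wrapper function, proof bytes, and fork name are assumed placeholders:

```rust
// Hypothetical caller; only `verify_proof` and `TaskType` are from this diff.
fn verify_with_debug_dump(proof_bytes: Vec<u8>) -> eyre::Result<bool> {
    // Opting in: with ZKVM_DEBUG_PROOF=true, a failed verification now
    // writes the proof JSON to /tmp/proof_<unix_ts>.json for offline replay.
    std::env::set_var("ZKVM_DEBUG_PROOF", "true");
    verify_proof(proof_bytes, "euclidv2", TaskType::Chunk)
}
```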

View File

@@ -179,7 +179,7 @@ impl<Metadata: ProofMetadata> WrappedProof<Metadata> {
/// Sanity checks on the wrapped proof:
///
/// - pi_hash computed in host does in fact match pi_hash computed in guest
-pub fn sanity_check(&self, fork_name: ForkName) {
+pub fn pi_hash_check(&self, fork_name: ForkName) -> bool {
let proof_pi = self.proof.public_values();
let expected_pi = self
@@ -192,10 +192,11 @@ impl<Metadata: ProofMetadata> WrappedProof<Metadata> {
.map(|&v| v as u32)
.collect::<Vec<_>>();
-assert_eq!(
-expected_pi, proof_pi,
-"pi mismatch: expected={expected_pi:?}, found={proof_pi:?}"
-);
+let ret = expected_pi == proof_pi;
+if !ret {
+tracing::warn!("pi mismatch: expected={expected_pi:?}, found={proof_pi:?}");
+}
+ret
}
}
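Since `sanity_check` panicked while `pi_hash_check` returns a `bool`, callers now choose the failure mode. A hedged sketch of the new call-site pattern (the surrounding function is hypothetical; only `WrappedProof::pi_hash_check` is from this diff):

```rust
// Hypothetical caller of the new API.
fn require_pi_match<M: ProofMetadata>(
    proof: &WrappedProof<M>,
    fork_name: ForkName,
) -> eyre::Result<()> {
    // Old behavior: assert_eq! aborted on mismatch. New behavior: the
    // mismatch is logged via tracing::warn! and surfaced as a recoverable
    // error here.
    if !proof.pi_hash_check(fork_name) {
        return Err(eyre::eyre!("public-input hash mismatch"));
    }
    Ok(())
}
```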

View File

@@ -9,13 +9,39 @@ pub use chunk::{ChunkProvingTask, ChunkTask};
pub use chunk_interpreter::ChunkInterpreter;
pub use scroll_zkvm_types::task::ProvingTask;
-use crate::proofs::{BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata};
-use chunk_interpreter::{DummyInterpreter, TryFromWithInterpreter};
+use crate::proofs::{self, BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata};
use sbv_primitives::B256;
-use scroll_zkvm_types::{
-chunk::ChunkInfo,
-public_inputs::{ForkName, MultiVersionPublicInputs},
-};
+use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs};
+fn check_aggregation_proofs<Metadata>(
+proofs: &[proofs::WrappedProof<Metadata>],
+fork_name: ForkName,
+) -> eyre::Result<()>
+where
+Metadata: proofs::ProofMetadata,
+{
+use std::panic::{self, AssertUnwindSafe};
+panic::catch_unwind(AssertUnwindSafe(|| {
+for w in proofs.windows(2) {
+w[1].metadata
+.pi_hash_info()
+.validate(w[0].metadata.pi_hash_info(), fork_name);
+}
+}))
+.map_err(|e| {
+let error_msg = if let Some(string) = e.downcast_ref::<String>() {
+string.clone()
+} else if let Some(str) = e.downcast_ref::<&str>() {
+str.to_string()
+} else {
+"Unknown validation error occurred".to_string()
+};
+eyre::eyre!("Chunk data validation failed: {}", error_msg)
+})?;
+Ok(())
+}
/// Generate required stuff for chunk proving
pub fn gen_universal_chunk_task(
@@ -23,11 +49,10 @@ pub fn gen_universal_chunk_task(
fork_name: ForkName,
interpreter: Option<impl ChunkInterpreter>,
) -> eyre::Result<(B256, ChunkProofMetadata, ProvingTask)> {
-let chunk_info = if let Some(interpreter) = interpreter {
-ChunkInfo::try_from_with_interpret(&mut task, interpreter)
-} else {
-ChunkInfo::try_from_with_interpret(&mut task, DummyInterpreter {})
-}?;
+if let Some(interpreter) = interpreter {
+task.prepare_task_via_interpret(interpreter)?;
+}
+let chunk_info = task.precheck_and_build_metadata()?;
let proving_task = task.try_into()?;
let expected_pi_hash = chunk_info.pi_hash_by_fork(fork_name);
Ok((
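The panic-to-`Result` bridge in `check_aggregation_proofs` above is the load-bearing trick: `validate` still asserts internally, so the panic payload is caught and re-packaged. A self-contained sketch of the same pattern, with no zkvm types, assuming only `eyre`:

```rust
use std::panic::{self, AssertUnwindSafe};

// Run a validator that asserts internally, catch the panic, and re-package
// the payload (a String or &str) as an eyre error.
fn validate_to_result(f: impl FnOnce()) -> eyre::Result<()> {
    panic::catch_unwind(AssertUnwindSafe(f)).map_err(|e| {
        let msg = e
            .downcast_ref::<String>()
            .cloned()
            .or_else(|| e.downcast_ref::<&str>().map(|s| s.to_string()))
            .unwrap_or_else(|| "unknown validation error".to_string());
        eyre::eyre!("validation failed: {msg}")
    })
}
```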

View File

@@ -5,7 +5,7 @@ use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchInfo, BatchWitness, EnvelopeV6, EnvelopeV7,
-PointEvalWitness, ReferenceHeader, N_BLOB_BYTES,
+PointEvalWitness, ReferenceHeader, ToArchievedWitness, N_BLOB_BYTES,
},
public_inputs::ForkName,
task::ProvingTask,
@@ -104,7 +104,7 @@ impl BatchProvingTask {
fn build_guest_input(&self) -> BatchWitness {
let fork_name = self.fork_name.to_lowercase().as_str().into();
-// calculate point eval needed and compare with task input
+// sanity check: calculate point eval needed and compare with task input
let (kzg_commitment, kzg_proof, challenge_digest) = {
let blob = point_eval::to_blob(&self.blob_bytes);
let commitment = point_eval::blob_to_kzg_commitment(&blob);
@@ -120,12 +120,12 @@ impl BatchProvingTask {
EnvelopeV6::from(self.blob_bytes.as_slice()).challenge_digest(versioned_hash)
}
BatchHeaderV::V7(_) => {
-assert_eq!(
-fork_name,
-ForkName::EuclidV2,
-"hardfork mismatch for da-codec@v7 header: found={fork_name:?}, expected={:?}",
-ForkName::EuclidV2,
-);
+match fork_name {
+ForkName::EuclidV2 => (),
+_ => unreachable!("hardfork mismatch for da-codec@v7 header: found={fork_name:?}, expected={:?}",
+[ForkName::EuclidV2],
+),
+}
let padded_blob_bytes = {
let mut padded_blob_bytes = self.blob_bytes.to_vec();
padded_blob_bytes.resize(N_BLOB_BYTES, 0);
@@ -175,79 +175,19 @@ impl BatchProvingTask {
pub fn precheck_and_build_metadata(&self) -> Result<BatchInfo> {
let fork_name = ForkName::from(self.fork_name.as_str());
-let (parent_state_root, state_root, chain_id, withdraw_root) = (
-self.chunk_proofs
-.first()
-.expect("at least one chunk in batch")
-.metadata
-.chunk_info
-.prev_state_root,
-self.chunk_proofs
-.last()
-.expect("at least one chunk in batch")
-.metadata
-.chunk_info
-.post_state_root,
-self.chunk_proofs
-.last()
-.expect("at least one chunk in batch")
-.metadata
-.chunk_info
-.chain_id,
-self.chunk_proofs
-.last()
-.expect("at least one chunk in batch")
-.metadata
-.chunk_info
-.withdraw_root,
-);
-let (parent_batch_hash, prev_msg_queue_hash, post_msg_queue_hash) = match self.batch_header
-{
-BatchHeaderV::V6(h) => {
-assert_eq!(
-fork_name,
-ForkName::EuclidV1,
-"hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
-ForkName::EuclidV1,
-);
-(h.parent_batch_hash, Default::default(), Default::default())
-}
-BatchHeaderV::V7(h) => {
-assert_eq!(
-fork_name,
-ForkName::EuclidV2,
-"hardfork mismatch for da-codec@v7 header: found={fork_name:?}, expected={:?}",
-ForkName::EuclidV2,
-);
-(
-h.parent_batch_hash,
-self.chunk_proofs
-.first()
-.expect("at least one chunk in batch")
-.metadata
-.chunk_info
-.prev_msg_queue_hash,
-self.chunk_proofs
-.last()
-.expect("at least one chunk in batch")
-.metadata
-.chunk_info
-.post_msg_queue_hash,
-)
-}
-};
+// for every aggregation task, there are two steps needed to build the metadata:
+// 1. generate data for metadata from the witness
+// 2. validate every adjacent proof pair
+let witness = self.build_guest_input();
+let archieved = ToArchievedWitness::create(&witness)
+.map_err(|e| eyre::eyre!("archieve batch witness fail: {e}"))?;
+let archieved_witness = archieved
+.access()
+.map_err(|e| eyre::eyre!("access archieved batch witness fail: {e}"))?;
+let metadata: BatchInfo = archieved_witness.into();
let batch_hash = self.batch_header.batch_hash();
+super::check_aggregation_proofs(self.chunk_proofs.as_slice(), fork_name)?;
-Ok(BatchInfo {
-parent_state_root,
-parent_batch_hash,
-state_root,
-batch_hash,
-chain_id,
-withdraw_root,
-prev_msg_queue_hash,
-post_msg_queue_hash,
-})
+Ok(metadata)
}
}
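Condensed, the new `precheck_and_build_metadata` reduces to the two commented steps. A sketch (identifiers are from this diff, but `build_guest_input` is private to the task type, so this is illustrative rather than drop-in code):

```rust
// Two-step metadata build, as described in the comment above.
fn precheck_sketch(task: &BatchProvingTask) -> eyre::Result<BatchInfo> {
    // Step 1: derive the metadata from the archived witness, i.e. the same
    // representation the guest will read.
    let witness = task.build_guest_input();
    let archieved = ToArchievedWitness::create(&witness)
        .map_err(|e| eyre::eyre!("archieve batch witness fail: {e}"))?;
    let metadata: BatchInfo = archieved
        .access()
        .map_err(|e| eyre::eyre!("access archieved batch witness fail: {e}"))?
        .into();
    // Step 2: check that every adjacent chunk-proof pair chains correctly.
    super::check_aggregation_proofs(
        task.chunk_proofs.as_slice(),
        ForkName::from(task.fork_name.as_str()),
    )?;
    Ok(metadata)
}
```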

View File

@@ -1,7 +1,8 @@
use crate::proofs::BatchProof;
use eyre::Result;
use scroll_zkvm_types::{
-bundle::{BundleInfo, BundleWitness},
+bundle::{BundleInfo, BundleWitness, ToArchievedWitness},
+public_inputs::ForkName,
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
@@ -50,57 +51,21 @@ impl BundleProvingTask {
}
pub fn precheck_and_build_metadata(&self) -> Result<BundleInfo> {
-use eyre::eyre;
-let err_prefix = format!("metadata_with_prechecks for task_id={}", self.identifier());
+let fork_name = ForkName::from(self.fork_name.as_str());
+// for every aggregation task, there are two steps needed to build the metadata:
+// 1. generate data for metadata from the witness
+// 2. validate every adjacent proof pair
+let witness = self.build_guest_input();
+let archieved = ToArchievedWitness::create(&witness)
+.map_err(|e| eyre::eyre!("archieve bundle witness fail: {e}"))?;
+let archieved_witness = archieved
+.access()
+.map_err(|e| eyre::eyre!("access archieved bundle witness fail: {e}"))?;
+let metadata: BundleInfo = archieved_witness.into();
-for w in self.batch_proofs.windows(2) {
-if w[1].metadata.batch_info.chain_id != w[0].metadata.batch_info.chain_id {
-return Err(eyre!("{err_prefix}: chain_id mismatch"));
-}
+super::check_aggregation_proofs(self.batch_proofs.as_slice(), fork_name)?;
-if w[1].metadata.batch_info.parent_state_root != w[0].metadata.batch_info.state_root {
-return Err(eyre!("{err_prefix}: state_root not chained"));
-}
-if w[1].metadata.batch_info.parent_batch_hash != w[0].metadata.batch_info.batch_hash {
-return Err(eyre!("{err_prefix}: batch_hash not chained"));
-}
-}
-let (first_batch, last_batch) = (
-&self
-.batch_proofs
-.first()
-.expect("at least one batch in bundle")
-.metadata
-.batch_info,
-&self
-.batch_proofs
-.last()
-.expect("at least one batch in bundle")
-.metadata
-.batch_info,
-);
-let chain_id = first_batch.chain_id;
-let num_batches = u32::try_from(self.batch_proofs.len()).expect("num_batches: u32");
-let prev_state_root = first_batch.parent_state_root;
-let prev_batch_hash = first_batch.parent_batch_hash;
-let post_state_root = last_batch.state_root;
-let batch_hash = last_batch.batch_hash;
-let withdraw_root = last_batch.withdraw_root;
-let msg_queue_hash = last_batch.post_msg_queue_hash;
-Ok(BundleInfo {
-chain_id,
-msg_queue_hash,
-num_batches,
-prev_state_root,
-prev_batch_hash,
-post_state_root,
-batch_hash,
-withdraw_root,
-})
+Ok(metadata)
}
}

View File

@@ -2,7 +2,7 @@ use super::chunk_interpreter::*;
use eyre::Result;
use sbv_primitives::{types::BlockWitness, B256};
use scroll_zkvm_types::{
-chunk::{execute, ChunkInfo, ChunkWitness},
+chunk::{execute, ChunkInfo, ChunkWitness, ToArchievedWitness},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
@@ -129,23 +129,33 @@ impl ChunkProvingTask {
fn insert_state(&mut self, node: sbv_primitives::Bytes) {
self.block_witnesses[0].states.push(node);
}
}
-const MAX_FETCH_NODES_ATTEMPTS: usize = 15;
+pub fn precheck_and_build_metadata(&self) -> Result<ChunkInfo> {
+let witness = self.build_guest_input();
+let archieved = ToArchievedWitness::create(&witness)
+.map_err(|e| eyre::eyre!("archieve chunk witness fail: {e}"))?;
+let archieved_witness = archieved
+.access()
+.map_err(|e| eyre::eyre!("access archieved chunk witness fail: {e}"))?;
-impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
-fn try_from_with_interpret(
-value: &mut ChunkProvingTask,
+let ret = ChunkInfo::try_from(archieved_witness).map_err(|e| eyre::eyre!("{e}"))?;
+Ok(ret)
+}
+/// This method checks the validity of the current task (there may be missing storage nodes)
+/// and tries to fix it until everything is OK.
+pub fn prepare_task_via_interpret(
+&mut self,
interpreter: impl ChunkInterpreter,
-) -> eyre::Result<Self> {
+) -> eyre::Result<()> {
use eyre::eyre;
let err_prefix = format!(
"metadata_with_prechecks for task_id={:?}",
-value.identifier()
+self.identifier()
);
-if value.block_witnesses.is_empty() {
+if self.block_witnesses.is_empty() {
return Err(eyre!(
"{err_prefix}: chunk should contain at least one block",
));
@@ -156,8 +166,15 @@ impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
let err_parse_re = regex::Regex::new(pattern)?;
let mut attempts = 0;
loop {
-match execute(&value.build_guest_input()) {
-Ok(chunk_info) => return Ok(chunk_info),
+let witness = self.build_guest_input();
+let archieved = ToArchievedWitness::create(&witness)
+.map_err(|e| eyre::eyre!("archieve chunk witness fail: {e}"))?;
+let archieved_witness = archieved
+.access()
+.map_err(|e| eyre::eyre!("access archieved chunk witness fail: {e}"))?;
+match execute(archieved_witness) {
+Ok(_) => return Ok(()),
Err(e) => {
if let Some(caps) = err_parse_re.captures(&e) {
let hash = caps[2].to_string();
@@ -174,7 +191,7 @@ impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
hash.parse::<sbv_primitives::B256>().expect("should be hex");
let node = interpreter.try_fetch_storage_node(node_hash)?;
tracing::warn!("missing node fetched: {node}");
-value.insert_state(node);
+self.insert_state(node);
} else {
return Err(eyre!("{err_prefix}: {e}"));
}
@@ -183,3 +200,5 @@ impl TryFromWithInterpreter<&mut ChunkProvingTask> for ChunkInfo {
}
}
}
+const MAX_FETCH_NODES_ATTEMPTS: usize = 15;
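The loop in `prepare_task_via_interpret` is bounded by this constant. A generic sketch of the same fix-and-retry shape (simplified: the real loop regex-parses the missing storage-node hash out of the execution error and fetches it through the `ChunkInterpreter`):

```rust
// Simplified shape of the heal-and-retry loop; the closures stand in for
// `execute` on the rebuilt witness and the interpreter-backed node fetch.
fn retry_until_complete(
    mut try_execute: impl FnMut() -> Result<(), String>,
    mut fetch_missing: impl FnMut(&str) -> eyre::Result<()>,
) -> eyre::Result<()> {
    for _ in 0..MAX_FETCH_NODES_ATTEMPTS {
        match try_execute() {
            Ok(()) => return Ok(()),
            // Treat the error text as a missing-node report, try to heal,
            // then re-execute from scratch.
            Err(e) => fetch_missing(&e)?,
        }
    }
    Err(eyre::eyre!(
        "giving up after {MAX_FETCH_NODES_ATTEMPTS} fetch attempts"
    ))
}
```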

View File

@@ -4,7 +4,11 @@ mod euclidv2;
use euclidv2::EuclidV2Verifier;
use eyre::Result;
use serde::{Deserialize, Serialize};
-use std::{cell::OnceCell, path::Path, rc::Rc};
+use std::{
+collections::HashMap,
+path::Path,
+sync::{Arc, Mutex, OnceLock},
+};
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TaskType {
@@ -31,7 +35,7 @@ pub struct VKDump {
}
pub trait ProofVerifier {
-fn verify(&self, task_type: TaskType, proof: Vec<u8>) -> Result<bool>;
+fn verify(&self, task_type: TaskType, proof: &[u8]) -> Result<bool>;
fn dump_vk(&self, file: &Path);
}
@@ -43,36 +47,49 @@ pub struct CircuitConfig {
#[derive(Debug, Serialize, Deserialize)]
pub struct VerifierConfig {
pub high_version_circuit: CircuitConfig,
+pub circuits: Vec<CircuitConfig>,
}
type HardForkName = String;
-struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);
-static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
+type VerifierType = Arc<Mutex<dyn ProofVerifier + Send>>;
+static VERIFIERS: OnceLock<HashMap<HardForkName, VerifierType>> = OnceLock::new();
pub fn init(config: VerifierConfig) {
-let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
-unsafe {
-VERIFIER_HIGH
-.set(VerifierPair(
-config.high_version_circuit.fork_name,
-Rc::new(Box::new(verifier)),
-))
-.unwrap_unchecked();
+let mut verifiers: HashMap<HardForkName, VerifierType> = Default::default();
+for cfg in &config.circuits {
+let canonical_fork_name = cfg.fork_name.to_lowercase();
+let verifier = EuclidV2Verifier::new(&cfg.assets_path, canonical_fork_name.as_str().into());
+let ret = verifiers.insert(canonical_fork_name, Arc::new(Mutex::new(verifier)));
+assert!(
+ret.is_none(),
+"DO NOT init the same fork {} twice",
+cfg.fork_name
+);
+tracing::info!("load verifier config for fork {}", cfg.fork_name);
+}
+let ret = VERIFIERS.set(verifiers).is_ok();
+assert!(ret);
}
-pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
-unsafe {
-if let Some(verifier) = VERIFIER_HIGH.get() {
-if verifier.0 == fork_name {
-return Ok(verifier.1.clone());
-}
+pub fn get_verifier(fork_name: &str) -> Result<Arc<Mutex<dyn ProofVerifier>>> {
+if let Some(verifiers) = VERIFIERS.get() {
+if let Some(verifier) = verifiers.get(fork_name) {
+return Ok(verifier.clone());
+}
+Err(eyre::eyre!(
+"failed to get verifier, key not found: {}, has {:?}",
+fork_name,
+verifiers.keys().collect::<Vec<_>>(),
+))
+} else {
+Err(eyre::eyre!(
+"failed to get verifier, not initialized: {}",
+fork_name
+))
+}
-Err(eyre::eyre!(
-"failed to get verifier, key not found, {}",
-fork_name
-))
}
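For illustration, how the new multi-fork registry might be wired end to end. Struct and function names are from this diff; the fork names, asset paths, and the assumption that `CircuitConfig` carries exactly these two fields are placeholders:

```rust
// Hypothetical wiring of the new registry.
fn init_and_verify(proof_bytes: &[u8]) -> eyre::Result<bool> {
    init(VerifierConfig {
        high_version_circuit: CircuitConfig {
            fork_name: "euclidv2".into(),
            assets_path: "/assets/euclidv2".into(),
        },
        circuits: vec![CircuitConfig {
            fork_name: "EuclidV2".into(), // lowercased to "euclidv2" on insert
            assets_path: "/assets/euclidv2".into(),
        }],
    });
    // Lookup is keyed by the canonical (lowercase) fork name.
    let verifier = get_verifier("euclidv2")?;
    verifier.lock().unwrap().verify(TaskType::Chunk, proof_bytes)
}
```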

View File

@@ -1,4 +1,4 @@
-use super::{ProofVerifier, TaskType, VKDump};
+use super::{ProofVerifier, TaskType};
use eyre::Result;
@@ -6,61 +6,64 @@ use crate::{
proofs::{AsRootProof, BatchProof, BundleProof, ChunkProof, IntoEvmProof},
utils::panic_catch,
};
-use scroll_zkvm_verifier_euclid::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
-use std::{fs::File, path::Path};
+use scroll_zkvm_types::public_inputs::ForkName;
+use scroll_zkvm_verifier_euclid::verifier::UniversalVerifier;
+use std::path::Path;
pub struct EuclidV2Verifier {
-chunk_verifier: ChunkVerifier,
-batch_verifier: BatchVerifier,
-bundle_verifier: BundleVerifierEuclidV2,
+verifier: UniversalVerifier,
+fork: ForkName,
}
impl EuclidV2Verifier {
-pub fn new(assets_dir: &str) -> Self {
+pub fn new(assets_dir: &str, fork: ForkName) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
Self {
-chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
+verifier: UniversalVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
-batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
-.expect("Setting up batch verifier"),
-bundle_verifier: BundleVerifierEuclidV2::setup(&config, &exe, &verifier_bin)
-.expect("Setting up bundle verifier"),
+fork,
}
}
}
impl ProofVerifier for EuclidV2Verifier {
-fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
+fn verify(&self, task_type: super::TaskType, proof: &[u8]) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
-let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
-self.chunk_verifier.verify_proof(proof.as_root_proof())
+let proof = serde_json::from_slice::<ChunkProof>(proof).unwrap();
+if !proof.pi_hash_check(self.fork) {
+return false;
+}
+self.verifier
+.verify_proof(proof.as_root_proof(), &proof.vk)
+.unwrap()
}
TaskType::Batch => {
-let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
-self.batch_verifier.verify_proof(proof.as_root_proof())
+let proof = serde_json::from_slice::<BatchProof>(proof).unwrap();
+if !proof.pi_hash_check(self.fork) {
+return false;
+}
+self.verifier
+.verify_proof(proof.as_root_proof(), &proof.vk)
+.unwrap()
}
TaskType::Bundle => {
-let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
-self.bundle_verifier
-.verify_proof_evm(&proof.into_evm_proof())
+let proof = serde_json::from_slice::<BundleProof>(proof).unwrap();
+if !proof.pi_hash_check(self.fork) {
+return false;
+}
+let vk = proof.vk.clone();
+let evm_proof = proof.into_evm_proof();
+self.verifier.verify_proof_evm(&evm_proof, &vk).unwrap()
}
})
.map_err(|err_str: String| eyre::eyre!("{err_str}"))
}
-fn dump_vk(&self, file: &Path) {
-use base64::{prelude::BASE64_STANDARD, Engine};
-let f = File::create(file).expect("Failed to open file to dump VK");
-let dump = VKDump {
-chunk_vk: BASE64_STANDARD.encode(self.chunk_verifier.get_app_vk()),
-batch_vk: BASE64_STANDARD.encode(self.batch_verifier.get_app_vk()),
-bundle_vk: BASE64_STANDARD.encode(self.bundle_verifier.get_app_vk()),
-};
-serde_json::to_writer(f, &dump).expect("Failed to dump VK");
+fn dump_vk(&self, _file: &Path) {
+panic!("dump vk has been deprecated");
}
}