feat: the CLOAK privacy solution (#1737)

Co-authored-by: Ho <fan@scroll.io>
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Zhang Zhuo authored on 2025-11-14 22:00:37 +08:00, committed by GitHub
parent 1985e54ab3
commit 6bee33036f
58 changed files with 1687 additions and 413 deletions

View File

@@ -5,7 +5,7 @@ use alloy::{
};
use eyre::Result;
use libzkp::tasks::ChunkInterpreter;
use sbv_primitives::types::Network;
use sbv_primitives::types::{consensus::TxL1Message, Network};
use serde::{Deserialize, Serialize};
fn default_max_retry() -> u32 {
@@ -168,6 +168,40 @@ impl<T: Provider<Network>> ChunkInterpreter for RpcClient<'_, T> {
self.handle
.block_on(fetch_storage_node_async(&self.provider, node_hash))
}
fn try_fetch_l1_msgs(&self, block_number: u64) -> Result<Vec<TxL1Message>> {
async fn fetch_l1_msgs(
provider: impl Provider<Network>,
block_number: u64,
) -> Result<Vec<TxL1Message>> {
let block_number_hex = format!("0x{:x}", block_number);
#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum NullOrVec {
Null, // matches JSON `null`
Vec(Vec<TxL1Message>), // matches JSON array
}
Ok(
match provider
.client()
.request::<_, NullOrVec>(
"scroll_getL1MessagesInBlock",
(block_number_hex, "synced"),
)
.await?
{
NullOrVec::Null => Vec::new(),
NullOrVec::Vec(r) => r,
},
)
}
tracing::debug!("fetch L1 msgs for {block_number}");
self.handle
.block_on(fetch_l1_msgs(&self.provider, block_number))
}
}
#[cfg(test)]
@@ -218,4 +252,16 @@ mod tests {
println!("{}", serde_json::to_string_pretty(&wit2).unwrap());
}
#[test]
#[ignore = "Requires L2GETH_ENDPOINT environment variable"]
fn test_try_fetch_l1_messages() {
let config = create_config_from_env();
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
let client = client_core.get_client();
let msgs = client.try_fetch_l1_msgs(32).expect("should succeed");
println!("{}", serde_json::to_string_pretty(&msgs).unwrap());
}
}
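The `NullOrVec` shim above exists because `scroll_getL1MessagesInBlock` can return JSON `null` rather than an empty array for blocks with no L1 messages. A self-contained sketch of the same untagged-enum pattern, using a hypothetical `Msg` type in place of the real `TxL1Message`:

use serde::Deserialize;

#[derive(Deserialize, Debug, PartialEq)]
struct Msg {
    queue_index: u64,
}

// Untagged: serde tries each variant in order until one fits the input.
#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum NullOrVec {
    Null,          // matches JSON `null`
    Vec(Vec<Msg>), // matches a JSON array
}

// Normalize both shapes to a plain Vec, as try_fetch_l1_msgs does.
fn normalize(raw: &str) -> Vec<Msg> {
    match serde_json::from_str::<NullOrVec>(raw).expect("valid JSON") {
        NullOrVec::Null => Vec::new(),
        NullOrVec::Vec(v) => v,
    }
}

fn main() {
    assert!(normalize("null").is_empty());
    assert_eq!(normalize(r#"[{"queue_index":7}]"#), vec![Msg { queue_index: 7 }]);
}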

View File

@@ -5,7 +5,7 @@ edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
scroll-zkvm-types.workspace = true
scroll-zkvm-types = { workspace = true, features = ["scroll"] }
scroll-zkvm-verifier.workspace = true
alloy-primitives.workspace = true # suppress the effect of "native-keccak"

View File

@@ -13,6 +13,7 @@ use tasks::chunk_interpreter::{ChunkInterpreter, TryFromWithInterpreter};
/// global features: use legacy encoding for witness
static mut LEGACY_WITNESS_ENCODING: bool = false;
pub(crate) fn witness_use_legacy_mode() -> bool {
unsafe { LEGACY_WITNESS_ENCODING }
}
@@ -36,14 +37,13 @@ pub fn set_dynamic_feature(feats: &str) {
/// task (with full witnesses)
pub fn checkout_chunk_task(
task_json: &str,
decryption_key: Option<&[u8]>,
interpreter: impl ChunkInterpreter,
) -> eyre::Result<String> {
let chunk_task = serde_json::from_str::<tasks::ChunkTask>(task_json)?;
let ret = serde_json::to_string(&tasks::ChunkProvingTask::try_from_with_interpret(
chunk_task,
interpreter,
)?)?;
Ok(ret)
Ok(serde_json::to_string(
&tasks::ChunkProvingTask::try_from_with_interpret(chunk_task, decryption_key, interpreter)?,
)?)
}
/// Convert the universal task json into compatible form for old prover
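For orientation, a minimal sketch of how a host might drive `checkout_chunk_task` with the new optional decryption key. The 32-byte key and the `DummyInterpreter` path are illustrative assumptions; the real caller is the FFI entry point shown further below.

// Hypothetical helper: expand a coordinator chunk task into a full proving task.
fn expand_chunk_task(task_json: &str, key: Option<[u8; 32]>) -> eyre::Result<String> {
    let interpreter = tasks::chunk_interpreter::DummyInterpreter {};
    // None keeps the pre-CLOAK behaviour; Some(..) enables validium decryption.
    checkout_chunk_task(task_json, key.as_ref().map(|k| k.as_slice()), interpreter)
}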

View File

@@ -8,9 +8,10 @@ use scroll_zkvm_types::{
bundle::BundleInfo,
chunk::ChunkInfo,
proof::{EvmProof, OpenVmEvmProof, ProofEnum, StarkProof},
public_inputs::{ForkName, MultiVersionPublicInputs},
public_inputs::MultiVersionPublicInputs,
types_agg::AggregationInput,
utils::{serialize_vk, vec_as_base64},
version,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
@@ -181,13 +182,13 @@ impl<Metadata: ProofMetadata> WrappedProof<Metadata> {
/// Sanity checks on the wrapped proof:
///
/// - pi_hash computed in host does in fact match pi_hash computed in guest
pub fn pi_hash_check(&self, fork_name: ForkName) -> bool {
pub fn pi_hash_check(&self, ver: version::Version) -> bool {
let proof_pi = self.proof.public_values();
let expected_pi = self
.metadata
.pi_hash_info()
.pi_hash_by_fork(fork_name)
.pi_hash_by_version(ver)
.0
.as_ref()
.iter()
@@ -252,6 +253,7 @@ mod tests {
batch_hash: B256::repeat_byte(4),
withdraw_root: B256::repeat_byte(5),
msg_queue_hash: B256::repeat_byte(6),
encryption_key: None,
};
let bundle_pi_hash = bundle_info.pi_hash(ForkName::EuclidV1);
BundleProofMetadata {
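The `pi_hash_check` comparison above (truncated at the hunk boundary) is a byte-wise equality between the guest's exposed public values and the host-recomputed pi hash. Its shape in isolation, with illustrative names (`proof_pi` standing in for `self.proof.public_values()`, `expected` for the `pi_hash_by_version(ver)` digest):

// Sketch only: returns true iff the two byte sequences agree exactly.
fn pi_hash_matches(proof_pi: &[u8], expected: &[u8; 32]) -> bool {
    proof_pi.len() == expected.len()
        && proof_pi.iter().zip(expected.iter()).all(|(a, b)| a == b)
}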

View File

@@ -10,32 +10,27 @@ pub use chunk_interpreter::ChunkInterpreter;
pub use scroll_zkvm_types::task::ProvingTask;
use crate::{
proofs::{self, BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata},
proofs::{BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata},
utils::panic_catch,
};
use sbv_primitives::B256;
use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs};
use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs, Version};
fn encode_task_to_witness<T: serde::Serialize>(task: &T) -> eyre::Result<Vec<u8>> {
let config = bincode::config::standard();
Ok(bincode::serde::encode_to_vec(task, config)?)
}
fn check_aggregation_proofs<Metadata>(
proofs: &[proofs::WrappedProof<Metadata>],
fork_name: ForkName,
) -> eyre::Result<()>
where
Metadata: proofs::ProofMetadata,
{
fn check_aggregation_proofs<Metadata: MultiVersionPublicInputs>(
metadata: &[Metadata],
version: Version,
) -> eyre::Result<()> {
panic_catch(|| {
for w in proofs.windows(2) {
w[1].metadata
.pi_hash_info()
.validate(w[0].metadata.pi_hash_info(), fork_name);
for w in metadata.windows(2) {
w[1].validate(&w[0], version);
}
})
.map_err(|e| eyre::eyre!("Chunk data validation failed: {}", e))?;
.map_err(|e| eyre::eyre!("Metadata validation failed: {}", e))?;
Ok(())
}
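Checking only adjacent pairs is enough here: the validated property is chain-like, so if every consecutive pair of metadata links up, the whole sequence does. The same `windows(2)` idiom on a toy type (`Seg` and its `validate` are illustrative, not the crate's `MultiVersionPublicInputs`):

#[derive(Debug)]
struct Seg {
    start: u64,
    end: u64,
}

impl Seg {
    // Panics unless `self` starts where `prev` ended, mirroring how the
    // real `validate` panics on a broken metadata chain.
    fn validate(&self, prev: &Seg) {
        assert_eq!(self.start, prev.end, "segments must be contiguous");
    }
}

fn main() {
    let segs = [Seg { start: 0, end: 5 }, Seg { start: 5, end: 9 }];
    for w in segs.windows(2) {
        w[1].validate(&w[0]);
    }
}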

View File

@@ -1,21 +1,31 @@
use crate::proofs::ChunkProof;
use c_kzg::Bytes48;
use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
build_point_eval_witness, BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderV8,
BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8, LegacyBatchWitness,
ReferenceHeader, N_BLOB_BYTES,
BatchHeaderValidium, BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8,
LegacyBatchWitness, ReferenceHeader, N_BLOB_BYTES,
},
public_inputs::ForkName,
chunk::ChunkInfo,
public_inputs::{ForkName, Version},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
version::{Domain, STFVersion},
};
use crate::proofs::ChunkProof;
mod utils;
use utils::{base64, point_eval};
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct BatchHeaderValidiumWithHash {
#[serde(flatten)]
header: BatchHeaderValidium,
batch_hash: B256,
}
/// Defines a variant batch header type. Since a BatchHeaderV6 cannot
/// be decoded as V7, deserialization is always unambiguous.
/// Notice: the V6 header MUST be put above V7, since this is an untagged enum
@@ -23,36 +33,55 @@ use utils::{base64, point_eval};
#[derive(Clone, serde::Deserialize, serde::Serialize)]
#[serde(untagged)]
pub enum BatchHeaderV {
Validium(BatchHeaderValidiumWithHash),
V6(BatchHeaderV6),
V7_8(BatchHeaderV7),
}
impl core::fmt::Display for BatchHeaderV {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
BatchHeaderV::V6(_) => write!(f, "V6"),
BatchHeaderV::V7_8(_) => write!(f, "V7_8"),
BatchHeaderV::Validium(_) => write!(f, "Validium"),
}
}
}
impl BatchHeaderV {
pub fn batch_hash(&self) -> B256 {
match self {
BatchHeaderV::V6(h) => h.batch_hash(),
BatchHeaderV::V7_8(h) => h.batch_hash(),
BatchHeaderV::Validium(h) => h.header.batch_hash(),
}
}
pub fn must_v6_header(&self) -> &BatchHeaderV6 {
match self {
BatchHeaderV::V6(h) => h,
_ => panic!("try to pick other header type"),
_ => unreachable!("A header of {} is considered to be v6", self),
}
}
pub fn must_v7_header(&self) -> &BatchHeaderV7 {
match self {
BatchHeaderV::V7_8(h) => h,
_ => panic!("try to pick other header type"),
_ => unreachable!("A header of {} is considered to be v7", self),
}
}
pub fn must_v8_header(&self) -> &BatchHeaderV8 {
match self {
BatchHeaderV::V7_8(h) => h,
_ => panic!("try to pick other header type"),
_ => unreachable!("A header of {} is considered to be v8", self),
}
}
pub fn must_validium_header(&self) -> &BatchHeaderValidium {
match self {
BatchHeaderV::Validium(h) => &h.header,
_ => unreachable!("A header of {} is considered to be validium", self),
}
}
}
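The ordering rule in the comment above matters because `#[serde(untagged)]` commits to the first variant that deserializes successfully, and serde structs ignore unknown fields by default. A toy demonstration with illustrative `Wide`/`Narrow` structs:

use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct Narrow {
    a: u32,
}

#[derive(Deserialize, Debug)]
struct Wide {
    a: u32,
    b: u32,
}

#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum Header {
    Wide(Wide),     // the richer shape must come first...
    Narrow(Narrow), // ...or it would be swallowed here, silently dropping `b`
}

fn main() {
    let h: Header = serde_json::from_str(r#"{"a":1,"b":2}"#).unwrap();
    assert!(matches!(h, Header::Wide(_)));
}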
@@ -61,6 +90,8 @@ impl BatchHeaderV {
/// is compatible with both pre-euclidv2 and euclidv2
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct BatchProvingTask {
/// The version of the chunks in the batch, as per [`Version`].
pub version: u8,
/// Chunk proofs for the contiguous list of chunks within the batch.
pub chunk_proofs: Vec<ChunkProof>,
/// The [`BatchHeaderV6/V7`], as computed on-chain for this batch.
@@ -107,85 +138,135 @@ impl TryFrom<BatchProvingTask> for ProvingTask {
impl BatchProvingTask {
fn build_guest_input(&self) -> BatchWitness {
let fork_name = self.fork_name.to_lowercase().as_str().into();
let version = Version::from(self.version);
// sanity check: calculate point eval needed and compare with task input
let (kzg_commitment, kzg_proof, challenge_digest) = {
let blob = point_eval::to_blob(&self.blob_bytes);
let commitment = point_eval::blob_to_kzg_commitment(&blob);
let versioned_hash = point_eval::get_versioned_hash(&commitment);
let challenge_digest = match &self.batch_header {
BatchHeaderV::V6(_) => {
assert_eq!(
fork_name,
ForkName::EuclidV1,
"hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV1,
);
EnvelopeV6::from_slice(self.blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
BatchHeaderV::V7_8(_) => {
let padded_blob_bytes = {
let mut padded_blob_bytes = self.blob_bytes.to_vec();
padded_blob_bytes.resize(N_BLOB_BYTES, 0);
padded_blob_bytes
};
match fork_name {
ForkName::EuclidV2 => {
<EnvelopeV7 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
ForkName::Feynman => {
<EnvelopeV8 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
f => unreachable!(
"hardfork mismatch for da-codec@v7 header: found={}, expected={:?}",
f,
[ForkName::EuclidV2, ForkName::Feynman],
),
let point_eval_witness = if !version.is_validium() {
// sanity check: calculate point eval needed and compare with task input
let (kzg_commitment, kzg_proof, challenge_digest) = {
let blob = point_eval::to_blob(&self.blob_bytes);
let commitment = point_eval::blob_to_kzg_commitment(&blob);
let versioned_hash = point_eval::get_versioned_hash(&commitment);
let challenge_digest = match &self.batch_header {
BatchHeaderV::V6(_) => {
assert_eq!(
version.fork,
ForkName::EuclidV1,
"hardfork mismatch for da-codec@v6 header: found={:?}, expected={:?}",
version.fork,
ForkName::EuclidV1,
);
EnvelopeV6::from_slice(self.blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
}
BatchHeaderV::V7_8(_) => {
let padded_blob_bytes = {
let mut padded_blob_bytes = self.blob_bytes.to_vec();
padded_blob_bytes.resize(N_BLOB_BYTES, 0);
padded_blob_bytes
};
match version.fork {
ForkName::EuclidV2 => {
<EnvelopeV7 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
ForkName::Feynman => {
<EnvelopeV8 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
fork_name => unreachable!(
"hardfork mismatch for da-codec@v7 header: found={}, expected={:?}",
fork_name,
[ForkName::EuclidV2, ForkName::Feynman],
),
}
}
BatchHeaderV::Validium(_) => unreachable!("version!=validium"),
};
let (proof, _) = point_eval::get_kzg_proof(&blob, challenge_digest);
(commitment.to_bytes(), proof.to_bytes(), challenge_digest)
};
let (proof, _) = point_eval::get_kzg_proof(&blob, challenge_digest);
if let Some(k) = self.kzg_commitment {
assert_eq!(k, kzg_commitment);
}
(commitment.to_bytes(), proof.to_bytes(), challenge_digest)
if let Some(c) = self.challenge_digest {
assert_eq!(c, U256::from_be_bytes(challenge_digest.0));
}
if let Some(p) = self.kzg_proof {
assert_eq!(p, kzg_proof);
}
Some(build_point_eval_witness(
kzg_commitment.into_inner(),
kzg_proof.into_inner(),
))
} else {
assert!(self.kzg_proof.is_none(), "domain=validium has no blob-da");
assert!(
self.kzg_commitment.is_none(),
"domain=validium has no blob-da"
);
assert!(
self.challenge_digest.is_none(),
"domain=validium has no blob-da"
);
match &self.batch_header {
BatchHeaderV::Validium(h) => assert_eq!(
h.header.batch_hash(),
h.batch_hash,
"calculated batch hash match which from coordinator"
),
_ => panic!("unexpected header type"),
}
None
};
if let Some(k) = self.kzg_commitment {
assert_eq!(k, kzg_commitment);
}
if let Some(c) = self.challenge_digest {
assert_eq!(c, U256::from_be_bytes(challenge_digest.0));
}
if let Some(p) = self.kzg_proof {
assert_eq!(p, kzg_proof);
}
let point_eval_witness = Some(build_point_eval_witness(
kzg_commitment.into_inner(),
kzg_proof.into_inner(),
));
let reference_header = match fork_name {
ForkName::EuclidV1 => ReferenceHeader::V6(*self.batch_header.must_v6_header()),
ForkName::EuclidV2 => ReferenceHeader::V7(*self.batch_header.must_v7_header()),
ForkName::Feynman => ReferenceHeader::V8(*self.batch_header.must_v8_header()),
let reference_header = match (version.domain, version.stf_version) {
(Domain::Scroll, STFVersion::V6) => {
ReferenceHeader::V6(*self.batch_header.must_v6_header())
}
(Domain::Scroll, STFVersion::V7) => {
ReferenceHeader::V7(*self.batch_header.must_v7_header())
}
(Domain::Scroll, STFVersion::V8) => {
ReferenceHeader::V8(*self.batch_header.must_v8_header())
}
(Domain::Validium, STFVersion::V1) => {
ReferenceHeader::Validium(*self.batch_header.must_validium_header())
}
(domain, stf_version) => {
unreachable!("unsupported domain={domain:?},stf-version={stf_version:?}")
}
};
// patch: ensure the block-hash fields are ZERO for the scroll domain
let chunk_infos = self
.chunk_proofs
.iter()
.map(|p| {
if version.domain == Domain::Scroll {
ChunkInfo {
prev_blockhash: B256::ZERO,
post_blockhash: B256::ZERO,
..p.metadata.chunk_info.clone()
}
} else {
p.metadata.chunk_info.clone()
}
})
.collect();
BatchWitness {
fork_name,
version: version.as_version_byte(),
fork_name: version.fork,
chunk_proofs: self.chunk_proofs.iter().map(|proof| proof.into()).collect(),
chunk_infos: self
.chunk_proofs
.iter()
.map(|p| p.metadata.chunk_info.clone())
.collect(),
chunk_infos,
blob_bytes: self.blob_bytes.clone(),
reference_header,
point_eval_witness,
@@ -193,15 +274,81 @@ impl BatchProvingTask {
}
pub fn precheck_and_build_metadata(&self) -> Result<BatchInfo> {
let fork_name = ForkName::from(self.fork_name.as_str());
// for every aggregation task, there are two steps needed to build the metadata:
// 1. generate data for metadata from the witness
// 2. validate every adjacent proof pair
let witness = self.build_guest_input();
let metadata = BatchInfo::from(&witness);
super::check_aggregation_proofs(self.chunk_proofs.as_slice(), fork_name)?;
super::check_aggregation_proofs(
witness.chunk_infos.as_slice(),
Version::from(self.version),
)?;
Ok(metadata)
}
}
#[test]
fn test_deserde_batch_header_v_validium() {
use std::str::FromStr;
// Top-level JSON: flattened enum tag "V1" + batch_hash
let json = r#"{
"V1": {
"version": 1,
"batch_index": 42,
"parent_batch_hash": "0x1111111111111111111111111111111111111111111111111111111111111111",
"post_state_root": "0x2222222222222222222222222222222222222222222222222222222222222222",
"withdraw_root": "0x3333333333333333333333333333333333333333333333333333333333333333",
"commitment": "0x4444444444444444444444444444444444444444444444444444444444444444"
},
"batch_hash": "0x5555555555555555555555555555555555555555555555555555555555555555"
}"#;
let parsed: BatchHeaderV = serde_json::from_str(json).expect("deserialize BatchHeaderV");
match parsed {
BatchHeaderV::Validium(v) => {
// Check the batch_hash field
let expected_batch_hash = B256::from_str(
"0x5555555555555555555555555555555555555555555555555555555555555555",
)
.unwrap();
assert_eq!(v.batch_hash, expected_batch_hash);
// Check the inner header variant and fields
match v.header {
BatchHeaderValidium::V1(h) => {
assert_eq!(h.version, 1);
assert_eq!(h.batch_index, 42);
let p = B256::from_str(
"0x1111111111111111111111111111111111111111111111111111111111111111",
)
.unwrap();
let s = B256::from_str(
"0x2222222222222222222222222222222222222222222222222222222222222222",
)
.unwrap();
let w = B256::from_str(
"0x3333333333333333333333333333333333333333333333333333333333333333",
)
.unwrap();
let c = B256::from_str(
"0x4444444444444444444444444444444444444444444444444444444444444444",
)
.unwrap();
assert_eq!(h.parent_batch_hash, p);
assert_eq!(h.post_state_root, s);
assert_eq!(h.withdraw_root, w);
assert_eq!(h.commitment, c);
// Sanity: computed batch hash equals the provided one (if method available)
// assert_eq!(v.header.batch_hash(), expected_batch_hash);
}
}
}
_ => panic!("expected validium header variant"),
}
}
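Note how `reference_header` now dispatches on the `(domain, stf_version)` pair rather than on the fork name alone. A minimal stand-alone illustration of that two-axis dispatch; the enums here are stand-ins, the real `Domain` and `STFVersion` being defined in `scroll_zkvm_types::version`:

#[derive(Debug, Clone, Copy)]
enum Domain {
    Scroll,
    Validium,
}

#[derive(Debug, Clone, Copy)]
enum STFVersion {
    V1,
    V6,
    V7,
    V8,
}

// Each supported (domain, stf) pair maps to exactly one header flavour;
// any other combination is a malformed task and is rejected loudly.
fn header_kind(domain: Domain, stf: STFVersion) -> &'static str {
    match (domain, stf) {
        (Domain::Scroll, STFVersion::V6) => "da-codec v6",
        (Domain::Scroll, STFVersion::V7) => "da-codec v7",
        (Domain::Scroll, STFVersion::V8) => "da-codec v8",
        (Domain::Validium, STFVersion::V1) => "validium v1",
        (d, s) => unreachable!("unsupported domain={d:?}, stf-version={s:?}"),
    }
}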

View File

@@ -1,17 +1,21 @@
use crate::proofs::BatchProof;
use eyre::Result;
use scroll_zkvm_types::{
bundle::{BundleInfo, BundleWitness},
public_inputs::ForkName,
bundle::{BundleInfo, BundleWitness, LegacyBundleWitness},
public_inputs::Version,
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
use crate::proofs::BatchProof;
/// Message indicating a sanity check failure.
const BUNDLE_SANITY_MSG: &str = "bundle must have at least one batch";
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct BundleProvingTask {
/// The version of batches in the bundle.
pub version: u8,
/// The STARK proofs of each batch in the bundle.
pub batch_proofs: Vec<BatchProof>,
/// for sanity check
pub bundle_info: Option<BundleInfo>,
@@ -40,26 +44,29 @@ impl BundleProvingTask {
}
fn build_guest_input(&self) -> BundleWitness {
let version = Version::from(self.version);
BundleWitness {
version: version.as_version_byte(),
batch_proofs: self.batch_proofs.iter().map(|proof| proof.into()).collect(),
batch_infos: self
.batch_proofs
.iter()
.map(|wrapped_proof| wrapped_proof.metadata.batch_info.clone())
.collect(),
fork_name: self.fork_name.to_lowercase().as_str().into(),
fork_name: version.fork,
}
}
pub fn precheck_and_build_metadata(&self) -> Result<BundleInfo> {
let fork_name = ForkName::from(self.fork_name.as_str());
// for every aggregation task, there are two steps needed to build the metadata:
// 1. generate data for metadata from the witness
// 2. validate every adjacent proof pair
let witness = self.build_guest_input();
let metadata = BundleInfo::from(&witness);
super::check_aggregation_proofs(self.batch_proofs.as_slice(), fork_name)?;
super::check_aggregation_proofs(
witness.batch_infos.as_slice(),
Version::from(self.version),
)?;
Ok(metadata)
}
@@ -71,7 +78,8 @@ impl TryFrom<BundleProvingTask> for ProvingTask {
fn try_from(value: BundleProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
let serialized_witness = if crate::witness_use_legacy_mode() {
to_rkyv_bytes::<RancorError>(&witness)?.into_vec()
let legacy = LegacyBundleWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};

View File

@@ -1,20 +1,26 @@
use super::chunk_interpreter::*;
use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::B256;
use sbv_primitives::{types::consensus::BlockHeader, B256};
use scroll_zkvm_types::{
chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness},
chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness, ValidiumInputs},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
version::Version,
};
use super::chunk_interpreter::*;
/// The type aligned with the coordinator's definition
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ChunkTask {
/// The version for the chunk, as per [`Version`].
pub version: u8,
/// block hashes for a series of block
pub block_hashes: Vec<B256>,
/// The on-chain L1 msg queue hash before applying L1 msg txs from the chunk.
pub prev_msg_queue_hash: B256,
/// The on-chain L1 msg queue hash after applying L1 msg txs from the chunk (for validation)
pub post_msg_queue_hash: B256,
/// Fork name specified
pub fork_name: String,
}
@@ -22,6 +28,7 @@ pub struct ChunkTask {
impl TryFromWithInterpreter<ChunkTask> for ChunkProvingTask {
fn try_from_with_interpret(
value: ChunkTask,
decryption_key: Option<&[u8]>,
interpreter: impl ChunkInterpreter,
) -> Result<Self> {
let mut block_witnesses = Vec::new();
@@ -31,10 +38,28 @@ impl TryFromWithInterpreter<ChunkTask> for ChunkProvingTask {
block_witnesses.push(witness);
}
let validium_txs = if Version::from(value.version).is_validium() {
let mut validium_txs = Vec::new();
for block_number in block_witnesses.iter().map(|w| w.header.number()) {
validium_txs.push(interpreter.try_fetch_l1_msgs(block_number)?);
}
validium_txs
} else {
vec![]
};
let validium_inputs = decryption_key.map(|secret_key| ValidiumInputs {
validium_txs,
secret_key: secret_key.into(),
});
Ok(Self {
version: value.version,
block_witnesses,
prev_msg_queue_hash: value.prev_msg_queue_hash,
post_msg_queue_hash: value.post_msg_queue_hash,
fork_name: value.fork_name,
validium_inputs,
})
}
}
@@ -48,12 +73,18 @@ const CHUNK_SANITY_MSG: &str = "chunk must have at least one block";
/// - {first_block_number}-{last_block_number}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct ChunkProvingTask {
/// The version for the chunk, as per [Version][scroll_zkvm_types::version::Version].
pub version: u8,
/// Witnesses for every block in the chunk.
pub block_witnesses: Vec<BlockWitness>,
/// The on-chain L1 msg queue hash before applying L1 msg txs from the chunk.
pub prev_msg_queue_hash: B256,
/// The on-chain L1 msg queue hash after applying L1 msg txs from the chunk (for validation)
pub post_msg_queue_hash: B256,
/// Fork name specified
pub fork_name: String,
/// Optional inputs in case of domain=validium.
pub validium_inputs: Option<ValidiumInputs>,
}
#[derive(Clone, Debug)]
@@ -126,11 +157,25 @@ impl ChunkProvingTask {
}
fn build_guest_input(&self) -> ChunkWitness {
ChunkWitness::new(
&self.block_witnesses,
self.prev_msg_queue_hash,
self.fork_name.to_lowercase().as_str().into(),
)
let version = Version::from(self.version);
if version.is_validium() {
assert!(self.validium_inputs.is_some());
ChunkWitness::new(
version.as_version_byte(),
&self.block_witnesses,
self.prev_msg_queue_hash,
version.fork,
self.validium_inputs.clone(),
)
} else {
ChunkWitness::new_scroll(
version.as_version_byte(),
&self.block_witnesses,
self.prev_msg_queue_hash,
version.fork,
)
}
}
fn insert_state(&mut self, node: sbv_primitives::Bytes) {
@@ -139,8 +184,8 @@ impl ChunkProvingTask {
pub fn precheck_and_build_metadata(&self) -> Result<ChunkInfo> {
let witness = self.build_guest_input();
let ret = ChunkInfo::try_from(witness).map_err(|e| eyre::eyre!("{e}"))?;
assert_eq!(ret.post_msg_queue_hash, self.post_msg_queue_hash);
Ok(ret)
}

View File

@@ -1,6 +1,6 @@
use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::{Bytes, B256};
use sbv_primitives::{types::consensus::TxL1Message, Bytes, B256};
/// An interpreter which is critical in translating chunk data,
/// since we need to fetch block witness and storage node data
@@ -13,13 +13,22 @@ pub trait ChunkInterpreter {
) -> Result<BlockWitness> {
Err(eyre::eyre!("no implement"))
}
fn try_fetch_storage_node(&self, _node_hash: B256) -> Result<Bytes> {
Err(eyre::eyre!("no implement"))
}
fn try_fetch_l1_msgs(&self, _block_number: u64) -> Result<Vec<TxL1Message>> {
Err(eyre::eyre!("no implement"))
}
}
pub trait TryFromWithInterpreter<T>: Sized {
fn try_from_with_interpret(value: T, interpreter: impl ChunkInterpreter) -> Result<Self>;
fn try_from_with_interpret(
value: T,
decryption_key: Option<&[u8]>,
interpreter: impl ChunkInterpreter,
) -> Result<Self>;
}
pub struct DummyInterpreter {}

View File

@@ -41,6 +41,7 @@ pub trait ProofVerifier {
#[derive(Debug, Serialize, Deserialize)]
pub struct CircuitConfig {
pub version: u8,
pub fork_name: String,
pub assets_path: String,
}
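With the new `version` field, a single circuit entry can be parsed as below; the concrete values are illustrative, not taken from a real deployment:

// Sketch: parse one entry; field names follow CircuitConfig above.
fn parse_example_entry() -> CircuitConfig {
    let raw = r#"{
        "version": 9,
        "fork_name": "feynman",
        "assets_path": "/opt/verifier/assets/feynman"
    }"#;
    serde_json::from_str(raw).expect("valid circuit config")
}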
@@ -61,14 +62,18 @@ pub fn init(config: VerifierConfig) {
for cfg in &config.circuits {
let canonical_fork_name = cfg.fork_name.to_lowercase();
let verifier = Verifier::new(&cfg.assets_path, canonical_fork_name.as_str().into());
let verifier = Verifier::new(&cfg.assets_path, cfg.version);
let ret = verifiers.insert(canonical_fork_name, Arc::new(Mutex::new(verifier)));
assert!(
ret.is_none(),
"DO NOT init the same fork {} twice",
cfg.fork_name
);
tracing::info!("load verifier config for fork {}", cfg.fork_name);
tracing::info!(
"load verifier config for fork {} (ver {})",
cfg.fork_name,
cfg.version
);
}
let ret = VERIFIERS.set(verifiers).is_ok();

View File

@@ -6,22 +6,22 @@ use crate::{
proofs::{AsRootProof, BatchProof, BundleProof, ChunkProof, IntoEvmProof},
utils::panic_catch,
};
use scroll_zkvm_types::public_inputs::ForkName;
use scroll_zkvm_types::version::Version;
use scroll_zkvm_verifier::verifier::UniversalVerifier;
use std::path::Path;
pub struct Verifier {
verifier: UniversalVerifier,
fork: ForkName,
version: Version,
}
impl Verifier {
pub fn new(assets_dir: &str, fork: ForkName) -> Self {
pub fn new(assets_dir: &str, ver_n: u8) -> Self {
let verifier_bin = Path::new(assets_dir);
Self {
verifier: UniversalVerifier::setup(verifier_bin).expect("Setting up universal verifier"),
fork,
version: Version::from(ver_n),
}
}
}
@@ -31,21 +31,21 @@ impl ProofVerifier for Verifier {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof).unwrap();
assert!(proof.pi_hash_check(self.fork));
assert!(proof.pi_hash_check(self.version));
self.verifier
.verify_stark_proof(proof.as_root_proof(), &proof.vk)
.unwrap()
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof).unwrap();
assert!(proof.pi_hash_check(self.fork));
assert!(proof.pi_hash_check(self.version));
self.verifier
.verify_stark_proof(proof.as_root_proof(), &proof.vk)
.unwrap()
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof).unwrap();
assert!(proof.pi_hash_check(self.fork));
assert!(proof.pi_hash_check(self.version));
let vk = proof.vk.clone();
let evm_proof = proof.into_evm_proof();
self.verifier.verify_evm_proof(&evm_proof, &vk).unwrap()

View File

@@ -152,11 +152,28 @@ pub unsafe extern "C" fn gen_universal_task(
fork_name: *const c_char,
expected_vk: *const u8,
expected_vk_len: usize,
decryption_key: *const u8,
decryption_key_len: usize,
) -> HandlingResult {
let task_json = if task_type == TaskType::Chunk as i32 {
let pre_task_str = c_char_to_str(task);
let cli = l2geth::get_client();
match libzkp::checkout_chunk_task(pre_task_str, cli) {
let decryption_key = if decryption_key_len > 0 {
if decryption_key_len != 32 {
tracing::error!(
"gen_universal_task received {}-byte decryption key; expected 32",
decryption_key_len
);
return failed_handling_result();
}
Some(std::slice::from_raw_parts(
decryption_key,
decryption_key_len,
))
} else {
None
};
match libzkp::checkout_chunk_task(pre_task_str, decryption_key, cli) {
Ok(str) => str,
Err(e) => {
tracing::error!("gen_universal_task failed at pre interpret step, error: {e}");

View File

@@ -2,6 +2,9 @@
"feynman": {
"b68fdc3f28a5ce006280980df70cd3447e56913e5bca6054603ba85f0794c23a6618ea25a7991845bbc5fd571670ee47379ba31ace92d345bca59702a0d4112d": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/chunk/",
"9a3f66370f11e3303f1a1248921025104e83253efea43a70d221cf4e15fc145bf2be2f4468d1ac4a70e7682babb1c60417e21c7633d4b55b58f44703ec82b05a": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/batch/",
"1f8627277e1c1f6e1cc70c03e6fde06929e5ea27ca5b1d56e23b235dfeda282e22c0e5294bcb1b3a9def836f8d0f18612a9860629b9497292976ca11844b7e73": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/bundle/"
"1f8627277e1c1f6e1cc70c03e6fde06929e5ea27ca5b1d56e23b235dfeda282e22c0e5294bcb1b3a9def836f8d0f18612a9860629b9497292976ca11844b7e73": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/bundle/",
"7eb91f1885cc7a63cc848928f043fa56bf747161a74cd933d88c0456b90643346618ea25a7991845bbc5fd571670ee47379ba31ace92d345bca59702a0d4112d": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.6.0-rc.1/chunk/",
"dc653e7416628c612fa4d80b4724002bad4fde3653aef7316b80df0c19740a1bf2be2f4468d1ac4a70e7682babb1c60417e21c7633d4b55b58f44703ec82b05a": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.6.0-rc.1/batch/",
"14de1c74b663ed3c99acb03e90a5753b5923233c5c590864ad7746570297d16722c0e5294bcb1b3a9def836f8d0f18612a9860629b9497292976ca11844b7e73": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.6.0-rc.1/bundle/"
}
}