Compare commits


5 Commits

Author SHA1 Message Date
colin
0578aab3ae feat: openvm euclid v2 (#1613)
Signed-off-by: noelwei <fan@scroll.io>
Co-authored-by: jonastheis <4181434+jonastheis@users.noreply.github.com>
Co-authored-by: Ömer Faruk Irmak <omerfirmak@gmail.com>
Co-authored-by: noelwei <fan@scroll.io>
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Morty <70688412+yiweichi@users.noreply.github.com>
Co-authored-by: omerfirmak <omerfirmak@users.noreply.github.com>
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: kunxian xia <xiakunxian130@gmail.com>
Co-authored-by: Morty <yiweichi1@gmail.com>
Co-authored-by: Velaciela <git.rover@outlook.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-04-04 16:44:09 +08:00
colin
55dfbf6735 fix(rollup-relayer): rollup status overwrite (#1638) 2025-04-01 14:16:56 +08:00
Péter Garamvölgyi
f3ddf43439 feat: use specific go version for builder images (#1634) 2025-03-17 16:42:23 +01:00
goofylfg
a2582dcc3f fix(prover): fix clap deprecated warnings (#1632) 2025-03-14 16:42:14 +01:00
Jonas Theis
0a1868cec1 fix(relayer): fix rolling hash computation (#1628)
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
2025-03-13 13:34:35 +08:00
38 changed files with 2165 additions and 1485 deletions

View File

@@ -9,9 +9,13 @@ on:
type: choice
options:
- "1.20"
- "1.20.14"
- "1.21"
- "1.21.13"
- "1.22"
- "1.22.12"
- "1.23"
- "1.23.7"
default: "1.21"
RUST_VERSION:
description: "Rust toolchain version"

View File

@@ -1,5 +1,5 @@
ARG CUDA_VERSION=11.7.1
ARG GO_VERSION=1.21
ARG GO_VERSION=1.22.12
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
@@ -36,7 +36,7 @@ RUN if [ "$(uname -m)" = "x86_64" ]; then \
else \
echo "Unsupported architecture"; exit 1; \
fi
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
ENV PATH="/usr/local/go/bin:${PATH}"

View File

@@ -1,4 +1,4 @@
ARG GO_VERSION=1.21
ARG GO_VERSION=1.22.12
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
@@ -32,7 +32,7 @@ RUN if [ "$(uname -m)" = "x86_64" ]; then \
else \
echo "Unsupported architecture"; exit 1; \
fi
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
ENV PATH="/usr/local/go/bin:${PATH}"

File diff suppressed because it is too large

View File

@@ -14,8 +14,8 @@ ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }
[dependencies]
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.1.0-rc.6", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.1.0-rc.6", package = "scroll-zkvm-verifier" }
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.2.0", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.2.0", package = "scroll-zkvm-verifier" }
base64 = "0.13.0"
env_logger = "0.9.0"

View File

@@ -1,9 +1,11 @@
#![allow(static_mut_refs)]
mod euclid;
mod euclidv2;
use anyhow::{bail, Result};
use euclid::EuclidVerifier;
use euclidv2::EuclidV2Verifier;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};
@@ -51,7 +53,17 @@ pub fn init(config: VerifierConfig) {
unsafe {
VERIFIER_LOW
.set(VerifierPair(
config.high_version_circuit.fork_name,
"euclid".to_string(),
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
}
let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_HIGH
.set(VerifierPair(
"euclidV2".to_string(),
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();

View File

@@ -4,13 +4,13 @@ use anyhow::Result;
use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifier, ChunkVerifier};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV1, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidVerifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifier,
bundle_verifier: BundleVerifierEuclidV1,
}
impl EuclidVerifier {
@@ -24,7 +24,7 @@ impl EuclidVerifier {
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifier::setup(&config, &exe, &verifier_bin)
bundle_verifier: BundleVerifierEuclidV1::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
}
}

View File

@@ -0,0 +1,65 @@
use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;
use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidV2Verifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifierEuclidV2,
}
impl EuclidV2Verifier {
pub fn new(assets_dir: &str) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
Self {
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifierEuclidV2::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
}
}
}
impl ProofVerifier for EuclidV2Verifier {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
}
})
.map_err(|err_str: String| anyhow::anyhow!(err_str))
}
fn dump_vk(&self, file: &Path) {
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}
}

View File

@@ -4,12 +4,18 @@ import (
"encoding/json"
"errors"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
)
const (
euclidFork = "euclid"
EuclidFork = "euclid"
EuclidV2Fork = "euclidV2"
EuclidForkNameForProver = "euclidv1"
EuclidV2ForkNameForProver = "euclidv2"
)
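
The coordinator maps these chain-side fork names to the prover-facing ones ("euclidv1" / "euclidv2") in the batch, bundle, and chunk task builders further down in this diff. As a rough sketch, assuming only the constants above, the repeated mapping amounts to the following hypothetical helper:

package message // hypothetical placement, alongside the constants above

// proverForkName is an illustrative helper, not part of this change: it
// mirrors the if/else fork-name mapping repeated in the coordinator task
// builders later in this diff.
func proverForkName(hardForkName string) string {
	switch hardForkName {
	case EuclidV2Fork:
		return EuclidV2ForkNameForProver // "euclidv2"
	case EuclidFork:
		return EuclidForkNameForProver // "euclidv1"
	default:
		return hardForkName
	}
}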
// ProofType represents the type of task.
@@ -39,38 +45,102 @@ const (
ProofTypeBundle
)
// ChunkTaskDetail is a type containing ChunkTask detail.
// ChunkTaskDetail contains the details of a chunk proving task.
type ChunkTaskDetail struct {
BlockHashes []common.Hash `json:"block_hashes"`
// use one of the strings EuclidFork / EuclidV2Fork
ForkName string `json:"fork_name"`
BlockHashes []common.Hash `json:"block_hashes"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
}
// Byte48 is a hex-encoded big integer with a fixed length of 48 bytes
type Byte48 struct {
hexutil.Big
}
func (e Byte48) MarshalText() ([]byte, error) {
i := e.ToInt()
// override the default big-integer encoding
if sign := i.Sign(); sign < 0 {
// sanity check
return nil, errors.New("Byte48 must be positive integer")
} else {
s := i.Text(16)
if len(s) > 96 {
return nil, errors.New("integer Exceed 384bit")
}
return []byte(fmt.Sprintf("0x%0*s", 96, s)), nil
}
}
func isString(input []byte) bool {
return len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"'
}
// hexutil.Big has a limitation of 256 bits, so we have to override it
func (e *Byte48) UnmarshalJSON(input []byte) error {
if !isString(input) {
return errors.New("not hex string")
}
b, err := hexutil.Decode(string(input[1 : len(input)-1]))
if err != nil {
return err
}
if len(b) != 48 {
return fmt.Errorf("not a 48 bytes hex string: %d", len(b))
}
var dec big.Int
dec.SetBytes(b)
*e = Byte48{(hexutil.Big)(dec)}
return nil
}
// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []ChunkProof `json:"chunk_proofs"`
BatchHeader interface{} `json:"batch_header"`
BlobBytes []byte `json:"blob_bytes"`
KzgProof []byte `json:"kzg_proof"`
KzgCommitment []byte `json:"kzg_commitment"`
Challenge common.Hash `json:"challenge"`
// use one of the strings EuclidFork / EuclidV2Fork
ForkName string `json:"fork_name"`
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []ChunkProof `json:"chunk_proofs"`
BatchHeader interface{} `json:"batch_header"`
BlobBytes []byte `json:"blob_bytes"`
KzgProof Byte48 `json:"kzg_proof,omitempty"`
KzgCommitment Byte48 `json:"kzg_commitment,omitempty"`
ChallengeDigest common.Hash `json:"challenge_digest,omitempty"`
}
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
BatchProofs []BatchProof `json:"batch_proofs"`
// use one of the strings EuclidFork / EuclidV2Fork
ForkName string `json:"fork_name"`
BatchProofs []BatchProof `json:"batch_proofs"`
BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`
}
// ChunkInfo is for calculating pi_hash for chunk
type ChunkInfo struct {
ChainID uint64 `json:"chain_id"`
PrevStateRoot common.Hash `json:"prev_state_root"`
PostStateRoot common.Hash `json:"post_state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
DataHash common.Hash `json:"data_hash"`
IsPadding bool `json:"is_padding"`
TxBytes []byte `json:"tx_bytes"`
TxBytesHash common.Hash `json:"tx_data_digest"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
ChainID uint64 `json:"chain_id"`
PrevStateRoot common.Hash `json:"prev_state_root"`
PostStateRoot common.Hash `json:"post_state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
DataHash common.Hash `json:"data_hash"`
IsPadding bool `json:"is_padding"`
TxBytes []byte `json:"tx_bytes"`
TxBytesHash common.Hash `json:"tx_data_digest"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
TxDataLength uint64 `json:"tx_data_length"`
InitialBlockNumber uint64 `json:"initial_block_number"`
BlockCtxs []BlockContextV2 `json:"block_ctxs"`
}
// BlockContextV2 is the block context for euclid v2
type BlockContextV2 struct {
Timestamp uint64 `json:"timestamp"`
BaseFee hexutil.Big `json:"base_fee"`
GasLimit uint64 `json:"gas_limit"`
NumTxs uint16 `json:"num_txs"`
NumL1Msgs uint16 `json:"num_l1_msgs"`
}
// SubCircuitRowUsage tracing info added in v0.11.0rc8
@@ -87,7 +157,7 @@ type ChunkProof interface {
// NewChunkProof creates a new ChunkProof instance.
func NewChunkProof(hardForkName string) ChunkProof {
switch hardForkName {
case euclidFork:
case EuclidFork, EuclidV2Fork:
return &OpenVMChunkProof{}
default:
return &Halo2ChunkProof{}
@@ -121,7 +191,7 @@ type BatchProof interface {
// NewBatchProof creates a new BatchProof instance.
func NewBatchProof(hardForkName string) BatchProof {
switch hardForkName {
case euclidFork:
case EuclidFork, EuclidV2Fork:
return &OpenVMBatchProof{}
default:
return &Halo2BatchProof{}
@@ -178,7 +248,7 @@ type BundleProof interface {
// NewBundleProof creates a new BundleProof instance.
func NewBundleProof(hardForkName string) BundleProof {
switch hardForkName {
case euclidFork:
case EuclidFork, EuclidV2Fork:
return &OpenVMBundleProof{}
default:
return &Halo2BundleProof{}
@@ -258,12 +328,14 @@ func (p *OpenVMChunkProof) Proof() []byte {
// OpenVMBatchInfo is for calculating pi_hash for batch header
type OpenVMBatchInfo struct {
ParentBatchHash common.Hash `json:"parent_batch_hash"`
ParentStateRoot common.Hash `json:"parent_state_root"`
StateRoot common.Hash `json:"state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
BatchHash common.Hash `json:"batch_hash"`
ChainID uint64 `json:"chain_id"`
ParentBatchHash common.Hash `json:"parent_batch_hash"`
ParentStateRoot common.Hash `json:"parent_state_root"`
StateRoot common.Hash `json:"state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
BatchHash common.Hash `json:"batch_hash"`
ChainID uint64 `json:"chain_id"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
}
// BatchProof includes the proof info that are required for batch verification and rollup.
@@ -323,6 +395,7 @@ type OpenVMBundleInfo struct {
NumBatches uint32 `json:"num_batches"`
PrevBatchHash common.Hash `json:"prev_batch_hash"`
BatchHash common.Hash `json:"batch_hash"`
MsgQueueHash common.Hash `json:"msg_queue_hash"`
}
// OpenVMBundleProof includes the proof info that are required for verification of a bundle of batch proofs.

View File

@@ -0,0 +1,22 @@
package message
import (
"fmt"
"testing"
)
func TestBytes48(t *testing.T) {
ti := &Byte48{}
ti.UnmarshalText([]byte("0x1"))
if s, err := ti.MarshalText(); err == nil {
if len(s) != 98 {
panic(fmt.Sprintf("wrong str: %s", s))
}
}
ti.UnmarshalText([]byte("0x0"))
if s, err := ti.MarshalText(); err == nil {
if len(s) != 98 {
panic(fmt.Sprintf("wrong str: %s", s))
}
}
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.4.97"
var tag = "v4.5.0"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -2,8 +2,6 @@ module scroll-tech/coordinator
go 1.22
toolchain go1.22.2
require (
github.com/appleboy/gin-jwt/v2 v2.9.1
github.com/gin-gonic/gin v1.9.1
@@ -11,7 +9,7 @@ require (
github.com/google/uuid v1.6.0
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/shopspring/decimal v1.3.1
github.com/stretchr/testify v1.10.0

View File

@@ -177,8 +177,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=

View File

@@ -9,6 +9,7 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
@@ -42,9 +43,10 @@ func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *Logi
var highHardForks []string
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
if cfg.ProverManager.Verifier.HighVersionCircuit.ForkName != "euclid" {
if cfg.ProverManager.Verifier.HighVersionCircuit.ForkName != message.EuclidFork && cfg.ProverManager.Verifier.HighVersionCircuit.ForkName != message.EuclidV2Fork {
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.LowVersionCircuit.ForkName)
}
highHardForks = append(highHardForks, message.EuclidFork, message.EuclidV2Fork)
proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
proverVersionHardForkMap[cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion] = []string{cfg.ProverManager.Verifier.LowVersionCircuit.ForkName}

View File

@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"math/big"
"time"
"github.com/gin-gonic/gin"
@@ -11,6 +12,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -121,7 +123,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight)
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBatchTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
@@ -211,17 +213,23 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
DataHash: common.HexToHash(chunk.Hash),
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
IsPadding: false,
}
if haloProot, ok := proof.(*message.Halo2ChunkProof); ok {
if haloProot.ChunkInfo != nil {
chunkInfo.TxBytes = haloProot.ChunkInfo.TxBytes
if halo2Proof, ok := proof.(*message.Halo2ChunkProof); ok {
if halo2Proof.ChunkInfo != nil {
chunkInfo.TxBytes = halo2Proof.ChunkInfo.TxBytes
}
}
if openvmProof, ok := proof.(*message.OpenVMChunkProof); ok {
chunkInfo.InitialBlockNumber = openvmProof.MetaData.ChunkInfo.InitialBlockNumber
chunkInfo.BlockCtxs = openvmProof.MetaData.ChunkInfo.BlockCtxs
chunkInfo.TxDataLength = openvmProof.MetaData.ChunkInfo.TxDataLength
}
chunkInfos = append(chunkInfos, &chunkInfo)
}
taskDetail, err := bp.getBatchTaskDetail(batch, chunkInfos, chunkProofs)
taskDetail, err := bp.getBatchTaskDetail(batch, chunkInfos, chunkProofs, hardForkName)
if err != nil {
return nil, fmt.Errorf("failed to get batch task detail, taskID:%s err:%w", task.TaskID, err)
}
@@ -238,6 +246,9 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
TaskData: string(chunkProofsBytes),
HardForkName: hardForkName,
}
log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeBatch.String(), "hard_fork_name", hardForkName, "task_data", taskMsg.TaskData)
return taskMsg, nil
}
@@ -247,12 +258,18 @@ func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *or
}
}
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []message.ChunkProof) (*message.BatchTaskDetail, error) {
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []message.ChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
taskDetail := &message.BatchTaskDetail{
ChunkInfos: chunkInfos,
ChunkProofs: chunkProofs,
}
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else if hardForkName == message.EuclidFork {
taskDetail.ForkName = message.EuclidForkNameForProver
}
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
switch dbBatchCodecVersion {
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7:
@@ -271,17 +288,13 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*
}
taskDetail.BatchHeader = batchHeader
taskDetail.BlobBytes = dbBatch.BlobBytes
if len(dbBatch.BlobDataProof) < 160 {
return nil, fmt.Errorf("blob data proof length is less than 160 bytes = %d, taskID: %s: %s", len(dbBatch.BlobDataProof), dbBatch.Hash, common.Bytes2Hex(dbBatch.BlobDataProof))
}
taskDetail.ChallengeDigest = common.HexToHash(dbBatch.ChallengeDigest)
// Memory layout of `BlobDataProof`: used in Codec.BlobDataProofForPointEvaluation()
// | z | y | kzg_commitment | kzg_proof |
// |---------|---------|----------------|-----------|
// | bytes32 | bytes32 | bytes48 | bytes48 |
taskDetail.KzgProof = dbBatch.BlobDataProof[112:160]
taskDetail.KzgCommitment = dbBatch.BlobDataProof[64:112]
taskDetail.Challenge = common.Hash(dbBatch.BlobDataProof[0:32])
taskDetail.KzgProof = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[112:160]))}
taskDetail.KzgCommitment = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[64:112]))}
return taskDetail, nil
}
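
For reference, here is a standalone sketch of the BlobDataProof slicing done in getBatchTaskDetail above, following the documented | z | y | kzg_commitment | kzg_proof | layout; the helper name is ours, not from the diff:

package main

import (
	"fmt"
	"math/big"
)

// splitBlobDataProof is a hypothetical helper mirroring getBatchTaskDetail:
// | z | y | kzg_commitment | kzg_proof |
// | bytes32 | bytes32 | bytes48 | bytes48 | => 160 bytes total
func splitBlobDataProof(proof []byte) (commitment, kzgProof *big.Int, err error) {
	if len(proof) < 160 {
		return nil, nil, fmt.Errorf("blob data proof length is less than 160 bytes = %d", len(proof))
	}
	commitment = new(big.Int).SetBytes(proof[64:112])
	kzgProof = new(big.Int).SetBytes(proof[112:160])
	return commitment, kzgProof, nil
}

func main() {
	proof := make([]byte, 160)
	proof[111] = 0x01 // last byte of the commitment slot
	c, p, err := splitBlobDataProof(proof)
	fmt.Println(c, p, err) // 1 0 <nil>
}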

View File

@@ -9,6 +9,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -120,7 +121,7 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight)
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBundleTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
@@ -194,6 +195,11 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
}
parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
}
var batchProofs []message.BatchProof
for _, batch := range batches {
proof := message.NewBatchProof(hardForkName)
@@ -207,6 +213,26 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
BatchProofs: batchProofs,
}
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else if hardForkName == message.EuclidFork {
taskDetail.ForkName = message.EuclidForkNameForProver
}
taskDetail.BundleInfo = &message.OpenVMBundleInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(parentBatch.StateRoot),
PostStateRoot: common.HexToHash(batches[len(batches)-1].StateRoot),
WithdrawRoot: common.HexToHash(batches[len(batches)-1].WithdrawRoot),
NumBatches: uint32(len(batches)),
PrevBatchHash: common.HexToHash(batches[0].ParentBatchHash),
BatchHash: common.HexToHash(batches[len(batches)-1].Hash),
}
if hardForkName == message.EuclidV2Fork {
taskDetail.BundleInfo.MsgQueueHash = common.HexToHash(batches[len(batches)-1].PostL1MessageQueueHash)
}
batchProofsBytes, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal batch proofs, taskID:%s err:%w", task.TaskID, err)
@@ -219,6 +245,9 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
TaskData: string(batchProofsBytes),
HardForkName: hardForkName,
}
log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeBundle.String(), "hard_fork_name", hardForkName, "task_data", taskMsg.TaskData)
return taskMsg, nil
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -118,7 +119,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight)
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpChunkTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
@@ -162,7 +163,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, chunkTask, hardForkName)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
@@ -179,17 +180,27 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return taskMsg, nil
}
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, chunk *orm.Chunk, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
// Get block hashes.
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
if dbErr != nil || len(blockHashes) == 0 {
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%w", task.TaskID, dbErr)
}
var taskDetailBytes []byte
taskDetail := message.ChunkTaskDetail{
BlockHashes: blockHashes,
BlockHashes: blockHashes,
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
}
blockHashesBytes, err := json.Marshal(taskDetail)
if hardForkName == message.EuclidV2Fork {
taskDetail.ForkName = message.EuclidV2ForkNameForProver
} else if hardForkName == message.EuclidFork {
taskDetail.ForkName = message.EuclidForkNameForProver
}
var err error
taskDetailBytes, err = json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal block hashes hash:%s, err:%w", task.TaskID, err)
}
@@ -198,10 +209,12 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeChunk),
TaskData: string(blockHashesBytes),
TaskData: string(taskDetailBytes),
HardForkName: hardForkName,
}
log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeChunk.String(), "hard_fork_name", hardForkName, "task_data", proverTaskSchema.TaskData)
return proverTaskSchema, nil
}
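
Putting the pieces together, the chunk task data serialized by formatProverTask above looks roughly like this (a sketch; the hash values are made up):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"

	"scroll-tech/common/types/message"
)

func main() {
	detail := message.ChunkTaskDetail{
		ForkName:         message.EuclidV2ForkNameForProver, // "euclidv2"
		BlockHashes:      []common.Hash{common.HexToHash("0xaa")},
		PrevMsgQueueHash: common.HexToHash("0xbb"),
	}
	b, err := json.Marshal(&detail)
	if err != nil {
		panic(err)
	}
	// {"fork_name":"euclidv2","block_hashes":["0x...aa"],"prev_msg_queue_hash":"0x...bb"}
	fmt.Println(string(b))
}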

View File

@@ -122,7 +122,7 @@ func (b *BaseProverTask) hardForkSanityCheck(ctx *gin.Context, taskCtx *proverTa
}
if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
return "", errors.New("to be assigned prover task's hard-fork name is not the same as prover")
return "", fmt.Errorf("to be assigned prover task's hard-fork name is not the same as prover, proverName: %s, proverVersion: %s, proverSupportHardForkNames: %s, taskHardForkName: %v", taskCtx.ProverName, taskCtx.ProverVersion, taskCtx.HardForkNames, hardForkName)
}
return hardForkName, nil
}

View File

@@ -103,11 +103,16 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
return nil, err
}
if err := v.loadOpenVMVks(cfg.HighVersionCircuit.ForkName); err != nil {
if err := v.loadOpenVMVks(message.EuclidFork); err != nil {
return nil, err
}
v.loadCurieVersionVKs()
if err := v.loadOpenVMVks(message.EuclidV2Fork); err != nil {
return nil, err
}
v.loadDarwinVKs()
return v, nil
}
@@ -224,8 +229,9 @@ func (v *Verifier) loadLowVersionVKs(cfg *config.VerifierConfig) error {
return nil
}
func (v *Verifier) loadCurieVersionVKs() {
v.BatchVKMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD9jfGkei+f0wNYpkjW7JO12EfU7CjYVBo+PGku3zaQJI64lbn6BwyTBa4RfrPFpV5mP47ix0sXZ+Wt5wklMLRW7OIJb1yfCDm+gkSsp3/Zqrxt4SY4rQ4WtHfynTCQ0KDi78jNuiFvwxO3ub3DkgGVaxMkGxTRP/Vz6E7MCZMUBR5wZFcMzJn+73f0wYjDxfj00krg9O1VrwVxbVV1ycLR6oQLcOgm/l+xwth8io0vDpF9OY21gD5DgJn9GgcYe8KoRVEbEqApLZPdBibpcSMTY9czZI2LnFcqrDDmYvhEwgjhZrsTog2xLXOODoOupZ/is5ekQ9Gi0y871b1mLlCGA="] = struct{}{}
func (v *Verifier) loadDarwinVKs() {
v.BundleVkMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD5dsp1rEy7PSqiIFikkkOPqKokLW2mZSwCbtKdkfLQcvTxARUwHSe4iZe27PRJ5WWaLqtRV1+x6+pSVKtcPtaV4kE7v2YJRf0582hxiAF0IBaOoREdpyNfA2a9cvhWb2TMaPrUYP9EDQ7CUiW1FQzxbjGc95ua2htscnpU7d9S5stHWzKb7okkCG7bTIL9aG6qTQo2YXW7n3H3Ir47oVJB7IKrUzKGvI5Wmanh2zpZOJ9Qm4/wY24cT7cJz+Ux6wAg=="] = struct{}{}
v.BatchVKMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD1DEjW4Kell67H07wazT5DdzrSh4+amh+cmosQHp9p9snFypyoBGt3UHtoJGQBZlywZWDS9ht5pnaEoGBdaKcQk+lFb+WxTiId0KOAa0mafTZTQw8yToy57Jple64qzlRu1dux30tZZGuerLN1CKzg5Xl2iOpMK+l87jCINwVp5cUtF/XrvhBbU7onKh3KBiy99iUqVyA3Y6iiIZhGKWBSuSA4bNgDYIoVkqjHpdL35aEShoRO6pNXt7rDzxFoPzH0JuPI54nE4OhVrzZXwtkAEosxVa/fszcE092FH+HhhtxZBYe/KEzwdISU9TOPdId3UF/UMYC0MiYOlqffVTgAg="] = struct{}{}
v.ChunkVKMap["AAAAGQAAAATyWEABRbJ6hQQ5/zLX1gTasr7349minA9rSgMS6gDeHwZKqikRiO3md+pXjjxMHnKQtmXYgMXhJSvlmZ+Ws+cheuly2X1RuNQzcZuRImaKPR9LJsVZYsXfJbuqdKX8p0Gj8G83wMJOmTzNVUyUol0w0lTU+CEiTpHOnxBsTF3EWaW3s1u4ycOgWt1c9M6s7WmaBZLYgAWYCunO5CLCLApNGbCASeck/LuSoedEri5u6HccCKU2khG6zl6W07jvYSbDVLJktbjRiHv+/HQix+K14j8boo8Z/unhpwXCsPxkQA=="] = struct{}{}
}

View File

@@ -19,20 +19,23 @@ type Batch struct {
db *gorm.DB `gorm:"column:-"`
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"`
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"`
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"`
PrevL1MessageQueueHash string `json:"prev_l1_message_queue_hash" gorm:"column:prev_l1_message_queue_hash"`
PostL1MessageQueueHash string `json:"post_l1_message_queue_hash" gorm:"column:post_l1_message_queue_hash"`
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"`
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
ChallengeDigest string `json:"challenge_digest" gorm:"column:challenge_digest"`
// proof
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`

View File

@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, int64(25), cur)
assert.Equal(t, int64(26), cur)
}
func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(25), cur)
assert.Equal(t, int64(26), cur)
}
func testRollback(t *testing.T) {
version, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(25), version)
assert.Equal(t, int64(26), version)
assert.NoError(t, Rollback(pgDB, nil))

View File

@@ -0,0 +1,15 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE batch
ADD COLUMN challenge_digest VARCHAR DEFAULT '';
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS batch
DROP COLUMN IF EXISTS challenge_digest;
-- +goose StatementEnd

View File

@@ -1357,13 +1357,13 @@ github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b h1:5H6V6yba
github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b/go.mod h1:48uxaqVgpD8ulH8p+nrBtfeLHZ9tX82bVVdPNkW3rPE=
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f h1:YYbhuUwjowqI4oyXtECRofck7Fyj18e1tcRjuQlZpJE=
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250206083728-ea43834c198f/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939/go.mod h1:AgU8JJxC7+nfs7R7ma35AU7dMAGW7wCw3dRZRefIKyQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 h1:qfOaRflvH1vtnFWloB7BveKlP/VqYgMqLJ6e9TlBJ/8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=

View File

@@ -17,7 +17,7 @@ use tokio::runtime;
use utils::get_prover_type;
#[derive(Parser, Debug)]
#[clap(disable_version_flag = true)]
#[command(disable_version_flag = true)]
struct Args {
/// Path of config file
#[arg(long = "config", default_value = "conf/config.json")]

View File

@@ -2,8 +2,6 @@ module scroll-tech/rollup
go 1.22
toolchain go1.22.2
require (
github.com/agiledragon/gomonkey/v2 v2.12.0
github.com/consensys/gnark-crypto v0.16.0
@@ -13,7 +11,7 @@ require (
github.com/holiman/uint256 v1.3.2
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/smartystreets/goconvey v1.8.0
github.com/spf13/viper v1.19.0

View File

@@ -249,8 +249,8 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=

View File

@@ -8,8 +8,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log"
@@ -86,43 +84,6 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
}
}
func txsToTxsData(txs gethTypes.Transactions) []*gethTypes.TransactionData {
txsData := make([]*gethTypes.TransactionData, len(txs))
for i, tx := range txs {
v, r, s := tx.RawSignatureValues()
nonce := tx.Nonce()
// We need QueueIndex in `NewBatchHeader`. However, `TransactionData`
// does not have this field. Since `L1MessageTx` does not have a nonce,
// we reuse this field for storing the queue index.
if msg := tx.AsL1MessageTx(); msg != nil {
nonce = msg.QueueIndex
}
txsData[i] = &gethTypes.TransactionData{
Type: tx.Type(),
TxHash: tx.Hash().String(),
Nonce: nonce,
ChainId: (*hexutil.Big)(tx.ChainId()),
Gas: tx.Gas(),
GasPrice: (*hexutil.Big)(tx.GasPrice()),
GasTipCap: (*hexutil.Big)(tx.GasTipCap()),
GasFeeCap: (*hexutil.Big)(tx.GasFeeCap()),
To: tx.To(),
Value: (*hexutil.Big)(tx.Value()),
Data: hexutil.Encode(tx.Data()),
IsCreate: tx.To() == nil,
AccessList: tx.AccessList(),
AuthorizationList: tx.SetCodeAuthorizations(),
V: (*hexutil.Big)(v),
R: (*hexutil.Big)(r),
S: (*hexutil.Big)(s),
}
}
return txsData
}
func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64) error {
var blocks []*encoding.Block
for number := from; number <= to; number++ {
@@ -150,7 +111,7 @@ func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64
}
blocks = append(blocks, &encoding.Block{
Header: block.Header(),
Transactions: txsToTxsData(block.Transactions()),
Transactions: encoding.TxsToTxsData(block.Transactions()),
WithdrawRoot: common.BytesToHash(withdrawRoot),
RowConsumption: block.RowConsumption,
})

View File

@@ -39,6 +39,7 @@ type Batch struct {
PostL1MessageQueueHash string `json:"post_l1_message_queue_hash" gorm:"column:post_l1_message_queue_hash"`
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"` // use for debug
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
ChallengeDigest string `json:"challenge_digest" gorm:"column:challenge_digest"`
// proof
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
@@ -305,6 +306,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
PostL1MessageQueueHash: batch.PostL1MessageQueueHash.Hex(),
EnableCompress: enableCompress,
BlobBytes: batchMeta.BlobBytes,
ChallengeDigest: batchMeta.ChallengeDigest.Hex(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
@@ -413,6 +415,16 @@ func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash stri
db = dbTX[0]
}
db = db.WithContext(ctx)
var currentBatch Batch
if err := db.Where("hash", hash).First(&currentBatch).Error; err != nil {
return fmt.Errorf("Batch.UpdateCommitTxHashAndRollupStatus error when querying current status: %w, batch hash: %v", err, hash)
}
if types.RollupStatus(currentBatch.RollupStatus) == types.RollupFinalizing || types.RollupStatus(currentBatch.RollupStatus) == types.RollupFinalized {
return nil
}
db = db.Model(&Batch{})
db = db.Where("hash", hash)

View File

@@ -318,8 +318,8 @@ func TestBatchOrm(t *testing.T) {
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash)
assert.Equal(t, types.RollupCommitted, types.RollupStatus(updatedBatch.RollupStatus))
assert.Equal(t, "", updatedBatch.CommitTxHash)
assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
err = batchOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash2, "finalizeTxHash", types.RollupFinalizeFailed)
assert.NoError(t, err)
@@ -332,9 +332,8 @@ func TestBatchOrm(t *testing.T) {
batches, err := batchOrm.GetCommittedBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 0)
assert.NoError(t, err)
assert.Equal(t, 2, len(batches))
assert.Equal(t, 1, len(batches))
assert.Equal(t, batchHash1, batches[0].Hash)
assert.Equal(t, batchHash2, batches[1].Hash)
batches, err = batchOrm.GetCommittedBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 1)
assert.NoError(t, err)
@@ -343,8 +342,7 @@ func TestBatchOrm(t *testing.T) {
batches, err = batchOrm.GetCommittedBatchesGEIndexGECodecVersion(context.Background(), 1, codecVersion, 0)
assert.NoError(t, err)
assert.Equal(t, 1, len(batches))
assert.Equal(t, batchHash2, batches[0].Hash)
assert.Equal(t, 0, len(batches))
batches, err = batchOrm.GetCommittedBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion+1, 0)
assert.NoError(t, err)
@@ -361,13 +359,10 @@ func TestBatchOrm(t *testing.T) {
batches, err = batchOrm.GetCommittedBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 0)
assert.NoError(t, err)
assert.Equal(t, 2, len(batches))
assert.Equal(t, 1, len(batches))
assert.Equal(t, batchHash1, batches[0].Hash)
assert.Equal(t, batchHash2, batches[1].Hash)
assert.Equal(t, types.ProvingTaskFailed, types.ProvingStatus(batches[0].ProvingStatus))
assert.Equal(t, types.RollupCommitFailed, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(batches[1].ProvingStatus))
assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(batches[1].RollupStatus))
}
}

View File

@@ -164,6 +164,7 @@ type BatchMetadata struct {
StartChunkHash common.Hash
EndChunkHash common.Hash
BlobBytes []byte
ChallengeDigest common.Hash
}
// GetBatchMetadata retrieves the metadata of a batch.
@@ -179,10 +180,11 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
}
batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchDataHash: daBatch.DataHash(),
BatchBytes: daBatch.Encode(),
BlobBytes: daBatch.BlobBytes(),
BatchHash: daBatch.Hash(),
BatchDataHash: daBatch.DataHash(),
BatchBytes: daBatch.Encode(),
BlobBytes: daBatch.BlobBytes(),
ChallengeDigest: daBatch.ChallengeDigest(),
}
batchMeta.BatchBlobDataProof, err = daBatch.BlobDataProofForPointEvaluation()

zkvm-prover/Cargo.lock (generated)

File diff suppressed because it is too large

View File

@@ -18,13 +18,13 @@ serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.1.0-rc.6" }
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.2.0", package = "scroll-zkvm-prover" }
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "af95d2a", features = [
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", branch = "main", features = [
"openvm",
] }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-v2", features = [
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "zkvm/euclid-upgrade", features = [
"scroll",
] }
base64 = "0.13.1"
@@ -45,3 +45,34 @@ clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"
url = "2.5.4"
serde_bytes = "0.11.15"
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.1.0" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }

View File

@@ -23,8 +23,8 @@
"db_path": "unique-db-path-for-prover-1"
},
"circuits": {
"euclid": {
"hard_fork_name": "euclid",
"euclidV2": {
"hard_fork_name": "euclidV2",
"workspace_path": "/home/ubuntu/prover-workdir"
}
}

View File

@@ -10,7 +10,7 @@ use scroll_proving_sdk::{
};
#[derive(Parser, Debug)]
#[clap(disable_version_flag = true)]
#[command(disable_version_flag = true)]
struct Args {
/// Path of config file
#[arg(long = "config", default_value = "conf/config.json")]

View File

@@ -1,4 +1,6 @@
use crate::zk_circuits_handler::{euclid::EuclidHandler, CircuitsHandler};
use crate::zk_circuits_handler::{
euclid::EuclidHandler, euclidV2::EuclidV2Handler, CircuitsHandler,
};
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use scroll_proving_sdk::{
@@ -181,9 +183,14 @@ impl LocalProver {
// coordinator
let config = self.config.circuits.get(hard_fork_name).unwrap();
Arc::new(match hard_fork_name {
"euclid" => Arc::new(Mutex::new(EuclidHandler::new(&config.workspace_path))),
match hard_fork_name {
"euclid" => Arc::new(Arc::new(Mutex::new(EuclidHandler::new(
&config.workspace_path,
)))) as Arc<dyn CircuitsHandler>,
"euclidV2" => Arc::new(Arc::new(Mutex::new(EuclidV2Handler::new(
&config.workspace_path,
)))) as Arc<dyn CircuitsHandler>,
_ => unreachable!(),
}) as Arc<dyn CircuitsHandler>
}
}
}

View File

@@ -1,5 +1,8 @@
pub mod euclid;
#[allow(non_snake_case)]
pub mod euclidV2;
use anyhow::Result;
use async_trait::async_trait;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};

View File

@@ -4,37 +4,95 @@ use super::CircuitsHandler;
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
use scroll_zkvm_prover::{
use scroll_zkvm_prover_euclid::{
task::{batch::BatchProvingTask, bundle::BundleProvingTask, chunk::ChunkProvingTask},
BatchProver, BundleProver, ChunkProver,
BatchProver, BundleProverEuclidV1, ChunkProver, ProverConfig,
};
use tokio::sync::Mutex;
pub struct EuclidHandler {
chunk_prover: ChunkProver,
batch_prover: BatchProver,
bundle_prover: BundleProver,
bundle_prover: BundleProverEuclidV1,
}
#[derive(Clone, Copy)]
pub(crate) enum Phase {
EuclidV1,
EuclidV2,
}
impl Phase {
pub fn as_str(&self) -> &str {
match self {
Phase::EuclidV1 => "euclidv1",
Phase::EuclidV2 => "euclidv2",
}
}
pub fn phase_spec_chunk(&self, workspace_path: &Path) -> ProverConfig {
let dir_cache = Some(workspace_path.join("cache"));
let path_app_exe = workspace_path.join("chunk/app.vmexe");
let path_app_config = workspace_path.join("chunk/openvm.toml");
let segment_len = Some((1 << 22) - 100);
ProverConfig {
dir_cache,
path_app_config,
path_app_exe,
segment_len,
..Default::default()
}
}
pub fn phase_spec_batch(&self, workspace_path: &Path) -> ProverConfig {
let dir_cache = Some(workspace_path.join("cache"));
let path_app_exe = workspace_path.join("batch/app.vmexe");
let path_app_config = workspace_path.join("batch/openvm.toml");
let segment_len = Some((1 << 22) - 100);
ProverConfig {
dir_cache,
path_app_config,
path_app_exe,
segment_len,
..Default::default()
}
}
pub fn phase_spec_bundle(&self, workspace_path: &Path) -> ProverConfig {
let dir_cache = Some(workspace_path.join("cache"));
let path_app_config = workspace_path.join("bundle/openvm.toml");
let segment_len = Some((1 << 22) - 100);
match self {
Phase::EuclidV1 => ProverConfig {
dir_cache,
path_app_config,
segment_len,
path_app_exe: workspace_path.join("bundle/app_euclidv1.vmexe"),
..Default::default()
},
Phase::EuclidV2 => ProverConfig {
dir_cache,
path_app_config,
segment_len,
path_app_exe: workspace_path.join("bundle/app.vmexe"),
..Default::default()
},
}
}
}
unsafe impl Send for EuclidHandler {}
impl EuclidHandler {
pub fn new(workspace_path: &str) -> Self {
let p = Phase::EuclidV1;
let workspace_path = Path::new(workspace_path);
let cache_dir = workspace_path.join("cache");
let chunk_exe = workspace_path.join("chunk/app.vmexe");
let chunk_app_config = workspace_path.join("chunk/openvm.toml");
let chunk_prover = ChunkProver::setup(chunk_exe, chunk_app_config, Some(cache_dir.clone()))
let chunk_prover = ChunkProver::setup(p.phase_spec_chunk(workspace_path))
.expect("Failed to setup chunk prover");
let batch_exe = workspace_path.join("batch/app.vmexe");
let batch_app_config = workspace_path.join("batch/openvm.toml");
let batch_prover = BatchProver::setup(batch_exe, batch_app_config, Some(cache_dir.clone()))
let batch_prover = BatchProver::setup(p.phase_spec_batch(workspace_path))
.expect("Failed to setup batch prover");
let bundle_exe = workspace_path.join("bundle/app.vmexe");
let bundle_app_config = workspace_path.join("bundle/openvm.toml");
let bundle_prover = BundleProver::setup(bundle_exe, bundle_app_config, Some(cache_dir))
let bundle_prover = BundleProverEuclidV1::setup(p.phase_spec_bundle(workspace_path))
.expect("Failed to setup bundle prover");
Self {
@@ -59,16 +117,8 @@ impl CircuitsHandler for Arc<Mutex<EuclidHandler>> {
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
match prove_request.proof_type {
ProofType::Chunk => {
let witnesses: Vec<sbv_primitives::types::BlockWitness> =
serde_json::from_str(&prove_request.input)?;
let proof = self
.try_lock()
.unwrap()
.chunk_prover
.gen_proof(&ChunkProvingTask {
block_witnesses: witnesses,
})?;
let task: ChunkProvingTask = serde_json::from_str(&prove_request.input)?;
let proof = self.try_lock().unwrap().chunk_prover.gen_proof(&task)?;
Ok(serde_json::to_string(&proof)?)
}

View File

@@ -0,0 +1,79 @@
use std::{path::Path, sync::Arc};
use super::{euclid::Phase, CircuitsHandler};
use anyhow::{anyhow, Result};
use async_trait::async_trait;
use scroll_proving_sdk::prover::{proving_service::ProveRequest, ProofType};
use scroll_zkvm_prover_euclid::{
task::{batch::BatchProvingTask, bundle::BundleProvingTask, chunk::ChunkProvingTask},
BatchProver, BundleProverEuclidV2, ChunkProver,
};
use tokio::sync::Mutex;
pub struct EuclidV2Handler {
chunk_prover: ChunkProver,
batch_prover: BatchProver,
bundle_prover: BundleProverEuclidV2,
}
unsafe impl Send for EuclidV2Handler {}
impl EuclidV2Handler {
pub fn new(workspace_path: &str) -> Self {
let p = Phase::EuclidV2;
let workspace_path = Path::new(workspace_path);
let chunk_prover = ChunkProver::setup(p.phase_spec_chunk(workspace_path))
.expect("Failed to setup chunk prover");
let batch_prover = BatchProver::setup(p.phase_spec_batch(workspace_path))
.expect("Failed to setup batch prover");
let bundle_prover = BundleProverEuclidV2::setup(p.phase_spec_bundle(workspace_path))
.expect("Failed to setup bundle prover");
Self {
chunk_prover,
batch_prover,
bundle_prover,
}
}
}
#[async_trait]
impl CircuitsHandler for Arc<Mutex<EuclidV2Handler>> {
async fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>> {
Some(match task_type {
ProofType::Chunk => self.try_lock().unwrap().chunk_prover.get_app_vk(),
ProofType::Batch => self.try_lock().unwrap().batch_prover.get_app_vk(),
ProofType::Bundle => self.try_lock().unwrap().bundle_prover.get_app_vk(),
_ => unreachable!("Unsupported proof type"),
})
}
async fn get_proof_data(&self, prove_request: ProveRequest) -> Result<String> {
match prove_request.proof_type {
ProofType::Chunk => {
let task: ChunkProvingTask = serde_json::from_str(&prove_request.input)?;
let proof = self.try_lock().unwrap().chunk_prover.gen_proof(&task)?;
Ok(serde_json::to_string(&proof)?)
}
ProofType::Batch => {
let task: BatchProvingTask = serde_json::from_str(&prove_request.input)?;
let proof = self.try_lock().unwrap().batch_prover.gen_proof(&task)?;
Ok(serde_json::to_string(&proof)?)
}
ProofType::Bundle => {
let batch_proofs: BundleProvingTask = serde_json::from_str(&prove_request.input)?;
let proof = self
.try_lock()
.unwrap()
.bundle_prover
.gen_proof_evm(&batch_proofs)?;
Ok(serde_json::to_string(&proof)?)
}
_ => Err(anyhow!("Unsupported proof type")),
}
}
}