feat: the CLOAK privacy solution (#1737)

Co-authored-by: Ho <fan@scroll.io>
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
This commit is contained in:
Zhang Zhuo
2025-11-14 22:00:37 +08:00
committed by GitHub
parent 1985e54ab3
commit 6bee33036f
58 changed files with 1687 additions and 413 deletions

Cargo.lock — 460 changes (generated)

File diff suppressed because it is too large

View File

@@ -14,13 +14,12 @@ edition = "2021"
homepage = "https://scroll.io"
readme = "README.md"
repository = "https://github.com/scroll-tech/scroll"
version = "4.5.47"
version = "4.6.3"
[workspace.dependencies]
# include compatibility fixes from "fix/coordinator"
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "a71dd2b" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "a71dd2b" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "a71dd2b" }
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "360f364" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "360f364" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "360f364" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master", features = ["scroll", "rkyv"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master" }

View File

@@ -10,6 +10,7 @@ import (
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/modules/compose"
"github.com/testcontainers/testcontainers-go/modules/postgres"
@@ -220,11 +221,21 @@ func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
// GetL2GethClient returns an ethclient by dialing the running L2Geth
func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
rpcCli, err := t.GetL2Client()
if err != nil {
return nil, err
}
return ethclient.NewClient(rpcCli), nil
}
// GetL2Client returns an rpc client by dialing the running L2Geth
func (t *TestcontainerApps) GetL2Client() (*rpc.Client, error) {
endpoint, err := t.GetL2GethEndPoint()
if err != nil {
return nil, err
}
client, err := ethclient.Dial(endpoint)
client, err := rpc.Dial(endpoint)
if err != nil {
return nil, err
}

View File

@@ -39,10 +39,12 @@ const (
// ChunkTaskDetail is a type containing ChunkTask detail for chunk task.
type ChunkTaskDetail struct {
Version uint8 `json:"version"`
// use one of the strings "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
BlockHashes []common.Hash `json:"block_hashes"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
}
// it is a hex-encoded big integer with a fixed length of 48 bytes
@@ -90,40 +92,59 @@ func (e *Byte48) UnmarshalJSON(input []byte) error {
// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
Version uint8 `json:"version"`
// use one of the strings "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
BatchHeader interface{} `json:"batch_header"`
BlobBytes []byte `json:"blob_bytes"`
KzgProof Byte48 `json:"kzg_proof,omitempty"`
KzgCommitment Byte48 `json:"kzg_commitment,omitempty"`
ChallengeDigest common.Hash `json:"challenge_digest,omitempty"`
ForkName string `json:"fork_name"`
ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
BatchHeader interface{} `json:"batch_header"`
BlobBytes []byte `json:"blob_bytes"`
KzgProof *Byte48 `json:"kzg_proof,omitempty"`
KzgCommitment *Byte48 `json:"kzg_commitment,omitempty"`
// ChallengeDigest should be of type common.Hash when it is not nil
ChallengeDigest interface{} `json:"challenge_digest,omitempty"`
}
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
Version uint8 `json:"version"`
// use one of the strings "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`
}
type RawBytes []byte
func (r RawBytes) MarshalJSON() ([]byte, error) {
if r == nil {
return []byte("null"), nil
}
// Marshal the []byte as a JSON array of numbers
rn := make([]uint16, len(r))
for i := range r {
rn[i] = uint16(r[i])
}
return json.Marshal(rn)
}
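For illustration, a minimal self-contained sketch (reusing the RawBytes definition above; the surrounding names are otherwise hypothetical) of why the custom marshaller exists: encoding/json renders a plain []byte as a base64 string, while RawBytes renders it as a JSON array of numbers, presumably matching how the Rust side deserializes a Vec<u8>.
package main

import (
	"encoding/json"
	"fmt"
)

// RawBytes mirrors the type defined above: a []byte that marshals
// as a JSON array of numbers instead of a base64 string.
type RawBytes []byte

func (r RawBytes) MarshalJSON() ([]byte, error) {
	if r == nil {
		return []byte("null"), nil
	}
	rn := make([]uint16, len(r))
	for i := range r {
		rn[i] = uint16(r[i])
	}
	return json.Marshal(rn)
}

func main() {
	key := []byte{0x01, 0x02, 0xff}
	asPlain, _ := json.Marshal(key)         // default []byte encoding
	asRaw, _ := json.Marshal(RawBytes(key)) // RawBytes encoding
	fmt.Println(string(asPlain)) // "AQL/"
	fmt.Println(string(asRaw))   // [1,2,255]
}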
// ChunkInfo is for calculating pi_hash for chunk
type ChunkInfo struct {
ChainID uint64 `json:"chain_id"`
PrevStateRoot common.Hash `json:"prev_state_root"`
PostStateRoot common.Hash `json:"post_state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
DataHash common.Hash `json:"data_hash"`
IsPadding bool `json:"is_padding"`
TxBytes []byte `json:"tx_bytes"`
ChainID uint64 `json:"chain_id"`
PrevStateRoot common.Hash `json:"prev_state_root"`
PostStateRoot common.Hash `json:"post_state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
DataHash common.Hash `json:"data_hash"`
IsPadding bool `json:"is_padding"`
// TxBytes []byte `json:"tx_bytes"`
TxBytesHash common.Hash `json:"tx_data_digest"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
TxDataLength uint64 `json:"tx_data_length"`
InitialBlockNumber uint64 `json:"initial_block_number"`
BlockCtxs []BlockContextV2 `json:"block_ctxs"`
PrevBlockhash common.Hash `json:"prev_blockhash"`
PostBlockhash common.Hash `json:"post_blockhash"`
EncryptionKey RawBytes `json:"encryption_key"`
}
// BlockContextV2 is the block context for euclid v2
@@ -186,6 +207,7 @@ type OpenVMBatchInfo struct {
ChainID uint64 `json:"chain_id"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
EncryptionKey RawBytes `json:"encryption_key"`
}
// BatchProof includes the proof info that are required for batch verification and rollup.
@@ -246,6 +268,7 @@ type OpenVMBundleInfo struct {
PrevBatchHash common.Hash `json:"prev_batch_hash"`
BatchHash common.Hash `json:"batch_hash"`
MsgQueueHash common.Hash `json:"msg_queue_hash"`
EncryptionKey RawBytes `json:"encryption_key"`
}
// OpenVMBundleProof includes the proof info that are required for verification of a bundle of batch proofs.

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.6.1"
var tag = "v4.6.3"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -37,9 +37,9 @@ coordinator_tool:
localsetup: coordinator_api ## Local setup: build coordinator_api, copy config, and setup releases
mkdir -p build/bin/conf
@echo "Copying configuration files..."
cp -r $(PWD)/conf/config.json $(PWD)/build/bin/conf/config.template.json
cp -fL $(CURDIR)/conf/config.json $(CURDIR)/build/bin/conf/config.template.json
@echo "Setting up releases..."
cd $(PWD)/build && bash setup_releases.sh
cd $(CURDIR)/build && bash setup_releases.sh
#coordinator_api_skip_libzkp:

View File

@@ -36,7 +36,7 @@ func verify(cCtx *cli.Context) error {
return fmt.Errorf("error reading file: %w", err)
}
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier, cfg.L2.ValidiumMode)
if err != nil {
return err
}

View File

@@ -36,8 +36,9 @@ type L2Endpoint struct {
// L2 loads l2geth configuration items.
type L2 struct {
// l2geth chain_id.
ChainID uint64 `json:"chain_id"`
Endpoint *L2Endpoint `json:"l2geth"`
ChainID uint64 `json:"chain_id"`
Endpoint *L2Endpoint `json:"l2geth"`
ValidiumMode bool `json:"validium_mode"`
}
// Auth provides the auth coordinator
@@ -47,17 +48,24 @@ type Auth struct {
LoginExpireDurationSec int `json:"login_expire_duration_sec"`
}
// The sequencer-controlled data
type Sequencer struct {
DecryptionKey string `json:"decryption_key"`
}
// Config load configuration items.
type Config struct {
ProverManager *ProverManager `json:"prover_manager"`
DB *database.Config `json:"db"`
L2 *L2 `json:"l2"`
Auth *Auth `json:"auth"`
Sequencer *Sequencer `json:"sequencer"`
}
// AssetConfig contains the assets configured for each fork; the default vk file name is "OpenVmVk.json".
type AssetConfig struct {
AssetsPath string `json:"assets_path"`
Version uint8 `json:"version,omitempty"`
ForkName string `json:"fork_name"`
Vkfile string `json:"vk_file,omitempty"`
MinProverVersion string `json:"min_prover_version,omitempty"`

View File

@@ -35,13 +35,17 @@ func TestConfig(t *testing.T) {
"maxIdleNum": 20
},
"l2": {
"chain_id": 111
"chain_id": 111,
"validium_mode": false
},
"auth": {
"secret": "prover secret key",
"challenge_expire_duration_sec": 3600,
"login_expire_duration_sec": 3600
}
},
"sequencer": {
"decryption_key": "sequencer decryption key"
}
}`
t.Run("Success Case", func(t *testing.T) {

View File

@@ -24,7 +24,9 @@ var (
// InitController inits Controller with database
func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) {
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
validiumMode := cfg.L2.ValidiumMode
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier, validiumMode)
if err != nil {
panic("proof receiver new verifier failure")
}

View File

@@ -93,8 +93,8 @@ func fromMessageTaskType(taskType int) int {
}
// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk)
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}
// Generate wrapped proof

View File

@@ -40,7 +40,9 @@ HandlingResult gen_universal_task(
char* task,
char* fork_name,
const unsigned char* expected_vk,
size_t expected_vk_len
size_t expected_vk_len,
const unsigned char* decryption_key,
size_t decryption_key_len
);
// Release memory allocated for a HandlingResult returned by gen_universal_task

View File

@@ -14,7 +14,7 @@ import (
func InitL2geth(configJSON string) {
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte) (bool, string, string, []byte) {
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON)
var metadata interface{}

View File

@@ -17,7 +17,7 @@ func InitL2geth(configJSON string) {
C.init_l2geth(cConfig)
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte) (bool, string, string, []byte) {
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
cTask := goToCString(taskJSON)
cForkName := goToCString(forkName)
defer freeCString(cTask)
@@ -29,7 +29,13 @@ func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk [
cVk = (*C.uchar)(unsafe.Pointer(&expectedVk[0]))
}
result := C.gen_universal_task(C.int(taskType), cTask, cForkName, cVk, C.size_t(len(expectedVk)))
// Obtain a C pointer to the Go slice's underlying data
var cDk *C.uchar
if len(decryptionKey) > 0 {
cDk = (*C.uchar)(unsafe.Pointer(&decryptionKey[0]))
}
result := C.gen_universal_task(C.int(taskType), cTask, cForkName, cVk, C.size_t(len(expectedVk)), cDk, C.size_t(len(decryptionKey)))
defer C.release_task_result(result)
// Check if the operation was successful

View File

@@ -257,36 +257,21 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
}
var chunkProofs []*message.OpenVMChunkProof
var chunkInfos []*message.ChunkInfo
// var chunkInfos []*message.ChunkInfo
for _, chunk := range chunks {
var proof message.OpenVMChunkProof
if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, task.TaskID, chunk.Hash)
}
chunkProofs = append(chunkProofs, &proof)
chunkInfo := message.ChunkInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
PostStateRoot: common.HexToHash(chunk.StateRoot),
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
DataHash: common.HexToHash(chunk.Hash),
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
IsPadding: false,
InitialBlockNumber: proof.MetaData.ChunkInfo.InitialBlockNumber,
BlockCtxs: proof.MetaData.ChunkInfo.BlockCtxs,
TxDataLength: proof.MetaData.ChunkInfo.TxDataLength,
}
chunkInfos = append(chunkInfos, &chunkInfo)
}
taskDetail, err := bp.getBatchTaskDetail(batch, chunkInfos, chunkProofs, hardForkName)
taskDetail, err := bp.getBatchTaskDetail(batch, chunkProofs, hardForkName)
if err != nil {
return nil, fmt.Errorf("failed to get batch task detail, taskID:%s err:%w", task.TaskID, err)
}
chunkProofsBytes, err := json.Marshal(taskDetail)
taskBytesWithchunkProofs, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", task.TaskID, err)
}
@@ -294,7 +279,7 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
taskMsg := &coordinatorType.GetTaskSchema{
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBatch),
TaskData: string(chunkProofsBytes),
TaskData: string(taskBytesWithchunkProofs),
HardForkName: hardForkName,
}
@@ -309,38 +294,56 @@ func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *or
}
}
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []*message.OpenVMChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkProofs []*message.OpenVMChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
// Get the version byte.
version, err := bp.version(hardForkName)
if err != nil {
return nil, fmt.Errorf("failed to decode version byte: %w", err)
}
taskDetail := &message.BatchTaskDetail{
ChunkInfos: chunkInfos,
Version: version,
ChunkProofs: chunkProofs,
ForkName: hardForkName,
}
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
switch dbBatchCodecVersion {
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8:
default:
return taskDetail, nil
}
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
if err != nil {
return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
}
batchHeader, decodeErr := codec.NewDABatchFromBytes(dbBatch.BatchHeader)
if decodeErr != nil {
return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
}
taskDetail.BatchHeader = batchHeader
taskDetail.BlobBytes = dbBatch.BlobBytes
taskDetail.ChallengeDigest = common.HexToHash(dbBatch.ChallengeDigest)
// Memory layout of `BlobDataProof`: used in Codec.BlobDataProofForPointEvaluation()
// | z | y | kzg_commitment | kzg_proof |
// |---------|---------|----------------|-----------|
// | bytes32 | bytes32 | bytes48 | bytes48 |
taskDetail.KzgProof = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[112:160]))}
taskDetail.KzgCommitment = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[64:112]))}
if !bp.validiumMode() {
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
switch dbBatchCodecVersion {
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8:
default:
return taskDetail, nil
}
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
if err != nil {
return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
}
batchHeader, decodeErr := codec.NewDABatchFromBytes(dbBatch.BatchHeader)
if decodeErr != nil {
return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
}
taskDetail.BatchHeader = batchHeader
taskDetail.ChallengeDigest = common.HexToHash(dbBatch.ChallengeDigest)
// Memory layout of `BlobDataProof`: used in Codec.BlobDataProofForPointEvaluation()
// | z | y | kzg_commitment | kzg_proof |
// |---------|---------|----------------|-----------|
// | bytes32 | bytes32 | bytes48 | bytes48 |
taskDetail.KzgProof = &message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[112:160]))}
taskDetail.KzgCommitment = &message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[64:112]))}
} else {
log.Debug("Apply validium mode for batch proving task")
codec := cutils.FromVersion(version)
batchHeader, decodeErr := codec.DABatchForTaskFromBytes(dbBatch.BatchHeader)
if decodeErr != nil {
return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
}
batchHeader.SetHash(common.HexToHash(dbBatch.Hash))
taskDetail.BatchHeader = batchHeader
}
return taskDetail, nil
}
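As an aside, a hedged sketch (splitBlobDataProof is a hypothetical helper, not part of this change) of how the BlobDataProof layout documented above maps onto the slice offsets used for KzgCommitment and KzgProof:
// Layout per the comment above: | z | y | kzg_commitment | kzg_proof |
//                               | 32 | 32 |      48       |    48     | = 160 bytes
func splitBlobDataProof(blobDataProof []byte) (z, y, commitment, proof []byte, err error) {
	if len(blobDataProof) < 160 {
		return nil, nil, nil, nil, fmt.Errorf("blob data proof too short: %d bytes", len(blobDataProof))
	}
	z = blobDataProof[0:32]
	y = blobDataProof[32:64]
	commitment = blobDataProof[64:112] // -> taskDetail.KzgCommitment
	proof = blobDataProof[112:160]     // -> taskDetail.KzgProof
	return z, y, commitment, proof, nil
}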

View File

@@ -273,7 +273,14 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
batchProofs = append(batchProofs, &proof)
}
// Get the version byte.
version, err := bp.version(hardForkName)
if err != nil {
return nil, fmt.Errorf("failed to decode version byte: %w", err)
}
taskDetail := message.BundleTaskDetail{
Version: version,
BatchProofs: batchProofs,
ForkName: hardForkName,
}

View File

@@ -237,14 +237,21 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%v", task.TaskID, dbErr)
}
// Get the version byte.
version, err := cp.version(hardForkName)
if err != nil {
return nil, fmt.Errorf("failed to decode version byte: %w", err)
}
var taskDetailBytes []byte
taskDetail := message.ChunkTaskDetail{
Version: version,
BlockHashes: blockHashes,
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
ForkName: hardForkName,
}
var err error
taskDetailBytes, err = json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal block hashes hash:%s, err:%w", task.TaskID, err)

View File

@@ -1,6 +1,7 @@
package provertask
import (
"encoding/hex"
"errors"
"fmt"
"strings"
@@ -20,6 +21,7 @@ import (
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
"scroll-tech/coordinator/internal/utils"
)
var (
@@ -66,6 +68,17 @@ type proverTaskContext struct {
hasAssignedTask *orm.ProverTask
}
func (b *BaseProverTask) version(hardForkName string) (uint8, error) {
return utils.Version(hardForkName, b.validiumMode())
}
// validiumMode induces different behavior in task generation:
// + skip the point_evaluation part in batch task
// + encode batch header with codec in utils instead of da-codec
func (b *BaseProverTask) validiumMode() bool {
return b.cfg.L2.ValidiumMode
}
// hardForkName gets the chunk/batch/bundle hard fork name
func (b *BaseProverTask) hardForkName(ctx *gin.Context, taskCtx *proverTaskContext) (string, error) {
switch {
@@ -193,7 +206,16 @@ func (b *BaseProverTask) applyUniversal(schema *coordinatorType.GetTaskSchema) (
return nil, nil, fmt.Errorf("no expectedVk found from hardfork %s", schema.HardForkName)
}
ok, uTaskData, metadata, _ := libzkp.GenerateUniversalTask(schema.TaskType, schema.TaskData, schema.HardForkName, expectedVk)
var decryptionKey []byte
if b.cfg.L2.ValidiumMode {
var err error
decryptionKey, err = hex.DecodeString(b.cfg.Sequencer.DecryptionKey)
if err != nil {
return nil, nil, fmt.Errorf("sequencer decryption key hex-decoding failed")
}
}
ok, uTaskData, metadata, _ := libzkp.GenerateUniversalTask(schema.TaskType, schema.TaskData, schema.HardForkName, expectedVk, decryptionKey)
if !ok {
return nil, nil, fmt.Errorf("can not generate universal task, see coordinator log for the reason")
}
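A small hedged sketch of the key handling above (decodeDecryptionKey is illustrative only): the sequencer decryption key is configured as a hex string and must decode to exactly 32 bytes, since the FFI layer (gen_universal_task further down) rejects any other length.
func decodeDecryptionKey(hexKey string) ([]byte, error) {
	key, err := hex.DecodeString(hexKey)
	if err != nil {
		return nil, fmt.Errorf("sequencer decryption key hex-decoding failed: %w", err)
	}
	if len(key) != 32 {
		return nil, fmt.Errorf("sequencer decryption key must be 32 bytes, got %d", len(key))
	}
	return key, nil
}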

View File

@@ -9,7 +9,7 @@ import (
)
// NewVerifier sets up a mock verifier.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
func NewVerifier(cfg *config.VerifierConfig, _ bool) (*Verifier, error) {
return &Verifier{
cfg: cfg,
OpenVMVkMap: map[string]struct{}{"mock_vk": {}},

View File

@@ -19,20 +19,34 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/utils"
)
// This struct maps to `CircuitConfig` in libzkp/src/verifier.rs.
// Defining a brand-new struct here eliminates side effects in case fields
// in `*config.CircuitConfig` are changed.
type rustCircuitConfig struct {
Version uint `json:"version"`
ForkName string `json:"fork_name"`
AssetsPath string `json:"assets_path"`
}
var validiumMode bool
func newRustCircuitConfig(cfg config.AssetConfig) *rustCircuitConfig {
ver := cfg.Version
if ver == 0 {
var err error
ver, err = utils.Version(cfg.ForkName, validiumMode)
if err != nil {
panic(err)
}
}
return &rustCircuitConfig{
ForkName: cfg.ForkName,
Version: uint(ver),
AssetsPath: cfg.AssetsPath,
ForkName: cfg.ForkName,
}
}
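For reference, a hedged sketch of the JSON this produces for a validium asset entry (the helper and assets path are made up): when AssetConfig.Version is left at 0, the byte is derived via utils.Version, e.g. 65 for validium (domain 1, STF version 1).
func exampleRustCircuitConfigJSON() ([]byte, error) {
	cfg := rustCircuitConfig{
		Version:    65, // utils.Version(<any fork>, true) in validium mode
		ForkName:   "feynman",
		AssetsPath: "/assets/validium/feynman", // hypothetical path
	}
	// Marshals to {"version":65,"fork_name":"feynman","assets_path":"/assets/validium/feynman"}
	return json.Marshal(&cfg)
}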
@@ -60,7 +74,8 @@ type rustVkDump struct {
}
// NewVerifier sets up a Rust FFI to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
func NewVerifier(cfg *config.VerifierConfig, useValidiumMode bool) (*Verifier, error) {
validiumMode = useValidiumMode
verifierConfig := newRustVerifierConfig(cfg)
configBytes, err := json.Marshal(verifierConfig)
if err != nil {

View File

@@ -0,0 +1,91 @@
package utils
import (
"encoding/binary"
"fmt"
"github.com/scroll-tech/go-ethereum/common"
)
type CodecVersion uint8
const (
daBatchValidiumEncodedLength = 137
)
type DABatch interface {
SetHash(common.Hash)
}
type daBatchValidiumV1 struct {
Version CodecVersion `json:"version"`
BatchIndex uint64 `json:"batch_index"`
ParentBatchHash common.Hash `json:"parent_batch_hash"`
PostStateRoot common.Hash `json:"post_state_root"`
WithDrawRoot common.Hash `json:"withdraw_root"`
Commitment common.Hash `json:"commitment"`
}
type daBatchValidium struct {
V1 *daBatchValidiumV1 `json:"V1,omitempty"`
BatchHash common.Hash `json:"batch_hash"`
}
func (da *daBatchValidium) SetHash(h common.Hash) {
da.BatchHash = h
}
func FromVersion(v uint8) CodecVersion {
return CodecVersion(v & STFVersionMask)
}
func (c CodecVersion) DABatchForTaskFromBytes(b []byte) (DABatch, error) {
switch c {
case 1:
if v1, err := decodeDABatchV1(b); err == nil {
return &daBatchValidium{
V1: v1,
}, nil
} else {
return nil, err
}
default:
return nil, fmt.Errorf("unknown codec type %d", c)
}
}
func decodeDABatchV1(data []byte) (*daBatchValidiumV1, error) {
if len(data) != daBatchValidiumEncodedLength {
return nil, fmt.Errorf("invalid data length for DABatchV7, expected %d bytes but got %d", daBatchValidiumEncodedLength, len(data))
}
const (
versionSize = 1
indexSize = 8
hashSize = 32
)
// Offsets (same as encodeBatchHeaderValidium)
versionOffset := 0
indexOffset := versionOffset + versionSize
parentHashOffset := indexOffset + indexSize
stateRootOffset := parentHashOffset + hashSize
withdrawRootOffset := stateRootOffset + hashSize
commitmentOffset := withdrawRootOffset + hashSize
version := CodecVersion(data[versionOffset])
batchIndex := binary.BigEndian.Uint64(data[indexOffset : indexOffset+indexSize])
parentBatchHash := common.BytesToHash(data[parentHashOffset : parentHashOffset+hashSize])
postStateRoot := common.BytesToHash(data[stateRootOffset : stateRootOffset+hashSize])
withdrawRoot := common.BytesToHash(data[withdrawRootOffset : withdrawRootOffset+hashSize])
commitment := common.BytesToHash(data[commitmentOffset : commitmentOffset+hashSize])
return &daBatchValidiumV1{
Version: version,
BatchIndex: batchIndex,
ParentBatchHash: parentBatchHash,
PostStateRoot: postStateRoot,
WithDrawRoot: withdrawRoot,
Commitment: commitment,
}, nil
}
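To make the 137-byte layout concrete, a hedged round-trip sketch (this helper is illustrative and is not the on-chain encodeBatchHeaderValidium): 1 version byte + 8-byte big-endian batch index + four 32-byte hashes = 137 bytes, which decodeDABatchV1 accepts.
func encodeDABatchV1ForTest(h *daBatchValidiumV1) []byte {
	buf := make([]byte, 0, daBatchValidiumEncodedLength)
	buf = append(buf, byte(h.Version))
	buf = binary.BigEndian.AppendUint64(buf, h.BatchIndex)
	buf = append(buf, h.ParentBatchHash.Bytes()...)
	buf = append(buf, h.PostStateRoot.Bytes()...)
	buf = append(buf, h.WithDrawRoot.Bytes()...)
	buf = append(buf, h.Commitment.Bytes()...)
	return buf // len(buf) == 137
}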

View File

@@ -0,0 +1,40 @@
package utils
import (
"errors"
"strings"
)
const (
DomainOffset = 6
STFVersionMask = (1 << DomainOffset) - 1
)
// Version gets the version byte for the chain instance
//
// TODO: This is not foolproof and does not cover all scenarios.
func Version(hardForkName string, validiumMode bool) (uint8, error) {
var domain, stfVersion uint8
if validiumMode {
domain = 1
stfVersion = 1
} else {
domain = 0
switch canonicalName := strings.ToLower(hardForkName); canonicalName {
case "euclidv1":
stfVersion = 6
case "euclidv2":
stfVersion = 7
case "feynman":
stfVersion = 8
case "galileo":
stfVersion = 9
default:
return 0, errors.New("unknown fork name " + canonicalName)
}
}
return (domain << DomainOffset) + stfVersion, nil
}
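A short usage sketch of the resulting version byte: the domain occupies the bits above DomainOffset and the STF version sits in the low six bits, so FromVersion (in the codec file above) can recover the STF version with the mask.
func ExampleVersion() {
	v, _ := Version("feynman", false) // Scroll domain: (0 << 6) + 8
	fmt.Println(v)
	v, _ = Version("feynman", true) // validium domain: (1 << 6) + 1; fork name is ignored
	fmt.Println(v)
	fmt.Println(v & STFVersionMask) // FromVersion would yield CodecVersion(1)
	// Output:
	// 8
	// 65
	// 1
}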

View File

@@ -5,7 +5,7 @@ use alloy::{
};
use eyre::Result;
use libzkp::tasks::ChunkInterpreter;
use sbv_primitives::types::Network;
use sbv_primitives::types::{consensus::TxL1Message, Network};
use serde::{Deserialize, Serialize};
fn default_max_retry() -> u32 {
@@ -168,6 +168,40 @@ impl<T: Provider<Network>> ChunkInterpreter for RpcClient<'_, T> {
self.handle
.block_on(fetch_storage_node_async(&self.provider, node_hash))
}
fn try_fetch_l1_msgs(&self, block_number: u64) -> Result<Vec<TxL1Message>> {
async fn fetch_l1_msgs(
provider: impl Provider<Network>,
block_number: u64,
) -> Result<Vec<TxL1Message>> {
let block_number_hex = format!("0x{:x}", block_number);
#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum NullOrVec {
Null, // matches JSON `null`
Vec(Vec<TxL1Message>), // matches JSON array
}
Ok(
match provider
.client()
.request::<_, NullOrVec>(
"scroll_getL1MessagesInBlock",
(block_number_hex, "synced"),
)
.await?
{
NullOrVec::Null => Vec::new(),
NullOrVec::Vec(r) => r,
},
)
}
tracing::debug!("fetch L1 msgs for {block_number}");
self.handle
.block_on(fetch_l1_msgs(&self.provider, block_number))
}
}
#[cfg(test)]
@@ -218,4 +252,16 @@ mod tests {
println!("{}", serde_json::to_string_pretty(&wit2).unwrap());
}
#[test]
#[ignore = "Requires L2GETH_ENDPOINT environment variable"]
fn test_try_fetch_l1_messages() {
let config = create_config_from_env();
let client_core = RpcClientCore::create(&config).expect("Failed to create RPC client");
let client = client_core.get_client();
let msgs = client.try_fetch_l1_msgs(32).expect("should succeed");
println!("{}", serde_json::to_string_pretty(&msgs).unwrap());
}
}
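For completeness, a hedged Go-side sketch of the same RPC call (the helper and the json.RawMessage result type are assumptions; only the method name scroll_getL1MessagesInBlock and the "synced" argument come from the code above):
import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/scroll-tech/go-ethereum/rpc"
)

func fetchL1MessagesInBlock(ctx context.Context, cli *rpc.Client, blockNumber uint64) ([]json.RawMessage, error) {
	var msgs []json.RawMessage
	// A null result simply means the block contains no L1 messages.
	if err := cli.CallContext(ctx, &msgs, "scroll_getL1MessagesInBlock", fmt.Sprintf("0x%x", blockNumber), "synced"); err != nil {
		return nil, err
	}
	return msgs, nil
}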

View File

@@ -5,7 +5,7 @@ edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
scroll-zkvm-types.workspace = true
scroll-zkvm-types = { workspace = true, features = ["scroll"] }
scroll-zkvm-verifier.workspace = true
alloy-primitives.workspace = true #depress the effect of "native-keccak"

View File

@@ -13,6 +13,7 @@ use tasks::chunk_interpreter::{ChunkInterpreter, TryFromWithInterpreter};
/// global features: use legacy encoding for witness
static mut LEGACY_WITNESS_ENCODING: bool = false;
pub(crate) fn witness_use_legacy_mode() -> bool {
unsafe { LEGACY_WITNESS_ENCODING }
}
@@ -36,14 +37,13 @@ pub fn set_dynamic_feature(feats: &str) {
/// task (with full witnesses)
pub fn checkout_chunk_task(
task_json: &str,
decryption_key: Option<&[u8]>,
interpreter: impl ChunkInterpreter,
) -> eyre::Result<String> {
let chunk_task = serde_json::from_str::<tasks::ChunkTask>(task_json)?;
let ret = serde_json::to_string(&tasks::ChunkProvingTask::try_from_with_interpret(
chunk_task,
interpreter,
)?)?;
Ok(ret)
Ok(serde_json::to_string(
&tasks::ChunkProvingTask::try_from_with_interpret(chunk_task, decryption_key, interpreter)?,
)?)
}
/// Convert the universal task json into a form compatible with the old prover

View File

@@ -8,9 +8,10 @@ use scroll_zkvm_types::{
bundle::BundleInfo,
chunk::ChunkInfo,
proof::{EvmProof, OpenVmEvmProof, ProofEnum, StarkProof},
public_inputs::{ForkName, MultiVersionPublicInputs},
public_inputs::MultiVersionPublicInputs,
types_agg::AggregationInput,
utils::{serialize_vk, vec_as_base64},
version,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
@@ -181,13 +182,13 @@ impl<Metadata: ProofMetadata> WrappedProof<Metadata> {
/// Sanity checks on the wrapped proof:
///
/// - pi_hash computed in host does in fact match pi_hash computed in guest
pub fn pi_hash_check(&self, fork_name: ForkName) -> bool {
pub fn pi_hash_check(&self, ver: version::Version) -> bool {
let proof_pi = self.proof.public_values();
let expected_pi = self
.metadata
.pi_hash_info()
.pi_hash_by_fork(fork_name)
.pi_hash_by_version(ver)
.0
.as_ref()
.iter()
@@ -252,6 +253,7 @@ mod tests {
batch_hash: B256::repeat_byte(4),
withdraw_root: B256::repeat_byte(5),
msg_queue_hash: B256::repeat_byte(6),
encryption_key: None,
};
let bundle_pi_hash = bundle_info.pi_hash(ForkName::EuclidV1);
BundleProofMetadata {

View File

@@ -10,32 +10,27 @@ pub use chunk_interpreter::ChunkInterpreter;
pub use scroll_zkvm_types::task::ProvingTask;
use crate::{
proofs::{self, BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata},
proofs::{BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata},
utils::panic_catch,
};
use sbv_primitives::B256;
use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs};
use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs, Version};
fn encode_task_to_witness<T: serde::Serialize>(task: &T) -> eyre::Result<Vec<u8>> {
let config = bincode::config::standard();
Ok(bincode::serde::encode_to_vec(task, config)?)
}
fn check_aggregation_proofs<Metadata>(
proofs: &[proofs::WrappedProof<Metadata>],
fork_name: ForkName,
) -> eyre::Result<()>
where
Metadata: proofs::ProofMetadata,
{
fn check_aggregation_proofs<Metadata: MultiVersionPublicInputs>(
metadata: &[Metadata],
version: Version,
) -> eyre::Result<()> {
panic_catch(|| {
for w in proofs.windows(2) {
w[1].metadata
.pi_hash_info()
.validate(w[0].metadata.pi_hash_info(), fork_name);
for w in metadata.windows(2) {
w[1].validate(&w[0], version);
}
})
.map_err(|e| eyre::eyre!("Chunk data validation failed: {}", e))?;
.map_err(|e| eyre::eyre!("Metadata validation failed: {}", e))?;
Ok(())
}

View File

@@ -1,21 +1,31 @@
use crate::proofs::ChunkProof;
use c_kzg::Bytes48;
use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
build_point_eval_witness, BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderV8,
BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8, LegacyBatchWitness,
ReferenceHeader, N_BLOB_BYTES,
BatchHeaderValidium, BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8,
LegacyBatchWitness, ReferenceHeader, N_BLOB_BYTES,
},
public_inputs::ForkName,
chunk::ChunkInfo,
public_inputs::{ForkName, Version},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
version::{Domain, STFVersion},
};
use crate::proofs::ChunkProof;
mod utils;
use utils::{base64, point_eval};
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct BatchHeaderValidiumWithHash {
#[serde(flatten)]
header: BatchHeaderValidium,
batch_hash: B256,
}
/// Defines a variant batch header type. Since BatchHeaderV6 cannot
/// be decoded as V7, we can always get a correct deserialization.
/// Notice: the V6 header MUST be put above V7 since the enum is untagged
@@ -23,36 +33,55 @@ use utils::{base64, point_eval};
#[derive(Clone, serde::Deserialize, serde::Serialize)]
#[serde(untagged)]
pub enum BatchHeaderV {
Validium(BatchHeaderValidiumWithHash),
V6(BatchHeaderV6),
V7_8(BatchHeaderV7),
}
impl core::fmt::Display for BatchHeaderV {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
BatchHeaderV::V6(_) => write!(f, "V6"),
BatchHeaderV::V7_8(_) => write!(f, "V7_8"),
BatchHeaderV::Validium(_) => write!(f, "Validium"),
}
}
}
impl BatchHeaderV {
pub fn batch_hash(&self) -> B256 {
match self {
BatchHeaderV::V6(h) => h.batch_hash(),
BatchHeaderV::V7_8(h) => h.batch_hash(),
BatchHeaderV::Validium(h) => h.header.batch_hash(),
}
}
pub fn must_v6_header(&self) -> &BatchHeaderV6 {
match self {
BatchHeaderV::V6(h) => h,
_ => panic!("try to pick other header type"),
_ => unreachable!("A header of {} is considered to be v6", self),
}
}
pub fn must_v7_header(&self) -> &BatchHeaderV7 {
match self {
BatchHeaderV::V7_8(h) => h,
_ => panic!("try to pick other header type"),
_ => unreachable!("A header of {} is considered to be v7", self),
}
}
pub fn must_v8_header(&self) -> &BatchHeaderV8 {
match self {
BatchHeaderV::V7_8(h) => h,
_ => panic!("try to pick other header type"),
_ => unreachable!("A header of {} is considered to be v8", self),
}
}
pub fn must_validium_header(&self) -> &BatchHeaderValidium {
match self {
BatchHeaderV::Validium(h) => &h.header,
_ => unreachable!("A header of {} is considered to be validium", self),
}
}
}
@@ -61,6 +90,8 @@ impl BatchHeaderV {
/// is compatible with both pre-euclidv2 and euclidv2
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct BatchProvingTask {
/// The version of the chunks in the batch, as per [`Version`].
pub version: u8,
/// Chunk proofs for the contiguous list of chunks within the batch.
pub chunk_proofs: Vec<ChunkProof>,
/// The [`BatchHeaderV6/V7`], as computed on-chain for this batch.
@@ -107,85 +138,135 @@ impl TryFrom<BatchProvingTask> for ProvingTask {
impl BatchProvingTask {
fn build_guest_input(&self) -> BatchWitness {
let fork_name = self.fork_name.to_lowercase().as_str().into();
let version = Version::from(self.version);
// sanity check: calculate point eval needed and compare with task input
let (kzg_commitment, kzg_proof, challenge_digest) = {
let blob = point_eval::to_blob(&self.blob_bytes);
let commitment = point_eval::blob_to_kzg_commitment(&blob);
let versioned_hash = point_eval::get_versioned_hash(&commitment);
let challenge_digest = match &self.batch_header {
BatchHeaderV::V6(_) => {
assert_eq!(
fork_name,
ForkName::EuclidV1,
"hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
ForkName::EuclidV1,
);
EnvelopeV6::from_slice(self.blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
BatchHeaderV::V7_8(_) => {
let padded_blob_bytes = {
let mut padded_blob_bytes = self.blob_bytes.to_vec();
padded_blob_bytes.resize(N_BLOB_BYTES, 0);
padded_blob_bytes
};
match fork_name {
ForkName::EuclidV2 => {
<EnvelopeV7 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
ForkName::Feynman => {
<EnvelopeV8 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
f => unreachable!(
"hardfork mismatch for da-codec@v7 header: found={}, expected={:?}",
f,
[ForkName::EuclidV2, ForkName::Feynman],
),
let point_eval_witness = if !version.is_validium() {
// sanity check: calculate point eval needed and compare with task input
let (kzg_commitment, kzg_proof, challenge_digest) = {
let blob = point_eval::to_blob(&self.blob_bytes);
let commitment = point_eval::blob_to_kzg_commitment(&blob);
let versioned_hash = point_eval::get_versioned_hash(&commitment);
let challenge_digest = match &self.batch_header {
BatchHeaderV::V6(_) => {
assert_eq!(
version.fork,
ForkName::EuclidV1,
"hardfork mismatch for da-codec@v6 header: found={:?}, expected={:?}",
version.fork,
ForkName::EuclidV1,
);
EnvelopeV6::from_slice(self.blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
}
BatchHeaderV::V7_8(_) => {
let padded_blob_bytes = {
let mut padded_blob_bytes = self.blob_bytes.to_vec();
padded_blob_bytes.resize(N_BLOB_BYTES, 0);
padded_blob_bytes
};
match version.fork {
ForkName::EuclidV2 => {
<EnvelopeV7 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
ForkName::Feynman => {
<EnvelopeV8 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
fork_name => unreachable!(
"hardfork mismatch for da-codec@v7 header: found={}, expected={:?}",
fork_name,
[ForkName::EuclidV2, ForkName::Feynman],
),
}
}
BatchHeaderV::Validium(_) => unreachable!("version!=validium"),
};
let (proof, _) = point_eval::get_kzg_proof(&blob, challenge_digest);
(commitment.to_bytes(), proof.to_bytes(), challenge_digest)
};
let (proof, _) = point_eval::get_kzg_proof(&blob, challenge_digest);
if let Some(k) = self.kzg_commitment {
assert_eq!(k, kzg_commitment);
}
(commitment.to_bytes(), proof.to_bytes(), challenge_digest)
if let Some(c) = self.challenge_digest {
assert_eq!(c, U256::from_be_bytes(challenge_digest.0));
}
if let Some(p) = self.kzg_proof {
assert_eq!(p, kzg_proof);
}
Some(build_point_eval_witness(
kzg_commitment.into_inner(),
kzg_proof.into_inner(),
))
} else {
assert!(self.kzg_proof.is_none(), "domain=validium has no blob-da");
assert!(
self.kzg_commitment.is_none(),
"domain=validium has no blob-da"
);
assert!(
self.challenge_digest.is_none(),
"domain=validium has no blob-da"
);
match &self.batch_header {
BatchHeaderV::Validium(h) => assert_eq!(
h.header.batch_hash(),
h.batch_hash,
"calculated batch hash match which from coordinator"
),
_ => panic!("unexpected header type"),
}
None
};
if let Some(k) = self.kzg_commitment {
assert_eq!(k, kzg_commitment);
}
if let Some(c) = self.challenge_digest {
assert_eq!(c, U256::from_be_bytes(challenge_digest.0));
}
if let Some(p) = self.kzg_proof {
assert_eq!(p, kzg_proof);
}
let point_eval_witness = Some(build_point_eval_witness(
kzg_commitment.into_inner(),
kzg_proof.into_inner(),
));
let reference_header = match fork_name {
ForkName::EuclidV1 => ReferenceHeader::V6(*self.batch_header.must_v6_header()),
ForkName::EuclidV2 => ReferenceHeader::V7(*self.batch_header.must_v7_header()),
ForkName::Feynman => ReferenceHeader::V8(*self.batch_header.must_v8_header()),
let reference_header = match (version.domain, version.stf_version) {
(Domain::Scroll, STFVersion::V6) => {
ReferenceHeader::V6(*self.batch_header.must_v6_header())
}
(Domain::Scroll, STFVersion::V7) => {
ReferenceHeader::V7(*self.batch_header.must_v7_header())
}
(Domain::Scroll, STFVersion::V8) => {
ReferenceHeader::V8(*self.batch_header.must_v8_header())
}
(Domain::Validium, STFVersion::V1) => {
ReferenceHeader::Validium(*self.batch_header.must_validium_header())
}
(domain, stf_version) => {
unreachable!("unsupported domain={domain:?},stf-version={stf_version:?}")
}
};
// patch: ensure block_hash field is ZERO for scroll domain
let chunk_infos = self
.chunk_proofs
.iter()
.map(|p| {
if version.domain == Domain::Scroll {
ChunkInfo {
prev_blockhash: B256::ZERO,
post_blockhash: B256::ZERO,
..p.metadata.chunk_info.clone()
}
} else {
p.metadata.chunk_info.clone()
}
})
.collect();
BatchWitness {
fork_name,
version: version.as_version_byte(),
fork_name: version.fork,
chunk_proofs: self.chunk_proofs.iter().map(|proof| proof.into()).collect(),
chunk_infos: self
.chunk_proofs
.iter()
.map(|p| p.metadata.chunk_info.clone())
.collect(),
chunk_infos,
blob_bytes: self.blob_bytes.clone(),
reference_header,
point_eval_witness,
@@ -193,15 +274,81 @@ impl BatchProvingTask {
}
pub fn precheck_and_build_metadata(&self) -> Result<BatchInfo> {
let fork_name = ForkName::from(self.fork_name.as_str());
// for every aggregation task, there are two steps needed to build the metadata:
// 1. generate data for metadata from the witness
// 2. validate every adjacent proof pair
let witness = self.build_guest_input();
let metadata = BatchInfo::from(&witness);
super::check_aggregation_proofs(self.chunk_proofs.as_slice(), fork_name)?;
super::check_aggregation_proofs(
witness.chunk_infos.as_slice(),
Version::from(self.version),
)?;
Ok(metadata)
}
}
#[test]
fn test_deserde_batch_header_v_validium() {
use std::str::FromStr;
// Top-level JSON: flattened enum tag "V1" + batch_hash
let json = r#"{
"V1": {
"version": 1,
"batch_index": 42,
"parent_batch_hash": "0x1111111111111111111111111111111111111111111111111111111111111111",
"post_state_root": "0x2222222222222222222222222222222222222222222222222222222222222222",
"withdraw_root": "0x3333333333333333333333333333333333333333333333333333333333333333",
"commitment": "0x4444444444444444444444444444444444444444444444444444444444444444"
},
"batch_hash": "0x5555555555555555555555555555555555555555555555555555555555555555"
}"#;
let parsed: BatchHeaderV = serde_json::from_str(json).expect("deserialize BatchHeaderV");
match parsed {
BatchHeaderV::Validium(v) => {
// Check the batch_hash field
let expected_batch_hash = B256::from_str(
"0x5555555555555555555555555555555555555555555555555555555555555555",
)
.unwrap();
assert_eq!(v.batch_hash, expected_batch_hash);
// Check the inner header variant and fields
match v.header {
BatchHeaderValidium::V1(h) => {
assert_eq!(h.version, 1);
assert_eq!(h.batch_index, 42);
let p = B256::from_str(
"0x1111111111111111111111111111111111111111111111111111111111111111",
)
.unwrap();
let s = B256::from_str(
"0x2222222222222222222222222222222222222222222222222222222222222222",
)
.unwrap();
let w = B256::from_str(
"0x3333333333333333333333333333333333333333333333333333333333333333",
)
.unwrap();
let c = B256::from_str(
"0x4444444444444444444444444444444444444444444444444444444444444444",
)
.unwrap();
assert_eq!(h.parent_batch_hash, p);
assert_eq!(h.post_state_root, s);
assert_eq!(h.withdraw_root, w);
assert_eq!(h.commitment, c);
// Sanity: computed batch hash equals the provided one (if method available)
// assert_eq!(v.header.batch_hash(), expected_batch_hash);
}
}
}
_ => panic!("expected validium header variant"),
}
}

View File

@@ -1,17 +1,21 @@
use crate::proofs::BatchProof;
use eyre::Result;
use scroll_zkvm_types::{
bundle::{BundleInfo, BundleWitness},
public_inputs::ForkName,
bundle::{BundleInfo, BundleWitness, LegacyBundleWitness},
public_inputs::Version,
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};
use crate::proofs::BatchProof;
/// Message indicating a sanity check failure.
const BUNDLE_SANITY_MSG: &str = "bundle must have at least one batch";
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct BundleProvingTask {
/// The version of batches in the bundle.
pub version: u8,
/// The STARK proofs of each batch in the bundle.
pub batch_proofs: Vec<BatchProof>,
/// for sanity check
pub bundle_info: Option<BundleInfo>,
@@ -40,26 +44,29 @@ impl BundleProvingTask {
}
fn build_guest_input(&self) -> BundleWitness {
let version = Version::from(self.version);
BundleWitness {
version: version.as_version_byte(),
batch_proofs: self.batch_proofs.iter().map(|proof| proof.into()).collect(),
batch_infos: self
.batch_proofs
.iter()
.map(|wrapped_proof| wrapped_proof.metadata.batch_info.clone())
.collect(),
fork_name: self.fork_name.to_lowercase().as_str().into(),
fork_name: version.fork,
}
}
pub fn precheck_and_build_metadata(&self) -> Result<BundleInfo> {
let fork_name = ForkName::from(self.fork_name.as_str());
// for every aggregation task, there are two steps needed to build the metadata:
// 1. generate data for metadata from the witness
// 2. validate every adjacent proof pair
let witness = self.build_guest_input();
let metadata = BundleInfo::from(&witness);
super::check_aggregation_proofs(self.batch_proofs.as_slice(), fork_name)?;
super::check_aggregation_proofs(
witness.batch_infos.as_slice(),
Version::from(self.version),
)?;
Ok(metadata)
}
@@ -71,7 +78,8 @@ impl TryFrom<BundleProvingTask> for ProvingTask {
fn try_from(value: BundleProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
let serialized_witness = if crate::witness_use_legacy_mode() {
to_rkyv_bytes::<RancorError>(&witness)?.into_vec()
let legacy = LegacyBundleWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};

View File

@@ -1,20 +1,26 @@
use super::chunk_interpreter::*;
use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::B256;
use sbv_primitives::{types::consensus::BlockHeader, B256};
use scroll_zkvm_types::{
chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness},
chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness, ValidiumInputs},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
version::Version,
};
use super::chunk_interpreter::*;
/// The type aligned with the coordinator's definition
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ChunkTask {
/// The version for the chunk, as per [`Version`].
pub version: u8,
/// block hashes for a series of blocks
pub block_hashes: Vec<B256>,
/// The on-chain L1 msg queue hash before applying L1 msg txs from the chunk.
pub prev_msg_queue_hash: B256,
/// The on-chain L1 msg queue hash after applying L1 msg txs from the chunk (for validation)
pub post_msg_queue_hash: B256,
/// Specified fork name
pub fork_name: String,
}
@@ -22,6 +28,7 @@ pub struct ChunkTask {
impl TryFromWithInterpreter<ChunkTask> for ChunkProvingTask {
fn try_from_with_interpret(
value: ChunkTask,
decryption_key: Option<&[u8]>,
interpreter: impl ChunkInterpreter,
) -> Result<Self> {
let mut block_witnesses = Vec::new();
@@ -31,10 +38,28 @@ impl TryFromWithInterpreter<ChunkTask> for ChunkProvingTask {
block_witnesses.push(witness);
}
let validium_txs = if Version::from(value.version).is_validium() {
let mut validium_txs = Vec::new();
for block_number in block_witnesses.iter().map(|w| w.header.number()) {
validium_txs.push(interpreter.try_fetch_l1_msgs(block_number)?);
}
validium_txs
} else {
vec![]
};
let validium_inputs = decryption_key.map(|secret_key| ValidiumInputs {
validium_txs,
secret_key: secret_key.into(),
});
Ok(Self {
version: value.version,
block_witnesses,
prev_msg_queue_hash: value.prev_msg_queue_hash,
post_msg_queue_hash: value.post_msg_queue_hash,
fork_name: value.fork_name,
validium_inputs,
})
}
}
@@ -48,12 +73,18 @@ const CHUNK_SANITY_MSG: &str = "chunk must have at least one block";
/// - {first_block_number}-{last_block_number}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct ChunkProvingTask {
/// The version for the chunk, as per [Version][scroll_zkvm_types::version::Version].
pub version: u8,
/// Witnesses for every block in the chunk.
pub block_witnesses: Vec<BlockWitness>,
/// The on-chain L1 msg queue hash before applying L1 msg txs from the chunk.
pub prev_msg_queue_hash: B256,
/// The on-chain L1 msg queue hash after applying L1 msg txs from the chunk (for validation)
pub post_msg_queue_hash: B256,
/// Specified fork name
pub fork_name: String,
/// Optional inputs in case of domain=validium.
pub validium_inputs: Option<ValidiumInputs>,
}
#[derive(Clone, Debug)]
@@ -126,11 +157,25 @@ impl ChunkProvingTask {
}
fn build_guest_input(&self) -> ChunkWitness {
ChunkWitness::new(
&self.block_witnesses,
self.prev_msg_queue_hash,
self.fork_name.to_lowercase().as_str().into(),
)
let version = Version::from(self.version);
if version.is_validium() {
assert!(self.validium_inputs.is_some());
ChunkWitness::new(
version.as_version_byte(),
&self.block_witnesses,
self.prev_msg_queue_hash,
version.fork,
self.validium_inputs.clone(),
)
} else {
ChunkWitness::new_scroll(
version.as_version_byte(),
&self.block_witnesses,
self.prev_msg_queue_hash,
version.fork,
)
}
}
fn insert_state(&mut self, node: sbv_primitives::Bytes) {
@@ -139,8 +184,8 @@ impl ChunkProvingTask {
pub fn precheck_and_build_metadata(&self) -> Result<ChunkInfo> {
let witness = self.build_guest_input();
let ret = ChunkInfo::try_from(witness).map_err(|e| eyre::eyre!("{e}"))?;
assert_eq!(ret.post_msg_queue_hash, self.post_msg_queue_hash);
Ok(ret)
}

View File

@@ -1,6 +1,6 @@
use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::{Bytes, B256};
use sbv_primitives::{types::consensus::TxL1Message, Bytes, B256};
/// An interpreter which is critical in translating chunk data,
/// since we need to fetch block witness and storage node data
@@ -13,13 +13,22 @@ pub trait ChunkInterpreter {
) -> Result<BlockWitness> {
Err(eyre::eyre!("no implement"))
}
fn try_fetch_storage_node(&self, _node_hash: B256) -> Result<Bytes> {
Err(eyre::eyre!("no implement"))
}
fn try_fetch_l1_msgs(&self, _block_number: u64) -> Result<Vec<TxL1Message>> {
Err(eyre::eyre!("no implement"))
}
}
pub trait TryFromWithInterpreter<T>: Sized {
fn try_from_with_interpret(value: T, interpreter: impl ChunkInterpreter) -> Result<Self>;
fn try_from_with_interpret(
value: T,
decryption_key: Option<&[u8]>,
interpreter: impl ChunkInterpreter,
) -> Result<Self>;
}
pub struct DummyInterpreter {}

View File

@@ -41,6 +41,7 @@ pub trait ProofVerifier {
#[derive(Debug, Serialize, Deserialize)]
pub struct CircuitConfig {
pub version: u8,
pub fork_name: String,
pub assets_path: String,
}
@@ -61,14 +62,18 @@ pub fn init(config: VerifierConfig) {
for cfg in &config.circuits {
let canonical_fork_name = cfg.fork_name.to_lowercase();
let verifier = Verifier::new(&cfg.assets_path, canonical_fork_name.as_str().into());
let verifier = Verifier::new(&cfg.assets_path, cfg.version);
let ret = verifiers.insert(canonical_fork_name, Arc::new(Mutex::new(verifier)));
assert!(
ret.is_none(),
"DO NOT init the same fork {} twice",
cfg.fork_name
);
tracing::info!("load verifier config for fork {}", cfg.fork_name);
tracing::info!(
"load verifier config for fork {} (ver {})",
cfg.fork_name,
cfg.version
);
}
let ret = VERIFIERS.set(verifiers).is_ok();

View File

@@ -6,22 +6,22 @@ use crate::{
proofs::{AsRootProof, BatchProof, BundleProof, ChunkProof, IntoEvmProof},
utils::panic_catch,
};
use scroll_zkvm_types::public_inputs::ForkName;
use scroll_zkvm_types::version::Version;
use scroll_zkvm_verifier::verifier::UniversalVerifier;
use std::path::Path;
pub struct Verifier {
verifier: UniversalVerifier,
fork: ForkName,
version: Version,
}
impl Verifier {
pub fn new(assets_dir: &str, fork: ForkName) -> Self {
pub fn new(assets_dir: &str, ver_n: u8) -> Self {
let verifier_bin = Path::new(assets_dir);
Self {
verifier: UniversalVerifier::setup(verifier_bin).expect("Setting up chunk verifier"),
fork,
version: Version::from(ver_n),
}
}
}
@@ -31,21 +31,21 @@ impl ProofVerifier for Verifier {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof).unwrap();
assert!(proof.pi_hash_check(self.fork));
assert!(proof.pi_hash_check(self.version));
self.verifier
.verify_stark_proof(proof.as_root_proof(), &proof.vk)
.unwrap()
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof).unwrap();
assert!(proof.pi_hash_check(self.fork));
assert!(proof.pi_hash_check(self.version));
self.verifier
.verify_stark_proof(proof.as_root_proof(), &proof.vk)
.unwrap()
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof).unwrap();
assert!(proof.pi_hash_check(self.fork));
assert!(proof.pi_hash_check(self.version));
let vk = proof.vk.clone();
let evm_proof = proof.into_evm_proof();
self.verifier.verify_evm_proof(&evm_proof, &vk).unwrap()

View File

@@ -152,11 +152,28 @@ pub unsafe extern "C" fn gen_universal_task(
fork_name: *const c_char,
expected_vk: *const u8,
expected_vk_len: usize,
decryption_key: *const u8,
decryption_key_len: usize,
) -> HandlingResult {
let task_json = if task_type == TaskType::Chunk as i32 {
let pre_task_str = c_char_to_str(task);
let cli = l2geth::get_client();
match libzkp::checkout_chunk_task(pre_task_str, cli) {
let decryption_key = if decryption_key_len > 0 {
if decryption_key_len != 32 {
tracing::error!(
"gen_universal_task received {}-byte decryption key; expected 32",
decryption_key_len
);
return failed_handling_result();
}
Some(std::slice::from_raw_parts(
decryption_key,
decryption_key_len,
))
} else {
None
};
match libzkp::checkout_chunk_task(pre_task_str, decryption_key, cli) {
Ok(str) => str,
Err(e) => {
tracing::error!("gen_universal_task failed at pre interpret step, error: {e}");

View File

@@ -2,6 +2,9 @@
"feynman": {
"b68fdc3f28a5ce006280980df70cd3447e56913e5bca6054603ba85f0794c23a6618ea25a7991845bbc5fd571670ee47379ba31ace92d345bca59702a0d4112d": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/chunk/",
"9a3f66370f11e3303f1a1248921025104e83253efea43a70d221cf4e15fc145bf2be2f4468d1ac4a70e7682babb1c60417e21c7633d4b55b58f44703ec82b05a": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/batch/",
"1f8627277e1c1f6e1cc70c03e6fde06929e5ea27ca5b1d56e23b235dfeda282e22c0e5294bcb1b3a9def836f8d0f18612a9860629b9497292976ca11844b7e73": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/bundle/"
"1f8627277e1c1f6e1cc70c03e6fde06929e5ea27ca5b1d56e23b235dfeda282e22c0e5294bcb1b3a9def836f8d0f18612a9860629b9497292976ca11844b7e73": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.5.2/bundle/",
"7eb91f1885cc7a63cc848928f043fa56bf747161a74cd933d88c0456b90643346618ea25a7991845bbc5fd571670ee47379ba31ace92d345bca59702a0d4112d": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.6.0-rc.1/chunk/",
"dc653e7416628c612fa4d80b4724002bad4fde3653aef7316b80df0c19740a1bf2be2f4468d1ac4a70e7682babb1c60417e21c7633d4b55b58f44703ec82b05a": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.6.0-rc.1/batch/",
"14de1c74b663ed3c99acb03e90a5753b5923233c5c590864ad7746570297d16722c0e5294bcb1b3a9def836f8d0f18612a9860629b9497292976ca11844b7e73": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/0.6.0-rc.1/bundle/"
}
}

View File

@@ -793,6 +793,7 @@ github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS3
github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/deepmap/oapi-codegen v1.6.0 h1:w/d1ntwh91XI0b/8ja7+u5SvA4IFfM0UNNLmiDR1gg0=
@@ -1412,6 +1413,7 @@ github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148 h1:cyK1ifU2
github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148/go.mod h1:gz5x3CsLy5htNTbv4PWRPBU9nSAujfx1U2XtFcXoFuk=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/ecies-go/v2 v2.0.10-beta.1/go.mod h1:A+pHaITd+ogBm4Rk35xebF9OPiyMYlFlgqBOiY5PSjg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
@@ -1616,6 +1618,7 @@ golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOM
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1786,6 +1789,7 @@ golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2 h1:IRJeR9r1pYWsHKTRe/IInb7lYvbBVIqOgsX/u0mbOWY=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
@@ -1794,6 +1798,7 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=

View File

@@ -10,8 +10,8 @@ import (
"github.com/scroll-tech/da-codec/encoding"
"github.com/urfave/cli/v2"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/database"
"scroll-tech/common/observability"
@@ -91,12 +91,13 @@ func action(ctx *cli.Context) error {
bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
l2client, err := rpc.Dial(cfg.L2Config.Endpoint)
if err != nil {
return fmt.Errorf("failed to connect to L2geth at RPC=%s: %w", cfg.L2Config.Endpoint, err)
}
l2Watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry)
l2Watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress,
cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, cfg.L2Config.RelayerConfig.ValidiumMode, registry)
recovery := permissionless_batches.NewRecovery(subCtx, cfg, genesis, db, chunkProposer, batchProposer, bundleProposer, l2Watcher)

View File

@@ -12,6 +12,7 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/l1"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
@@ -69,10 +70,11 @@ func action(ctx *cli.Context) error {
observability.Server(ctx, db)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
l2client, err := rpc.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
l2ethClient := ethclient.NewClient(l2client)
genesisPath := ctx.String(utils.Genesis.Name)
genesis, err := utils.ReadGenesis(genesisPath)
@@ -100,7 +102,7 @@ func action(ctx *cli.Context) error {
log.Crit("cfg.L2Config.RelayerConfig.SenderConfig.FusakaTimestamp must be set")
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, genesis.Config, relayer.ServiceTypeL2RollupRelayer, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2ethClient, db, cfg.L2Config.RelayerConfig, genesis.Config, relayer.ServiceTypeL2RollupRelayer, registry)
if err != nil {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
}
@@ -114,7 +116,7 @@ func action(ctx *cli.Context) error {
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, cfg.L2Config.RelayerConfig.ValidiumMode, registry)
bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry)
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry)
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, cfg.L2Config.RelayerConfig.ValidiumMode, registry)
if cfg.RecoveryConfig != nil && cfg.RecoveryConfig.Enable {
log.Info("Starting rollup-relayer in recovery mode", "version", version.Version)
@@ -144,7 +146,7 @@ func action(ctx *cli.Context) error {
// Watcher loop to fetch missing blocks
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := rutils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
number, loopErr := rutils.GetLatestConfirmedBlockNumber(ctx, l2ethClient, cfg.L2Config.Confirmations)
if loopErr != nil {
log.Error("failed to get block number", "err", loopErr)
return
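
For reference, a minimal sketch (not part of this diff) of the dialing pattern the rollup-relayer adopts here: dial the raw RPC client once with rpc.Dial, then derive the typed client via ethclient.NewClient so both share a single connection. The endpoint URL and function name are placeholders.

package example

import (
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/rpc"
)

// dialL2 dials the L2 endpoint once and returns both the raw RPC client
// (needed for custom methods such as scroll_getL1MessagesInBlock) and the
// typed ethclient derived from it.
func dialL2(endpoint string) (*rpc.Client, *ethclient.Client, error) {
    rpcCli, err := rpc.Dial(endpoint)
    if err != nil {
        return nil, nil, err
    }
    return rpcCli, ethclient.NewClient(rpcCli), nil
}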

View File

@@ -1049,7 +1049,15 @@ func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithCh
lastChunk := batch.Chunks[len(batch.Chunks)-1]
commitment := common.HexToHash(lastChunk.EndBlockHash)
version := encoding.CodecVersion(batch.Batch.CodecVersion)
var version uint8
if encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV8 {
// The validium version line starts at v1,
// but the rollup-relayer behavior follows v8.
version = 1
} else {
return nil, 0, 0, fmt.Errorf("unexpected codec version %d for validium mode", batch.Batch.CodecVersion)
}
calldata, err := r.validiumABI.Pack("commitBatch", version, common.HexToHash(batch.Batch.ParentBatchHash), common.HexToHash(batch.Batch.StateRoot), common.HexToHash(batch.Batch.WithdrawRoot), commitment[:])
if err != nil {
return nil, 0, 0, fmt.Errorf("failed to pack commitBatch: %w", err)

View File

@@ -52,7 +52,7 @@ func (s *Sender) estimateDynamicGas(to *common.Address, data []byte, baseFee uin
if err != nil {
log.Error("estimateDynamicGas estimateGasLimit failure",
"from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "to address", to.String(),
"fallback gas limit", "error", err)
"error", err)
return nil, err
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log"
@@ -24,6 +25,7 @@ type L2WatcherClient struct {
event.Feed
*ethclient.Client
rpcCli *rpc.Client
l2BlockOrm *orm.L2Block
@@ -32,16 +34,19 @@ type L2WatcherClient struct {
messageQueueAddress common.Address
withdrawTrieRootSlot common.Hash
validiumMode bool
metrics *l2WatcherMetrics
chainCfg *params.ChainConfig
}
// NewL2WatcherClient takes an l2geth RPC client and returns an L2WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *L2WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *rpc.Client, confirmations rpc.BlockNumber, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, chainCfg *params.ChainConfig, db *gorm.DB, validiumMode bool, reg prometheus.Registerer) *L2WatcherClient {
return &L2WatcherClient{
ctx: ctx,
Client: client,
Client: ethclient.NewClient(client),
rpcCli: client,
l2BlockOrm: orm.NewL2Block(db),
@@ -50,6 +55,8 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
messageQueueAddress: messageQueueAddress,
withdrawTrieRootSlot: withdrawTrieRootSlot,
validiumMode: validiumMode,
metrics: initL2WatcherMetrics(reg),
chainCfg: chainCfg,
@@ -95,21 +102,51 @@ func (w *L2WatcherClient) GetAndStoreBlocks(ctx context.Context, from, to uint64
return fmt.Errorf("failed to BlockByNumber: %v. number: %v", err, number)
}
blockTxs := block.Transactions()
var count int
for _, tx := range block.Transactions() {
for _, tx := range blockTxs {
if tx.IsL1MessageTx() {
count++
}
}
log.Info("retrieved block", "height", block.Header().Number, "hash", block.Header().Hash().String(), "L1 message count", count)
// use original (encrypted) L1 message txs in validium mode
if w.validiumMode {
var txs []*types.Transaction
if count > 0 {
log.Info("Fetching encrypted messages in validium mode")
err = w.rpcCli.CallContext(ctx, &txs, "scroll_getL1MessagesInBlock", block.Hash(), "synced")
if err != nil {
return fmt.Errorf("failed to get L1 messages: %v, block hash: %v", err, block.Hash().Hex())
}
}
// sanity check
if len(txs) != count {
return fmt.Errorf("L1 message count mismatch: expected %d, got %d", count, len(txs))
}
for ii := 0; ii < count; ii++ {
// sanity check
if blockTxs[ii].AsL1MessageTx().QueueIndex != txs[ii].AsL1MessageTx().QueueIndex {
return fmt.Errorf("L1 message queue index mismatch at index %d: expected %d, got %d", ii, blockTxs[ii].AsL1MessageTx().QueueIndex, txs[ii].AsL1MessageTx().QueueIndex)
}
log.Info("Replacing L1 message tx in validium mode", "index", ii, "queueIndex", txs[ii].AsL1MessageTx().QueueIndex, "decryptedTxHash", blockTxs[ii].Hash().Hex(), "originalTxHash", txs[ii].Hash().Hex())
blockTxs[ii] = txs[ii]
}
}
withdrawRoot, err3 := w.StorageAt(ctx, w.messageQueueAddress, w.withdrawTrieRootSlot, big.NewInt(int64(number)))
if err3 != nil {
return fmt.Errorf("failed to get withdrawRoot: %v. number: %v", err3, number)
}
blocks = append(blocks, &encoding.Block{
Header: block.Header(),
Transactions: encoding.TxsToTxsData(block.Transactions()),
Transactions: encoding.TxsToTxsData(blockTxs),
WithdrawRoot: common.BytesToHash(withdrawRoot),
})
}
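
For reference, a minimal standalone sketch of fetching the original (encrypted) L1 message txs of a block over raw RPC, mirroring the call added above. The method name scroll_getL1MessagesInBlock and the "synced" argument are taken from this change; the endpoint, block hash, and function name are placeholders.

package example

import (
    "context"
    "fmt"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/rpc"
)

// fetchEncryptedL1Messages returns the original (encrypted) L1 message txs of
// the given block, as served by a validium/cloak-enabled l2geth node.
func fetchEncryptedL1Messages(ctx context.Context, endpoint string, blockHash common.Hash) ([]*types.Transaction, error) {
    cli, err := rpc.DialContext(ctx, endpoint)
    if err != nil {
        return nil, fmt.Errorf("failed to dial %s: %w", endpoint, err)
    }
    defer cli.Close()

    var txs []*types.Transaction
    if err := cli.CallContext(ctx, &txs, "scroll_getL1MessagesInBlock", blockHash, "synced"); err != nil {
        return nil, fmt.Errorf("scroll_getL1MessagesInBlock failed: %w", err)
    }
    return txs, nil
}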

View File

@@ -2,12 +2,13 @@ package watcher
import (
"context"
"os"
"testing"
"gorm.io/gorm"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
@@ -20,7 +21,7 @@ import (
func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
db := setupDB(t)
l2cfg := cfg.L2Config
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, nil, db, nil)
watcher := NewL2WatcherClient(context.Background(), l2Rpc, l2cfg.Confirmations, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, nil, db, false, nil)
return watcher, db
}
@@ -34,7 +35,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
if err != nil {
return false
}
wc := prepareWatcherClient(l2Cli, db)
wc := prepareWatcherClient(l2Rpc, db)
wc.TryFetchRunningMissingBlocks(latestHeight)
fetchedHeight, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
return err == nil && fetchedHeight == latestHeight
@@ -42,7 +43,32 @@ func testFetchRunningMissingBlocks(t *testing.T) {
assert.True(t, ok)
}
func prepareWatcherClient(l2Cli *ethclient.Client, db *gorm.DB) *L2WatcherClient {
func prepareWatcherClient(l2Cli *rpc.Client, db *gorm.DB) *L2WatcherClient {
confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, common.Address{}, common.Hash{}, nil, db, nil)
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, common.Address{}, common.Hash{}, nil, db, false, nil)
}
// TestRawRPCGetL1MessagesInBlock exercises the raw scroll_getL1MessagesInBlock RPC against an endpoint URL taken from the environment.
func TestRawRPCGetL1MessagesInBlock(t *testing.T) {
url := os.Getenv("RPC_ENDPOINT_URL")
if url == "" {
t.Log("warn: RPC_ENDPOINT_URL not set, skipping raw RPC test")
t.Skip("missing RPC_ENDPOINT_URL")
}
ctx := context.Background()
cli, err := rpc.DialContext(ctx, url)
if err != nil {
t.Fatalf("failed to dial RPC endpoint %s: %v", url, err)
}
defer cli.Close()
var txs []*types.Transaction
blkHash := common.HexToHash("0xc80cf12883341827d71c08f734ba9a9d6da7e59eb16921d26e6706887e552c74")
err = cli.CallContext(ctx, &txs, "scroll_getL1MessagesInBlock", blkHash, "synced")
if err != nil {
t.Fatalf("scroll_getL1MessagesInBlock failed: err=%v", err)
}
if len(txs) == 0 {
t.Fatal("scroll_getL1MessagesInBlock returned no L1 message txs")
}
t.Log(txs, txs[0].Hash())
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
@@ -27,6 +28,7 @@ var (
// l2geth client
l2Cli *ethclient.Client
l2Rpc *rpc.Client
// block trace
block1 *encoding.Block
@@ -62,8 +64,9 @@ func setupEnv(t *testing.T) (err error) {
}
// Create l2geth client.
l2Cli, err = testApps.GetL2GethClient()
l2Rpc, err = testApps.GetL2Client()
assert.NoError(t, err)
l2Cli = ethclient.NewClient(l2Rpc)
block1 = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
block2 = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")

View File

@@ -8,6 +8,7 @@ import (
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log"
)
// ChunkMetrics indicates the metrics for proposing a chunk.
@@ -138,6 +139,26 @@ func encodeBatchHeaderValidium(b *encoding.Batch, codecVersion encoding.CodecVer
// TODO: This is a temporary solution, we might use a larger commitment in the future
lastBlock := b.Blocks[len(b.Blocks)-1]
commitment := lastBlock.Header.Hash()
stateRoot := b.StateRoot()
// Temporary workaround for the wrong genesis state root configuration issue.
if lastBlock.Header.Number.Uint64() == 0 {
if commitment == common.HexToHash("0x76a8e1359fe1a51ec3917ca98dec95ba005f1a73bcdbc2c7f87c7683e828fbb1") && stateRoot == common.HexToHash("0x08d535cc60f40af5dd3b31e0998d7567c2d568b224bed2ba26070aeb078d1339") {
// cloak-xen/sepolia
stateRoot = common.HexToHash("0x0711f02d6f85b0597c4705298e01ee27159fdd8bd8bdeda670ae8b9073091246")
} else if commitment == common.HexToHash("0x8005a02271085eaded2565f3e252013cd9d3cd0a4775d89f9ba4224289671276") && stateRoot == common.HexToHash("0x08d535cc60f40af5dd3b31e0998d7567c2d568b224bed2ba26070aeb078d1339") {
// cloak-xen/mainnet
stateRoot = common.HexToHash("0x8da1aaf41660ddf7870ab5ff4f6a3ab4b2e652568d341ede87ada56aad5fb097")
} else if commitment == common.HexToHash("0xa7e50dfc812039410c2009c74cdcb0c0797aa5485dec062985eaa43b17d333ea") && stateRoot == common.HexToHash("0x08d535cc60f40af5dd3b31e0998d7567c2d568b224bed2ba26070aeb078d1339") {
// cloak-etherfi/sepolia
stateRoot = common.HexToHash("0x7b44ea23770dda8810801779eb6847d56be0399e35de7c56465ccf8b7578ddf6")
} else if commitment == common.HexToHash("0xeccf4fab24f8b5dd3b72667c6bf5e28b17ccffdea01e3e5c08f393edaa9e7657") && stateRoot == common.HexToHash("0x08d535cc60f40af5dd3b31e0998d7567c2d568b224bed2ba26070aeb078d1339") {
// cloak-shiga/sepolia
stateRoot = common.HexToHash("0x05973227854ac82c22f164ed3d4510b7df516a0eecdfd9bed5f2446efc9994b9")
}
log.Warn("Using genesis state root", "stateRoot", stateRoot.Hex())
}
// Batch header field sizes
const (
@@ -164,10 +185,22 @@ func encodeBatchHeaderValidium(b *encoding.Batch, codecVersion encoding.CodecVer
commitmentOffset = withdrawRootOffset + withdrawRootSize
)
batchBytes[versionOffset] = uint8(codecVersion) // version
var version uint8
if codecVersion == encoding.CodecV8 {
// The validium version line starts at v1,
// but the rollup-relayer behavior follows v8.
version = 1
} else if codecVersion == encoding.CodecV0 {
// Special case for genesis batch
version = 0
} else {
return nil, common.Hash{}, fmt.Errorf("unexpected codec version %d for batch %v in validium mode", codecVersion, b.Index)
}
batchBytes[versionOffset] = version // version
binary.BigEndian.PutUint64(batchBytes[indexOffset:indexOffset+indexSize], b.Index) // batch index
copy(batchBytes[parentHashOffset:parentHashOffset+parentHashSize], b.ParentBatchHash[0:parentHashSize]) // parentBatchHash
copy(batchBytes[stateRootOffset:stateRootOffset+stateRootSize], b.StateRoot().Bytes()[0:stateRootSize]) // postStateRoot
copy(batchBytes[stateRootOffset:stateRootOffset+stateRootSize], stateRoot.Bytes()[0:stateRootSize]) // postStateRoot
copy(batchBytes[withdrawRootOffset:withdrawRootOffset+withdrawRootSize], b.WithdrawRoot().Bytes()[0:withdrawRootSize]) // postWithdrawRoot
copy(batchBytes[commitmentOffset:commitmentOffset+commitmentSize], commitment[0:commitmentSize]) // data commitment
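
For illustration only, a sketch of decoding a validium batch header with the layout used above, assuming a 1-byte version, an 8-byte big-endian index, and 32-byte parent hash, state root, withdraw root, and commitment fields (the exact field sizes are not spelled out in this hunk); the struct and function names are placeholders.

package example

import (
    "encoding/binary"
    "fmt"

    "github.com/scroll-tech/go-ethereum/common"
)

type validiumBatchHeader struct {
    Version      uint8
    Index        uint64
    ParentHash   common.Hash
    StateRoot    common.Hash
    WithdrawRoot common.Hash
    Commitment   common.Hash
}

// decodeValidiumBatchHeader reverses the encoding sketched above, assuming
// a total header length of 1 + 8 + 4*32 = 137 bytes.
func decodeValidiumBatchHeader(b []byte) (*validiumBatchHeader, error) {
    const headerLen = 1 + 8 + 4*32
    if len(b) != headerLen {
        return nil, fmt.Errorf("unexpected header length %d, want %d", len(b), headerLen)
    }
    h := &validiumBatchHeader{
        Version: b[0],
        Index:   binary.BigEndian.Uint64(b[1:9]),
    }
    copy(h.ParentHash[:], b[9:41])
    copy(h.StateRoot[:], b[41:73])
    copy(h.WithdrawRoot[:], b[73:105])
    copy(h.Commitment[:], b[105:137])
    return h, nil
}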

View File

@@ -10,9 +10,11 @@ import (
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/database"
ctypes "scroll-tech/common/types"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
@@ -46,6 +48,7 @@ func importData(ctx context.Context, beginBlk, endBlk uint64, chkNum, batchNum,
if err != nil {
return nil, err
}
ret := &importRecord{}
// Create a new random source with the provided seed
source := rand.NewSource(seed)
@@ -62,6 +65,9 @@ func importData(ctx context.Context, beginBlk, endBlk uint64, chkNum, batchNum,
log.Info("separated chunk", "border", chkSep)
head := beginBlk
lastMsgHash := common.Hash{}
if err := initLeadingChunk(ctx, db, beginBlk, endBlk, lastMsgHash); err != nil {
return nil, err
}
ormChks := make([]*orm.Chunk, 0, chkNum)
encChks := make([]*encoding.Chunk, 0, chkNum)
@@ -118,6 +124,73 @@ func importData(ctx context.Context, beginBlk, endBlk uint64, chkNum, batchNum,
return ret, nil
}
func initLeadingChunk(ctx context.Context, db *gorm.DB, beginBlk, endBlk uint64, prevMsgQueueHash common.Hash) error {
blockOrm := orm.NewL2Block(db)
if beginBlk <= 1 {
log.Info("start from genesis, no need to insert leading chunk")
return nil
}
var l1MsgPoppedBefore uint64
blks, err := blockOrm.GetL2BlocksGEHeight(ctx, beginBlk, int(endBlk-beginBlk+1))
if err != nil {
return err
}
for i, block := range blks {
for _, tx := range block.Transactions {
if tx.Type == types.L1MessageTxType {
l1MsgPoppedBefore = tx.Nonce
log.Info("search first l1 nonce", "index", l1MsgPoppedBefore, "blk", beginBlk+uint64(i))
break
}
}
if l1MsgPoppedBefore != 0 {
break
}
}
if l1MsgPoppedBefore == 0 {
log.Info("no l1 message in target blks, no need for leading chunk")
return nil
}
prevBlks, err := blockOrm.GetL2BlocksGEHeight(ctx, beginBlk-1, 1)
if err != nil {
log.Error("get prev block fail, we also need at least 1 block before selected range", "need block", beginBlk-1, "err", err)
return err
}
// we use InsertTestChunkForProposerTool to insert the leading chunk, which does not account for L1 messages,
// so we simply exclude L1 message txs from this hacked chunk
prevBlk := prevBlks[0]
var trimLen int
for _, tx := range prevBlk.Transactions {
if tx.Type != types.L1MessageTxType {
prevBlk.Transactions[trimLen] = tx
trimLen++
}
}
prevBlk.Transactions = prevBlk.Transactions[:trimLen]
postHash, err := encoding.MessageQueueV2ApplyL1MessagesFromBlocks(prevMsgQueueHash, prevBlks)
if err != nil {
return err
}
chunkOrm := orm.NewChunk(db)
log.Info("Insert leading chunk with prev block", "msgPoppedBefore", l1MsgPoppedBefore)
leadingChunk, err := chunkOrm.InsertTestChunkForProposerTool(ctx, &encoding.Chunk{
Blocks: prevBlks,
PrevL1MessageQueueHash: prevMsgQueueHash,
PostL1MessageQueueHash: postHash,
}, codecCfg, l1MsgPoppedBefore)
if err != nil {
return err
}
return chunkOrm.UpdateProvingStatus(ctx, leadingChunk.Hash, ctypes.ProvingTaskProvedDEPRECATED)
}
func importChunk(ctx context.Context, db *gorm.DB, beginBlk, endBlk uint64, prevMsgQueueHash common.Hash) (*orm.Chunk, *encoding.Chunk, error) {
nblk := int(endBlk-beginBlk) + 1
blockOrm := orm.NewL2Block(db)
@@ -183,9 +256,13 @@ func importBatch(ctx context.Context, db *gorm.DB, chks []*orm.Chunk, encChks []
ParentBatchHash: parentHash,
Chunks: encChks,
Blocks: blks,
PrevL1MessageQueueHash: encChks[0].PrevL1MessageQueueHash,
PostL1MessageQueueHash: encChks[len(encChks)-1].PostL1MessageQueueHash,
}
dbBatch, err := batchOrm.InsertBatch(ctx, batch, codecCfg, utils.BatchMetrics{})
dbBatch, err := batchOrm.InsertBatch(ctx, batch, codecCfg, utils.BatchMetrics{
ValidiumMode: cfg.ValidiumMode,
})
if err != nil {
return nil, err
}

View File

@@ -86,7 +86,8 @@ func parseThreeIntegers(value string) (int, int, int, error) {
// load a compatible type of config for rollup
type config struct {
DBConfig *database.Config `json:"db_config"`
DBConfig *database.Config `json:"db_config"`
ValidiumMode bool `json:"validium_mode"`
}
func init() {

View File

@@ -1,2 +1,3 @@
build/*
testset.json
testset.json
conf

View File

@@ -1,15 +1,24 @@
.PHONY: clean setup_db test_tool all check_vars
include conf/.make.env
GOOSE_CMD?=goose
BEGIN_BLOCK?=10973711
END_BLOCK?=10973721
ifndef BEGIN_BLOCK
$(error BEGIN_BLOCK is not set. Define it in conf/.make.env or pass BEGIN_BLOCK=<start_block>)
endif
ifndef END_BLOCK
$(error END_BLOCK is not set. Define it in conf/.make.env or pass END_BLOCK=<end_block>)
endif
all: setup_db test_tool import_data
clean:
docker compose down
check_vars:
conf:
@echo "Please link sepolia or cloak-xen as conf"
exit 1
check_vars: | conf
@if [ -z "$(BEGIN_BLOCK)" ] || [ -z "$(END_BLOCK)" ]; then \
echo "Error: BEGIN_BLOCK and END_BLOCK must be defined"; \
echo "Usage: make import_data BEGIN_BLOCK=<start_block> END_BLOCK=<end_block>"; \
@@ -33,7 +42,7 @@ setup_db: clean
fi; \
done
${GOOSE_CMD} up
GOOSE_MIGRATION_DIR=./ ${GOOSE_CMD} up-to 100
GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} up-to 100
test_tool:
go build -o $(PWD)/build/bin/e2e_tool ../../rollup/tests/integration_tool
@@ -41,7 +50,11 @@ test_tool:
build/bin/e2e_tool: test_tool
import_data_euclid: build/bin/e2e_tool check_vars
build/bin/e2e_tool --config ./config.json --codec 7 ${BEGIN_BLOCK} ${END_BLOCK}
build/bin/e2e_tool --config conf/config.json --codec 7 ${BEGIN_BLOCK} ${END_BLOCK}
import_data: build/bin/e2e_tool check_vars
build/bin/e2e_tool --config ./config.json --codec 8 ${BEGIN_BLOCK} ${END_BLOCK}
build/bin/e2e_tool --config conf/config.json --codec 8 ${BEGIN_BLOCK} ${END_BLOCK}
coordinator_setup:
$(MAKE) -C ../../coordinator localsetup
cp -f conf/genesis.json ../../coordinator/build/bin/conf

View File

@@ -1,12 +1,16 @@
## A new e2e test tool to set up a local environment for testing the coordinator and prover.
It contains data from some blocks in scroll sepolia, and helps to generate a series of chunks/batches/bundles from these blocks, filling the DB for the coordinator, so an e2e test (from chunk to bundle) can be run completely local
It contains data from some blocks in a specified testnet, and helps to generate a series of chunks/batches/bundles from these blocks, filling the DB for the coordinator, so an e2e test (from chunk to bundle) can be run completely locally.
Prepare:
link one of the provided config dirs as "conf"; currently the following config sets are available:
+ sepolia: with blocks from scroll sepolia
+ cloak-xen: with blocks from xen sepolia, which is a cloak network
Steps:
1. run `make all` under `tests/prover-e2e`; it launches a PostgreSQL DB in a local Docker container, ready to be used by the coordinator (including some chunks/batches/bundles waiting to be proven)
2. download circuit assets with `download-release.sh` script in `zkvm-prover`
3. generate the verifier stuff corresponding to the downloaded assets by `make gen_verifier_stuff` in `zkvm-prover`
4. setup `config.json` and `genesis.json` for coordinator, copy the generated verifier stuff in step 3 to the directory which coordinator would load them
5. build and launch `coordinator_api` service locally
6. setup the `config.json` for zkvm prover to connect with the locally launched coordinator api
7. in `zkvm-prover`, launch `make test_e2e_run`, which would specific prover run locally, connect to the local coordinator api service according to the `config.json`, and prove all tasks being injected to db in step 1.
2. set up assets by running `make coordinator_setup`
3. in `coordinator/build/bin/conf`, update the necessary items in `config.template.json` and rename it to `config.json`
4. build and launch the `coordinator_api` service locally
5. set up the `config.json` for the zkvm prover so it can connect to the locally launched coordinator API
6. in `zkvm-prover`, run `make test_e2e_run`, which launches a prover locally, connects it to the local coordinator API according to `config.json`, and proves all tasks injected into the DB in step 1.

View File

@@ -0,0 +1,2 @@
BEGIN_BLOCK?=35
END_BLOCK?=49

File diff suppressed because one or more lines are too long

View File

@@ -4,5 +4,6 @@
"dsn": "postgres://dev:dev@localhost:5432/scroll?sslmode=disable",
"maxOpenNum": 5,
"maxIdleNum": 1
}
},
"validium_mode": true
}

File diff suppressed because one or more lines are too long

View File

@@ -25,7 +25,7 @@ SELECT 'INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
quote_literal(transactions) ||
');'
FROM l2_block
WHERE number >= 10973700 and number <= 10973730
WHERE number >= 1 and number <= 49
ORDER BY number ASC;
-- Write footer

View File

@@ -0,0 +1,56 @@
-- Create a file with INSERT statements for the specific records
\o 00102_import_chunks.sql
\t on
\a
-- Write header comment
SELECT '-- +goose Up';
SELECT '-- +goose StatementBegin';
SELECT '';
SELECT
'INSERT INTO chunk (' ||
'index, hash, start_block_number, start_block_hash, end_block_number, end_block_hash, ' ||
'start_block_time, total_l1_messages_popped_before, total_l1_messages_popped_in_chunk, ' ||
'prev_l1_message_queue_hash, post_l1_message_queue_hash, parent_chunk_hash, state_root, ' ||
'parent_chunk_state_root, withdraw_root, codec_version, enable_compress, ' ||
'total_l2_tx_gas, total_l2_tx_num, total_l1_commit_calldata_size, total_l1_commit_gas, ' ||
'created_at, updated_at' ||
') VALUES (' ||
quote_literal(index) || ', ' ||
quote_literal(hash) || ', ' ||
quote_literal(start_block_number) || ', ' ||
quote_literal(start_block_hash) || ', ' ||
quote_literal(end_block_number) || ', ' ||
quote_literal(end_block_hash) || ', ' ||
quote_literal(start_block_time) || ', ' ||
quote_literal(total_l1_messages_popped_before) || ', ' ||
quote_literal(total_l1_messages_popped_in_chunk) || ', ' ||
quote_literal(prev_l1_message_queue_hash) || ', ' ||
quote_literal(post_l1_message_queue_hash) || ', ' ||
quote_literal(parent_chunk_hash) || ', ' ||
quote_literal(state_root) || ', ' ||
quote_literal(parent_chunk_state_root) || ', ' ||
quote_literal(withdraw_root) || ', ' ||
quote_literal(codec_version) || ', ' ||
quote_literal(enable_compress) || ', ' ||
quote_literal(total_l2_tx_gas) || ', ' ||
quote_literal(total_l2_tx_num) || ', ' ||
quote_literal(total_l1_commit_calldata_size) || ', ' ||
quote_literal(total_l1_commit_gas) || ', ' ||
quote_literal(created_at) || ', ' ||
quote_literal(updated_at) ||
');'
FROM chunk
ORDER BY index ASC;
-- Write footer
SELECT '';
SELECT '-- +goose StatementEnd';
SELECT '-- +goose Down';
SELECT '-- +goose StatementBegin';
SELECT 'DELETE FROM chunk;';
SELECT '-- +goose StatementEnd';
\t off
\a
\o

View File

@@ -0,0 +1,2 @@
BEGIN_BLOCK?=10973711
END_BLOCK?=10973721

View File

@@ -0,0 +1,9 @@
{
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://dev:dev@localhost:5432/scroll?sslmode=disable",
"maxOpenNum": 5,
"maxIdleNum": 1
},
"validium_mode": false
}