Compare commits


9 Commits

Author | SHA1 | Message | Date
Morty | 392ae07736 | feat(blob-uploader): support codec v8 (#1707) | 2025-07-24 01:34:46 +08:00
colin | db80b47820 | fix(rollup-relayer): upgrade boundary message queue hash initialization (#1706) | 2025-07-23 18:51:56 +08:00
Zhang Zhuo | daa1387208 | circuit-0.5.2 (#1705), Co-authored-by: georgehao <georgehao@users.noreply.github.com> | 2025-07-23 14:16:16 +08:00
Zhang Zhuo | 67b05558e2 | upgrade circuit to 0.5.2 (#1703), Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>, georgehao <georgehao@users.noreply.github.com> | 2025-07-23 10:52:08 +08:00
Ho | 1e447b0fef | [Fix] building failure in gpu image (#1702) | 2025-07-21 20:26:39 +08:00
georgehao | f7c6ecadf4 | bump to v4.5.31 (#1700) | 2025-07-18 16:41:59 +08:00
Ho | 9d94f943e5 | [Upgrade] feynman 0.5.0rc1 (#1699) | 2025-07-18 15:57:31 +08:00
Morty | de17ad43ff | fix(blob-uploader): orm function InsertOrUpdateBlobUpload and s3 bucket region configuration (#1679), Co-authored-by: yiweichi <yiweichi@users.noreply.github.com> | 2025-07-16 18:36:27 +08:00
colin | 4233ad928c | feat(rollup-relayer): support Validium (#1693), Co-authored-by: Péter Garamvölgyi <peter@scroll.io> | 2025-07-09 15:02:54 +08:00
40 changed files with 12076 additions and 551 deletions

Cargo.lock (generated), 856 changed lines
File diff suppressed because it is too large


@@ -17,12 +17,12 @@ repository = "https://github.com/scroll-tech/scroll"
version = "4.5.8"
[workspace.dependencies]
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "0498edb", package = "scroll-zkvm-prover" }
scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "0498edb", package = "scroll-zkvm-verifier" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "0498edb" }
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1", package = "scroll-zkvm-prover" }
scroll-zkvm-verifier-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1", package = "scroll-zkvm-verifier" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", branch = "feat/0.5.1" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/upgrade", features = ["scroll"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/upgrade" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3", features = ["scroll"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "chore/openvm-1.3" }
metrics = "0.23.0"
metrics-util = "0.17"
@@ -46,18 +46,18 @@ once_cell = "1.20"
base64 = "0.22"
[patch.crates-io]
revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-handler = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-precompile = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v74" }
revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-handler = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-precompile = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.15.0" }
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v1.2.0" }
@@ -65,4 +65,4 @@ alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch =
[profile.maxperf]
inherits = "release"
lto = "fat"
codegen-units = 1
codegen-units = 1


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.5.27"
var tag = "v4.5.35"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {


@@ -1,4 +1,4 @@
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator libzkp
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..


@@ -86,6 +86,10 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
var tmpBatchTask *orm.Batch
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBatch) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get batch has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
@@ -95,6 +99,14 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped batch. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected batch", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBatchTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpBatchTask == nil {
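The branch structure above (repeated below for the bundle and chunk provers) gives task selection a three-way precedence. A minimal Go sketch of that flow, with hypothetical stand-in types and a getByHash callback in place of the ORM calls:

package coordinator

import (
	"errors"
	"fmt"
)

// hypothetical stand-ins for the coordinator's ORM types
type task struct{ Hash string }

type assignedTask struct {
	TaskType int16
	TaskID   string
}

// selectTask sketches the precedence introduced in this diff:
// 1) reuse an already-assigned task, rejecting a proof-type mismatch;
// 2) otherwise honor an explicitly requested TaskID, which must still exist;
// 3) otherwise return nil so the caller assigns a fresh task.
func selectTask(assigned *assignedTask, wantType int16, requestedID string,
	getByHash func(id string) (*task, error)) (*task, error) {
	switch {
	case assigned != nil:
		if assigned.TaskType != wantType {
			return nil, errors.New("prover is already assigned a task of another type")
		}
		return getByHash(assigned.TaskID)
	case requestedID != "":
		t, err := getByHash(requestedID)
		if err != nil {
			return nil, err
		}
		if t == nil {
			return nil, fmt.Errorf("expected task (%s) is already dropped", requestedID)
		}
		return t, nil
	default:
		return nil, nil // fall through: pick an unassigned task from the queue
	}
}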


@@ -84,6 +84,10 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
var tmpBundleTask *orm.Bundle
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBundle) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get bundle has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
@@ -93,6 +97,14 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped bundle. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected bundle", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBundleTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpBundleTask == nil {
@@ -234,9 +246,14 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
}
parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
var prevStateRoot common.Hash
// this would be common in test cases: the first batch has empty parent
if batches[0].Index > 1 {
parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
}
prevStateRoot = common.HexToHash(parentBatch.StateRoot)
}
var batchProofs []*message.OpenVMBatchProof
@@ -255,7 +272,7 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
taskDetail.BundleInfo = &message.OpenVMBundleInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(parentBatch.StateRoot),
PrevStateRoot: prevStateRoot,
PostStateRoot: common.HexToHash(batches[len(batches)-1].StateRoot),
WithdrawRoot: common.HexToHash(batches[len(batches)-1].WithdrawRoot),
NumBatches: uint32(len(batches)),


@@ -80,7 +80,12 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpChunkTask *orm.Chunk
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeChunk) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
log.Debug("retrieved assigned task chunk", "taskID", taskCtx.hasAssignedTask.TaskID, "prover", taskCtx.ProverName)
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
@@ -91,6 +96,14 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped chunk. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected chunk", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpChunkTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpChunkTask == nil {
@@ -221,7 +234,7 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
// Get block hashes.
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
if dbErr != nil || len(blockHashes) == 0 {
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%w", task.TaskID, dbErr)
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%v", task.TaskID, dbErr)
}
var taskDetailBytes []byte


@@ -5,10 +5,12 @@ package verifier
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
"github.com/scroll-tech/go-ethereum/log"
@@ -117,6 +119,16 @@ func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName
return libzkp.VerifyBundleProof(string(buf), forkName), nil
}
/*
add vks of incompatible circuit apps here to avoid using them unexpectedly
25/07/15: 0.5.0rc0 is no longer compatible due to a breaking change
*/
const blocked_vks = `
rSJNNBpsxBdKlstbIIU/aYc7bHau98Qb2yjZMc5PmDhmGOolp5kYRbvF/VcWcO5HN5ujGs6S00W8pZcCoNQRLQ==,
2Lo7Cebm6SFtcsYXipkcMxIBmVY7UpoMXik/Msm7t2nyvi9EaNGsSnDnaCurscYEF+IcdjPUtVtY9EcD7IKwWg==,
D6YFHwTLZF/U2zpYJPQ3LwJZRm85yA5Vq2iFBqd3Mk4iwOUpS8sbOp3vg2+NDxhhKphgYpuUlykpdsoRhEt+cw==,
`
func (v *Verifier) loadOpenVMVks(cfg config.AssetConfig) error {
vkFileName := cfg.Vkfile
@@ -138,6 +150,16 @@ func (v *Verifier) loadOpenVMVks(cfg config.AssetConfig) error {
if err := json.Unmarshal(byt, &dump); err != nil {
return err
}
if strings.Contains(blocked_vks, dump.Chunk) {
return fmt.Errorf("loaded blocked chunk vk %s", dump.Chunk)
}
if strings.Contains(blocked_vks, dump.Batch) {
return fmt.Errorf("loaded blocked batch vk %s", dump.Batch)
}
if strings.Contains(blocked_vks, dump.Bundle) {
return fmt.Errorf("loaded blocked bundle vk %s", dump.Bundle)
}
v.OpenVMVkMap[dump.Chunk] = struct{}{}
v.OpenVMVkMap[dump.Batch] = struct{}{}
v.OpenVMVkMap[dump.Bundle] = struct{}{}
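Note that blocked_vks is a single newline- and comma-separated string, so membership is a strings.Contains substring match against the base64-encoded vk. An equivalent set-based check, shown only to make the matching explicit (this is not the code above):

package main

import (
	"fmt"
	"strings"
)

// the same entries as the blocklist above, abbreviated to the first two
const blockedVks = `
rSJNNBpsxBdKlstbIIU/aYc7bHau98Qb2yjZMc5PmDhmGOolp5kYRbvF/VcWcO5HN5ujGs6S00W8pZcCoNQRLQ==,
2Lo7Cebm6SFtcsYXipkcMxIBmVY7UpoMXik/Msm7t2nyvi9EaNGsSnDnaCurscYEF+IcdjPUtVtY9EcD7IKwWg==,
`

// blockedSet parses the blocklist once into a set for exact matching.
func blockedSet() map[string]struct{} {
	set := make(map[string]struct{})
	for _, entry := range strings.FieldsFunc(blockedVks, func(r rune) bool {
		return r == ',' || r == '\n'
	}) {
		if vk := strings.TrimSpace(entry); vk != "" {
			set[vk] = struct{}{}
		}
	}
	return set
}

func main() {
	vk := "rSJNNBpsxBdKlstbIIU/aYc7bHau98Qb2yjZMc5PmDhmGOolp5kYRbvF/VcWcO5HN5ujGs6S00W8pZcCoNQRLQ=="
	if _, hit := blockedSet()[vk]; hit {
		fmt.Println("refusing to load blocked vk")
	}
}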


@@ -13,33 +13,33 @@ openvm-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch =
openvm-transpiler = { git = "ssh://git@github.com/scroll-tech/openvm-gpu.git", branch = "patch-v1.2.1-rc.1-pipe", default-features = false }
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "sync/upstream-250702", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "sync/upstream-250702", features = ["gpu"] }
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], rev = "450ec18" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", rev = "450ec18" }
], tag = "v0.2.1" }
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.1" }

crates/gpu_override/Cargo.lock (generated, new file), 11002 lines
File diff suppressed because it is too large


@@ -0,0 +1,21 @@
.PHONY: build update clean
ZKVM_COMMIT ?= freebuild
PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
$(info PLONKY3_GPU_VERSION is ${PLONKY3_GPU_VERSION})
GIT_REV ?= $(shell git rev-parse --short HEAD)
GO_TAG ?= $(shell grep "var tag = " ../../common/version/version.go | cut -d "\"" -f2)
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
$(info ZK_GPU_VERSION is ${ZK_VERSION})
clean:
cargo clean -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
# build gpu prover, never touch lock file
build:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock
# update Cargo.lock when the override config has been updated
#update:
# GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build -Z unstable-options --release -p prover --lockfile-path ./Cargo.lock


@@ -1,6 +1,6 @@
#!/bin/bash
config_file=~/.cargo/config.toml
config_file=.cargo/config.toml
plonky3_gpu_path=$(grep 'path.*plonky3-gpu' "$config_file" | cut -d'"' -f2 | head -n 1)
plonky3_gpu_path=$(dirname "$plonky3_gpu_path")


@@ -108,38 +108,42 @@ impl ChunkInterpreter for RpcClient<'_> {
.get_block_by_hash(block_hash)
.full()
.await?
.ok_or_else(|| eyre::eyre!("Block not found"))?;
.ok_or_else(|| eyre::eyre!("Block {block_hash} not found"))?;
let number = block.header.number;
let parent_hash = block.header.parent_hash;
if number == 0 {
eyre::bail!("no number in header or use block 0");
}
let prev_state_root = if let Some(witness) = prev_witness {
if witness.header.number != number - 1 {
eyre::bail!(
"the ref witness is not the previous block, expected {} get {}",
number - 1,
witness.header.number,
);
}
witness.header.state_root
} else {
provider
.scroll_disk_root((number - 1).into())
.await?
.disk_root
};
let witness = WitnessBuilder::new()
let mut witness_builder = WitnessBuilder::new()
.block(block)
.chain_id(chain_id)
.execution_witness(provider.debug_execution_witness(number.into()).await?)
.state_root(provider.scroll_disk_root(number.into()).await?.disk_root)?
.prev_state_root(prev_state_root)
.build()?;
.execution_witness(provider.debug_execution_witness(number.into()).await?);
Ok(witness)
let prev_state_root = match prev_witness {
Some(witness) => {
if witness.header.number != number - 1 {
eyre::bail!(
"the ref witness is not the previous block, expected {} get {}",
number - 1,
witness.header.number,
);
}
witness.header.state_root
}
None => {
let parent_block = provider
.get_block_by_hash(parent_hash)
.await?
.expect("parent block should exist");
parent_block.header.state_root
}
};
witness_builder = witness_builder.prev_state_root(prev_state_root);
Ok(witness_builder.build()?)
}
tracing::debug!("fetch witness for {block_hash}");
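The rewritten branch takes prev_state_root from the parent block's header rather than the scroll_disk_root RPC. For reference, the same lookup written in Go against go-ethereum's ethclient (a sketch; the endpoint and hash are placeholders):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	// parentHash would come from the target block's header
	parentHash := common.HexToHash("0x0")
	parent, err := client.BlockByHash(context.Background(), parentHash)
	if err != nil {
		log.Fatal(err) // the Rust code likewise expects the parent block to exist
	}
	// the parent header's state root is the target block's prev_state_root
	fmt.Println("prev_state_root:", parent.Root())
}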


@@ -9,7 +9,7 @@ scroll-zkvm-types.workspace = true
scroll-zkvm-verifier-euclid.workspace = true
alloy-primitives.workspace = true #depress the effect of "native-keccak"
sbv-primitives.workspace = true
sbv-primitives = {workspace = true, features = ["scroll-compress-ratio", "scroll"]}
base64.workspace = true
serde.workspace = true
serde_derive.workspace = true


@@ -5,7 +5,7 @@ pub use verifier::{TaskType, VerifierConfig};
mod utils;
use sbv_primitives::B256;
use scroll_zkvm_types::{public_inputs::ForkName, util::vec_as_base64};
use scroll_zkvm_types::util::vec_as_base64;
use serde::{Deserialize, Serialize};
use serde_json::value::RawValue;
use std::path::Path;
@@ -49,27 +49,39 @@ pub fn gen_universal_task(
let (pi_hash, metadata, mut u_task) = match task_type {
x if x == TaskType::Chunk as i32 => {
let mut task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
let fork_name = ForkName::from(task.fork_name.to_lowercase().as_str());
task.fork_name = fork_name.to_string();
assert_eq!(fork_name_str, task.fork_name.as_str());
let (pi_hash, metadata, u_task) =
gen_universal_chunk_task(task, fork_name, interpreter)?;
// normalize fork name field in task
task.fork_name = task.fork_name.to_lowercase();
// always respect the fork_name_str (which has been normalized) being passed
// if the fork_name wrapped in the task does not match, consider it a malformed task
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) = utils::panic_catch(move || {
gen_universal_chunk_task(task, fork_name_str.into(), interpreter)
})
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Chunk(metadata), u_task)
}
x if x == TaskType::Batch as i32 => {
let mut task = serde_json::from_str::<BatchProvingTask>(task_json)?;
let fork_name = ForkName::from(task.fork_name.to_lowercase().as_str());
task.fork_name = fork_name.to_string();
assert_eq!(fork_name_str, task.fork_name.as_str());
let (pi_hash, metadata, u_task) = gen_universal_batch_task(task, fork_name)?;
task.fork_name = task.fork_name.to_lowercase();
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in batch task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) =
utils::panic_catch(move || gen_universal_batch_task(task, fork_name_str.into()))
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Batch(metadata), u_task)
}
x if x == TaskType::Bundle as i32 => {
let mut task = serde_json::from_str::<BundleProvingTask>(task_json)?;
let fork_name = ForkName::from(task.fork_name.to_lowercase().as_str());
task.fork_name = fork_name.to_string();
assert_eq!(fork_name_str, task.fork_name.as_str());
let (pi_hash, metadata, u_task) = gen_universal_bundle_task(task, fork_name)?;
task.fork_name = task.fork_name.to_lowercase();
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in bundle task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
}
let (pi_hash, metadata, u_task) =
utils::panic_catch(move || gen_universal_bundle_task(task, fork_name_str.into()))
.map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
(pi_hash, AnyMetaData::Bundle(metadata), u_task)
}
_ => return Err(eyre::eyre!("unrecognized task type {task_type}")),
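Across all three branches the old assert_eq! is replaced by the same policy: lowercase the fork name carried in the task, then treat any mismatch with the normalized fork_name_str argument as a malformed task rather than a panic. The policy, restated in Go for brevity (the actual implementation is the Rust above):

package main

import (
	"fmt"
	"strings"
)

// checkForkName mirrors the normalize-then-compare policy.
func checkForkName(expected, inTask string) error {
	normalized := strings.ToLower(inTask)
	if expected != normalized {
		return fmt.Errorf("fork name in task does not match the calling arg, expected %s, got %s",
			expected, normalized)
	}
	return nil
}

func main() {
	fmt.Println(checkForkName("feynman", "Feynman"))  // <nil>
	fmt.Println(checkForkName("feynman", "euclidV2")) // error
}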


@@ -9,7 +9,10 @@ pub use chunk::{ChunkProvingTask, ChunkTask};
pub use chunk_interpreter::ChunkInterpreter;
pub use scroll_zkvm_types::task::ProvingTask;
use crate::proofs::{self, BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata};
use crate::{
proofs::{self, BatchProofMetadata, BundleProofMetadata, ChunkProofMetadata},
utils::panic_catch,
};
use sbv_primitives::B256;
use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs};
@@ -20,25 +23,14 @@ fn check_aggregation_proofs<Metadata>(
where
Metadata: proofs::ProofMetadata,
{
use std::panic::{self, AssertUnwindSafe};
panic::catch_unwind(AssertUnwindSafe(|| {
panic_catch(|| {
for w in proofs.windows(2) {
w[1].metadata
.pi_hash_info()
.validate(w[0].metadata.pi_hash_info(), fork_name);
}
}))
.map_err(|e| {
let error_msg = if let Some(string) = e.downcast_ref::<String>() {
string.clone()
} else if let Some(str) = e.downcast_ref::<&str>() {
str.to_string()
} else {
"Unknown validation error occurred".to_string()
};
eyre::eyre!("Chunk data validation failed: {}", error_msg)
})?;
})
.map_err(|e| eyre::eyre!("Chunk data validation failed: {}", e))?;
Ok(())
}
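The refactor moves the catch_unwind call plus the String/&str downcasting into a shared utils::panic_catch helper, so each call site only formats the resulting message. The equivalent panic-to-error guard in Go, for comparison (illustrative only):

package main

import "fmt"

// panicCatch runs f and converts any panic into an ordinary error,
// the role utils::panic_catch plays in the Rust code above.
func panicCatch(f func()) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("caught panic: %v", r)
		}
	}()
	f()
	return nil
}

func main() {
	err := panicCatch(func() { panic("validation failed") })
	fmt.Println(err) // caught panic: validation failed
}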


@@ -4,8 +4,9 @@ use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchInfo, BatchWitness, Envelope, EnvelopeV6,
EnvelopeV7, PointEvalWitness, ReferenceHeader, ToArchievedWitness, N_BLOB_BYTES,
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderV8, BatchInfo, BatchWitness,
Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8, PointEvalWitness, ReferenceHeader,
ToArchievedWitness, N_BLOB_BYTES,
},
public_inputs::ForkName,
task::ProvingTask,
@@ -23,37 +24,35 @@ use utils::{base64, point_eval};
#[serde(untagged)]
pub enum BatchHeaderV {
V6(BatchHeaderV6),
V7(BatchHeaderV7),
}
impl From<BatchHeaderV> for ReferenceHeader {
fn from(value: BatchHeaderV) -> Self {
match value {
BatchHeaderV::V6(h) => ReferenceHeader::V6(h),
BatchHeaderV::V7(h) => ReferenceHeader::V7(h),
}
}
V7_8(BatchHeaderV7),
}
impl BatchHeaderV {
pub fn batch_hash(&self) -> B256 {
match self {
BatchHeaderV::V6(h) => h.batch_hash(),
BatchHeaderV::V7(h) => h.batch_hash(),
BatchHeaderV::V7_8(h) => h.batch_hash(),
}
}
pub fn must_v6_header(&self) -> &BatchHeaderV6 {
match self {
BatchHeaderV::V6(h) => h,
BatchHeaderV::V7(_) => panic!("try to pick v7 header"),
_ => panic!("try to pick other header type"),
}
}
pub fn must_v7_header(&self) -> &BatchHeaderV7 {
match self {
BatchHeaderV::V7(h) => h,
BatchHeaderV::V6(_) => panic!("try to pick v6 header"),
BatchHeaderV::V7_8(h) => h,
_ => panic!("try to pick other header type"),
}
}
pub fn must_v8_header(&self) -> &BatchHeaderV8 {
match self {
BatchHeaderV::V7_8(h) => h,
_ => panic!("try to pick other header type"),
}
}
}
@@ -120,20 +119,28 @@ impl BatchProvingTask {
EnvelopeV6::from_slice(self.blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
BatchHeaderV::V7(_) => {
match fork_name {
ForkName::EuclidV2 => (),
_ => unreachable!("hardfork mismatch for da-codec@v6 header: found={fork_name:?}, expected={:?}",
[ForkName::EuclidV2],
),
}
BatchHeaderV::V7_8(_) => {
let padded_blob_bytes = {
let mut padded_blob_bytes = self.blob_bytes.to_vec();
padded_blob_bytes.resize(N_BLOB_BYTES, 0);
padded_blob_bytes
};
EnvelopeV7::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
match fork_name {
ForkName::EuclidV2 => {
<EnvelopeV7 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
ForkName::Feynman => {
<EnvelopeV8 as Envelope>::from_slice(padded_blob_bytes.as_slice())
.challenge_digest(versioned_hash)
}
f => unreachable!(
"hardfork mismatch for da-codec@v7 header: found={}, expected={:?}",
f,
[ForkName::EuclidV2, ForkName::Feynman],
),
}
}
};
@@ -159,7 +166,11 @@ impl BatchProvingTask {
kzg_proof: kzg_proof.into_inner(),
};
let reference_header = self.batch_header.clone().into();
let reference_header = match fork_name {
ForkName::EuclidV1 => ReferenceHeader::V6(*self.batch_header.must_v6_header()),
ForkName::EuclidV2 => ReferenceHeader::V7(*self.batch_header.must_v7_header()),
ForkName::Feynman => ReferenceHeader::V8(*self.batch_header.must_v8_header()),
};
BatchWitness {
fork_name,


@@ -64,7 +64,6 @@ fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskT
let fork_name_str = c_char_to_str(fork_name);
let proof_str = proof;
let proof = c_char_to_vec(proof);
tracing::info!("verify proof for fork {fork_name_str}, type {task_type}");
match libzkp::verify_proof(proof, fork_name_str, task_type) {
Err(e) => {
@@ -179,13 +178,10 @@ pub unsafe extern "C" fn gen_universal_task(
&[]
};
let fork_name_str = c_char_to_str(fork_name);
tracing::info!("generate universtal task for fork {fork_name_str}, type {task_type}");
let ret = libzkp::gen_universal_task(
task_type,
&task_json,
fork_name_str,
c_char_to_str(fork_name),
expected_vk,
interpreter,
);
@@ -206,7 +202,8 @@ pub unsafe extern "C" fn gen_universal_task(
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let filename = format!("/tmp/task_{}_{}.json", fork_name_str, timestamp);
let c_str = unsafe { std::ffi::CStr::from_ptr(fork_name) };
let filename = format!("/tmp/task_{}_{}.json", c_str.to_str().unwrap(), timestamp);
if let Err(e) = std::fs::write(&filename, task_json.as_bytes()) {
eprintln!("Failed to write task to file {}: {}", filename, e);
} else {


@@ -5,9 +5,10 @@ mod zk_circuits_handler;
use clap::{ArgAction, Parser, Subcommand};
use prover::{LocalProver, LocalProverConfig};
use scroll_proving_sdk::{
prover::ProverBuilder,
prover::{types::ProofType, ProverBuilder},
utils::{get_version, init_tracing},
};
use std::{fs::File, io::BufReader, path::Path};
#[derive(Parser, Debug)]
#[command(disable_version_flag = true)]
@@ -38,6 +39,17 @@ enum Commands {
/// path to save the verifier's asset
asset_path: String,
},
Handle {
/// path to a json file listing the task set to handle
task_path: String,
},
}
#[derive(Debug, serde::Deserialize)]
struct HandleSet {
chunks: Vec<String>,
batches: Vec<String>,
bundles: Vec<String>,
}
#[tokio::main]
@@ -62,6 +74,40 @@ async fn main() -> eyre::Result<()> {
println!("dump assets for {fork_name} into {asset_path}");
local_prover.dump_verifier_assets(&fork_name, asset_path.as_ref())?;
}
Some(Commands::Handle { task_path }) => {
let file = File::open(Path::new(&task_path))?;
let reader = BufReader::new(file);
let handle_set: HandleSet = serde_json::from_reader(reader)?;
let prover = ProverBuilder::new(sdk_config, local_prover)
.build()
.await
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;
let prover = std::sync::Arc::new(prover);
println!("Handling task set 1: chunks ...");
assert!(
prover
.clone()
.one_shot(&handle_set.chunks, ProofType::Chunk)
.await
);
println!("Done! Handling task set 2: batches ...");
assert!(
prover
.clone()
.one_shot(&handle_set.batches, ProofType::Batch)
.await
);
println!("Done! Handling task set 3: bundles ...");
assert!(
prover
.clone()
.one_shot(&handle_set.bundles, ProofType::Bundle)
.await
);
println!("All done!");
}
None => {
let prover = ProverBuilder::new(sdk_config, local_prover)
.build()


@@ -203,6 +203,10 @@ impl LocalProver {
.get(hard_fork_name)
.ok_or_else(|| eyre::eyre!("no corresponding config for fork {hard_fork_name}"))?;
if !config.vks.is_empty() {
eyre::bail!("clean vks cache first or we will have wrong dumped vk");
}
let workspace_path = &config.workspace_path;
let universal_prover = EuclidV2Handler::new(config);
let _ = universal_prover

File diff suppressed because one or more lines are too long


@@ -70,7 +70,7 @@ func action(ctx *cli.Context) error {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
}
go utils.Loop(subCtx, 2*time.Second, blobUploader.UploadBlobToS3)
go utils.Loop(subCtx, 1*time.Second, blobUploader.UploadBlobToS3)
// Finish start all blob-uploader functions.
log.Info("Start blob-uploader successfully", "version", version.Version)


@@ -107,7 +107,7 @@ func action(ctx *cli.Context) error {
}
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, genesis.Config, db, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, cfg.L2Config.RelayerConfig.ValidiumMode, registry)
bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry)
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry)


@@ -36,6 +36,7 @@
"endpoint": "https://rpc.scroll.io",
"l2_message_queue_address": "0x0000000000000000000000000000000000000000",
"relayer_config": {
"validium_mode": false,
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
"sender_config": {
@@ -123,4 +124,4 @@
"maxOpenNum": 200,
"maxIdleNum": 20
}
}
}


@@ -53,6 +53,8 @@ type ChainMonitor struct {
// RelayerConfig loads relayer configuration items.
// What we need to pay attention to is that
type RelayerConfig struct {
// ValidiumMode indicates if the relayer is in validium mode.
ValidiumMode bool `json:"validium_mode"`
// RollupContractAddress store the rollup contract address.
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
// GasPriceOracleContractAddress store the scroll messenger contract address.
@@ -73,8 +75,6 @@ type RelayerConfig struct {
// Indicates if bypass features specific to testing environments are enabled.
EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
// The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
// The timeout in seconds for finalizing a bundle without proof, only used when EnableTestEnvBypassFeatures is true.
FinalizeBundleWithoutProofTimeoutSec uint64 `json:"finalize_bundle_without_proof_timeout_sec"`
}
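Wiring-wise, the new validium_mode key in relayer_config (see the JSON diff above) lands in this ValidiumMode field. A minimal sketch of loading and branching on it, with the struct trimmed to the relevant field:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// trimmed-down mirror of RelayerConfig, only the new flag
type relayerConfig struct {
	ValidiumMode bool `json:"validium_mode"`
}

func main() {
	raw := []byte(`{"validium_mode": false, "rollup_contract_address": "0x0000000000000000000000000000000000000000"}`)
	var cfg relayerConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		log.Fatal(err)
	}
	if cfg.ValidiumMode {
		fmt.Println("validium mode: single-batch commits with an off-chain data commitment")
	} else {
		fmt.Println("rollup mode: blob-carrying commitBatches") // default
	}
}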


@@ -167,7 +167,7 @@ func (b *BlobUploader) constructBlobCodec(dbBatch *orm.Batch) (*kzg4844.Blob, er
Chunks: chunks,
}
case encoding.CodecV7:
case encoding.CodecV7, encoding.CodecV8:
encodingBatch = &encoding.Batch{
Index: dbBatch.Index,
ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash),


@@ -25,8 +25,6 @@ type S3Uploader struct {
func NewS3Uploader(cfg *config.AWSS3Config) (*S3Uploader, error) {
// load AWS config
var opts []func(*awsconfig.LoadOptions) error
opts = append(opts, awsconfig.WithRegion(cfg.Region))
// if AccessKey && SecretKey provided, use it
if cfg.AccessKey != "" && cfg.SecretKey != "" {
opts = append(opts, awsconfig.WithCredentialsProvider(
@@ -38,6 +36,10 @@ func NewS3Uploader(cfg *config.AWSS3Config) (*S3Uploader, error) {
)
}
if cfg.Region != "" {
opts = append(opts, awsconfig.WithRegion(cfg.Region))
}
awsCfg, err := awsconfig.LoadDefaultConfig(context.Background(), opts...)
if err != nil {
return nil, fmt.Errorf("failed to load default config: %w", err)


@@ -79,6 +79,7 @@ type Layer2Relayer struct {
commitSender *sender.Sender
finalizeSender *sender.Sender
l1RollupABI *abi.ABI
validiumABI *abi.ABI
l2GasOracleABI *abi.ABI
@@ -172,6 +173,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
commitSender: commitSender,
finalizeSender: finalizeSender,
l1RollupABI: bridgeAbi.ScrollChainABI,
validiumABI: bridgeAbi.ValidiumABI,
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
batchStrategy: strategy,
@@ -239,10 +241,11 @@ func (r *Layer2Relayer) initializeGenesis() error {
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
Blocks: chunk.Blocks,
}
var dbBatch *orm.Batch
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, rutils.BatchMetrics{}, dbTX)
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, rutils.BatchMetrics{ValidiumMode: r.cfg.ValidiumMode}, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch: %v", err)
}
@@ -274,10 +277,23 @@ func (r *Layer2Relayer) initializeGenesis() error {
}
func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
// encode "importGenesisBatch" transaction calldata
calldata, packErr := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if packErr != nil {
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
var calldata []byte
var packErr error
if r.cfg.ValidiumMode {
// validium mode: only pass batchHeader
calldata, packErr = r.validiumABI.Pack("importGenesisBatch", batchHeader)
if packErr != nil {
return fmt.Errorf("failed to pack validium importGenesisBatch with batch header: %v. error: %v", common.Bytes2Hex(batchHeader), packErr)
}
log.Info("Validium importGenesis", "calldata", common.Bytes2Hex(calldata))
} else {
// rollup mode: pass batchHeader and stateRoot
calldata, packErr = r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if packErr != nil {
return fmt.Errorf("failed to pack rollup importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
}
log.Info("Rollup importGenesis", "calldata", common.Bytes2Hex(calldata), "stateRoot", stateRoot)
}
// submit genesis batch to L1 rollup contract
@@ -285,7 +301,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
if err != nil {
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
}
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash)
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash, "validium", r.cfg.ValidiumMode)
// wait for confirmation
// we assume that no other transactions are sent before initializeGenesis completes
@@ -310,20 +326,23 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
if !confirmation.IsSuccessful {
return errors.New("import genesis batch tx failed")
}
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String())
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String(), "validium", r.cfg.ValidiumMode)
return nil
}
}
}
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
// Pending batchess are submitted if one of the following conditions is met:
// Pending batches are submitted if one of the following conditions is met:
// - the first batch is too old -> forceSubmit
// - backlogCount > r.cfg.BatchSubmission.BacklogMax -> forceSubmit
// - we have at least minBatches AND price hits a desired target price
func (r *Layer2Relayer) ProcessPendingBatches() {
// Get effective batch limits based on whether validium mode is enabled.
minBatches, maxBatches := r.getEffectiveBatchLimits()
// get pending batches from database in ascending order by their index.
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, r.cfg.BatchSubmission.MaxBatches)
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, maxBatches)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
@@ -432,21 +451,21 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
break
}
if batchesToSubmitLen < r.cfg.BatchSubmission.MaxBatches {
if batchesToSubmitLen < maxBatches {
batchesToSubmit = append(batchesToSubmit, &dbBatchWithChunks{
Batch: dbBatch,
Chunks: dbChunks,
})
}
if len(batchesToSubmit) >= r.cfg.BatchSubmission.MaxBatches {
if len(batchesToSubmit) >= maxBatches {
break
}
}
// we only submit batches if we have a timeout or if we have enough batches to submit
if !forceSubmit && len(batchesToSubmit) < r.cfg.BatchSubmission.MinBatches {
log.Debug("Not enough batches to submit", "count", len(batchesToSubmit), "minBatches", r.cfg.BatchSubmission.MinBatches, "maxBatches", r.cfg.BatchSubmission.MaxBatches)
if !forceSubmit && len(batchesToSubmit) < minBatches {
log.Debug("Not enough batches to submit", "count", len(batchesToSubmit), "minBatches", minBatches, "maxBatches", maxBatches)
return
}
@@ -466,10 +485,22 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
codecVersion := encoding.CodecVersion(firstBatch.CodecVersion)
switch codecVersion {
case encoding.CodecV7, encoding.CodecV8:
calldata, blobs, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadCodecV7(batchesToSubmit, firstBatch, lastBatch)
if err != nil {
log.Error("failed to construct constructCommitBatchPayloadCodecV7 payload for V7", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
return
if r.cfg.ValidiumMode {
if len(batchesToSubmit) != 1 {
log.Error("validium mode only supports committing one batch at a time", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "batches count", len(batchesToSubmit))
return
}
calldata, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadValidium(batchesToSubmit[0])
if err != nil {
log.Error("failed to construct validium payload", "codecVersion", codecVersion, "index", batchesToSubmit[0].Batch.Index, "err", err)
return
}
} else {
calldata, blobs, maxBlockHeight, totalGasUsed, err = r.constructCommitBatchPayloadCodecV7(batchesToSubmit, firstBatch, lastBatch)
if err != nil {
log.Error("failed to construct normal payload", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "err", err)
return
}
}
default:
log.Error("unsupported codec version in ProcessPendingBatches", "codecVersion", codecVersion, "start index", firstBatch, "end index", lastBatch.Index)
@@ -522,6 +553,14 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
log.Info("Sent the commitBatches tx to layer1", "batches count", len(batchesToSubmit), "start index", firstBatch.Index, "start hash", firstBatch.Hash, "end index", lastBatch.Index, "end hash", lastBatch.Hash, "tx hash", txHash.String())
}
// getEffectiveBatchLimits returns the effective min and max batch limits based on whether validium mode is enabled.
func (r *Layer2Relayer) getEffectiveBatchLimits() (int, int) {
if r.cfg.ValidiumMode {
return 1, 1 // minBatches=1, maxBatches=1
}
return r.cfg.BatchSubmission.MinBatches, r.cfg.BatchSubmission.MaxBatches
}
func (r *Layer2Relayer) contextIDFromBatches(codecVersion encoding.CodecVersion, batches []*dbBatchWithChunks) string {
contextIDs := []string{fmt.Sprintf("v%d", codecVersion)}
for _, batch := range batches {
@@ -690,9 +729,16 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
var calldata []byte
switch encoding.CodecVersion(bundle.CodecVersion) {
case encoding.CodecV7, encoding.CodecV8:
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct finalizeBundle payload codecv7, bundle index: %v, last batch index: %v, err: %w", bundle.Index, dbBatch.Index, err)
if r.cfg.ValidiumMode {
calldata, err = r.constructFinalizeBundlePayloadValidium(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct validium finalizeBundle payload, codec version: %v, bundle index: %v, last batch index: %v, err: %w", dbBatch.CodecVersion, bundle.Index, dbBatch.Index, err)
}
} else {
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
if err != nil {
return fmt.Errorf("failed to construct normal finalizeBundle payload, codec version: %v, bundle index: %v, last batch index: %v, err: %w", dbBatch.CodecVersion, bundle.Index, dbBatch.Index, err)
}
}
default:
return fmt.Errorf("unsupported codec version in finalizeBundle, bundle index: %v, version: %d", bundle.Index, bundle.CodecVersion)
@@ -951,6 +997,35 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*db
return calldata, blobs, maxBlockHeight, totalGasUsed, nil
}
func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithChunks) ([]byte, uint64, uint64, error) {
// Calculate metrics
var maxBlockHeight uint64
var totalGasUsed uint64
for _, c := range batch.Chunks {
if c.EndBlockNumber > maxBlockHeight {
maxBlockHeight = c.EndBlockNumber
}
totalGasUsed += c.TotalL2TxGas
}
// Get the commitment from the batch data: for validium mode, we use the last L2 block hash as the commitment to the off-chain data
// Get the last chunk from the batch to find the end block hash
// TODO: This is a temporary solution, we might use a larger commitment in the future
if len(batch.Chunks) == 0 {
return nil, 0, 0, fmt.Errorf("last batch has no chunks")
}
lastChunk := batch.Chunks[len(batch.Chunks)-1]
commitment := common.HexToHash(lastChunk.EndBlockHash)
version := encoding.CodecVersion(batch.Batch.CodecVersion)
calldata, err := r.validiumABI.Pack("commitBatch", version, common.HexToHash(batch.Batch.ParentBatchHash), common.HexToHash(batch.Batch.StateRoot), common.HexToHash(batch.Batch.WithdrawRoot), commitment[:])
if err != nil {
return nil, 0, 0, fmt.Errorf("failed to pack commitBatch: %w", err)
}
log.Info("Validium commitBatch", "maxBlockHeight", maxBlockHeight, "commitment", commitment.Hex())
return calldata, maxBlockHeight, totalGasUsed, nil
}
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
if aggProof != nil { // finalizeBundle with proof.
calldata, packErr := r.l1RollupABI.Pack(
@@ -967,7 +1042,8 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch
return calldata, nil
}
fmt.Println("packing finalizeBundlePostEuclidV2NoProof", len(dbBatch.BatchHeader), dbBatch.CodecVersion, dbBatch.BatchHeader, new(big.Int).SetUint64(endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk), common.HexToHash(dbBatch.StateRoot), common.HexToHash(dbBatch.WithdrawRoot))
log.Info("Packing finalizeBundlePostEuclidV2NoProof", "batchHeaderLength", len(dbBatch.BatchHeader), "codecVersion", dbBatch.CodecVersion, "totalL1Messages", endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk, "stateRoot", dbBatch.StateRoot, "withdrawRoot", dbBatch.WithdrawRoot)
// finalizeBundle without proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBundlePostEuclidV2NoProof",
@@ -982,6 +1058,26 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch
return calldata, nil
}
func (r *Layer2Relayer) constructFinalizeBundlePayloadValidium(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
log.Info("Packing validium finalizeBundle", "batchHeaderLength", len(dbBatch.BatchHeader), "codecVersion", dbBatch.CodecVersion, "totalL1Messages", endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk, "stateRoot", dbBatch.StateRoot, "withdrawRoot", dbBatch.WithdrawRoot, "withProof", aggProof != nil)
var proof []byte
if aggProof != nil {
proof = aggProof.Proof()
}
calldata, packErr := r.validiumABI.Pack(
"finalizeBundle",
dbBatch.BatchHeader,
new(big.Int).SetUint64(endChunk.TotalL1MessagesPoppedBefore+endChunk.TotalL1MessagesPoppedInChunk),
proof,
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack validium finalizeBundle: %w", packErr)
}
return calldata, nil
}
// StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests.
// for unit test
func (r *Layer2Relayer) StopSenders() {


@@ -32,6 +32,7 @@ type BatchProposer struct {
cfg *config.BatchProposerConfig
replayMode bool
validiumMode bool
minCodecVersion encoding.CodecVersion
chainCfg *params.ChainConfig
@@ -53,7 +54,7 @@ type BatchProposer struct {
}
// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, validiumMode bool, reg prometheus.Registerer) *BatchProposer {
log.Info("new batch proposer", "batchTimeoutSec", cfg.BatchTimeoutSec, "maxBlobSize", maxBlobSize, "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize)
p := &BatchProposer{
@@ -63,7 +64,8 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
cfg: cfg,
replayMode: false,
replayMode: false, // default is false, set to true when using proposer tool
validiumMode: validiumMode,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
@@ -171,7 +173,7 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
// recalculate batch metrics after truncation
var calcErr error
metrics, calcErr = utils.CalculateBatchMetrics(batch, codecVersion)
metrics, calcErr = utils.CalculateBatchMetrics(batch, codecVersion, p.validiumMode)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics, batch index: %v, error: %w", batch.Index, calcErr)
}
@@ -287,7 +289,7 @@ func (p *BatchProposer) proposeBatch() error {
batch.Blocks = append(batch.Blocks, chunk.Blocks...)
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i].PostL1MessageQueueHash)
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}
@@ -312,7 +314,7 @@ func (p *BatchProposer) proposeBatch() error {
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i-1].PostL1MessageQueueHash)
batch.Blocks = batch.Blocks[:len(batch.Blocks)-len(lastChunk.Blocks)]
metrics, err = utils.CalculateBatchMetrics(&batch, codec.Version())
metrics, err = utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
if err != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", err)
}
@@ -322,7 +324,7 @@ func (p *BatchProposer) proposeBatch() error {
}
}
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version(), p.validiumMode)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}


@@ -100,7 +100,7 @@ func testBatchProposerLimitsCodecV7(t *testing.T) {
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
}, db, nil)
}, db, false /* rollup mode */, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
@@ -178,7 +178,7 @@ func testBatchProposerBlobSizeLimitCodecV7(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: math.MaxUint32,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
for i := 0; i < 2; i++ {
bp.TryProposeBatch()
@@ -246,7 +246,7 @@ func testBatchProposerMaxChunkNumPerBatchLimitCodecV7(t *testing.T) {
MaxChunksPerBatch: 45,
BatchTimeoutSec: math.MaxUint32,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
bp.TryProposeBatch()
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
@@ -335,7 +335,7 @@ func testBatchProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32, // No chunk count limit
BatchTimeoutSec: math.MaxUint32, // No timeout limit
MaxUncompressedBatchBytesSize: 4 * 1024, // 4KiB limit
}, encoding.CodecV8, chainConfig, db, nil)
}, encoding.CodecV8, chainConfig, db, false /* rollup mode */, nil)
bp.TryProposeBatch()


@@ -103,7 +103,7 @@ func testBundleProposerLimitsCodecV7(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 0,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
cp.TryProposeChunk() // chunk1 contains block1
bap.TryProposeBatch() // batch1 contains chunk1


@@ -268,13 +268,9 @@ func (p *ChunkProposer) proposeChunk() error {
return fmt.Errorf("failed to get parent chunk: %w", err)
}
// Currently rollup-relayer only supports >= v7 codec version, it checks the minimum codec version after start.
// In EuclidV2 transition, empty PostL1MessageQueueHash will be naturally initialized to the first chunk's PrevL1MessageQueueHash.
chunk.PrevL1MessageQueueHash = common.HexToHash(parentChunk.PostL1MessageQueueHash)
// previous chunk is not CodecV7, this means this is the first chunk of the fork.
if encoding.CodecVersion(parentChunk.CodecVersion) < codecVersion {
chunk.PrevL1MessageQueueHash = common.Hash{}
}
chunk.PostL1MessageQueueHash = chunk.PrevL1MessageQueueHash
var previousPostL1MessageQueueHash common.Hash


@@ -125,7 +125,7 @@ func NewProposerTool(ctx context.Context, cancel context.CancelFunc, cfg *config
chunkProposer := NewChunkProposer(ctx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, chainCfg, db, nil)
chunkProposer.SetReplayDB(dbForReplay)
batchProposer := NewBatchProposer(ctx, cfg.L2Config.BatchProposerConfig, minCodecVersion, chainCfg, db, nil)
batchProposer := NewBatchProposer(ctx, cfg.L2Config.BatchProposerConfig, minCodecVersion, chainCfg, db, false /* rollup mode */, nil)
batchProposer.SetReplayDB(dbForReplay)
bundleProposer := NewBundleProposer(ctx, cfg.L2Config.BundleProposerConfig, minCodecVersion, chainCfg, db, nil)


@@ -285,7 +285,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
startChunkIndex = parentBatch.EndChunkIndex + 1
}
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion)
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion, metrics.ValidiumMode)
if err != nil {
log.Error("failed to get batch metadata", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)


@@ -115,7 +115,8 @@ func (o *BlobUpload) InsertOrUpdateBlobUpload(ctx context.Context, batchIndex ui
return fmt.Errorf("BlobUpload.InsertOrUpdateBlobUpload query error: %w, batch index: %v, batch_hash: %v, platform: %v", err, batchIndex, batchHash, platform)
}
if err := db.Model(&existing).Update("status", int16(status)).Error; err != nil {
if err := db.Model(&existing).Where("batch_index = ? AND batch_hash = ? AND platform = ? AND deleted_at IS NULL",
batchIndex, batchHash, int16(platform)).Update("status", int16(status)).Error; err != nil {
return fmt.Errorf("BlobUpload.InsertOrUpdateBlobUpload update error: %w, batch index: %v, batch_hash: %v, platform: %v", err, batchIndex, batchHash, platform)
}
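The original call let GORM derive the UPDATE's WHERE clause from whatever primary-key value happened to be loaded into existing; the fix states the predicate explicitly, so the statement's scope no longer depends on how existing was scanned. A contrast sketch under assumed model fields:

package orm

import "gorm.io/gorm"

// blobUpload is an assumed mirror of the ORM model, for illustration only.
type blobUpload struct {
	BatchIndex uint64
	BatchHash  string
	Platform   int16
	Status     int16
}

func updateStatus(db *gorm.DB, batchIndex uint64, batchHash string, platform, status int16) error {
	// explicit predicate: updates exactly the intended row, regardless of
	// which fields were populated in a previously scanned struct
	return db.Model(&blobUpload{}).
		Where("batch_index = ? AND batch_hash = ? AND platform = ? AND deleted_at IS NULL",
			batchIndex, batchHash, platform).
		Update("status", status).Error
}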


@@ -1,11 +1,13 @@
package utils
import (
"encoding/binary"
"fmt"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
)
// ChunkMetrics indicates the metrics for proposing a chunk.
@@ -60,15 +62,18 @@ type BatchMetrics struct {
L1CommitBlobSize uint64
L1CommitUncompressedBatchBytesSize uint64
ValidiumMode bool // default false: rollup mode
// timing metrics
EstimateBlobSizeTime time.Duration
}
// CalculateBatchMetrics calculates batch metrics.
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetrics, error) {
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion, validiumMode bool) (*BatchMetrics, error) {
metrics := &BatchMetrics{
NumChunks: uint64(len(batch.Chunks)),
FirstBlockTimestamp: batch.Chunks[0].Blocks[0].Header.Time,
ValidiumMode: validiumMode,
}
codec, err := encoding.CodecFromVersion(codecVersion)
@@ -119,8 +124,59 @@ type BatchMetadata struct {
ChallengeDigest common.Hash
}
// encodeBatchHeaderValidium encodes batch header for validium mode and returns both encoded bytes and hash
func encodeBatchHeaderValidium(b *encoding.Batch, codecVersion encoding.CodecVersion) ([]byte, common.Hash, error) {
if b == nil {
return nil, common.Hash{}, fmt.Errorf("batch is nil, version: %v, index: %v", codecVersion, b.Index)
}
if len(b.Blocks) == 0 {
return nil, common.Hash{}, fmt.Errorf("batch contains no blocks, version: %v, index: %v", codecVersion, b.Index)
}
// For validium mode, use the last block hash as commitment to the off-chain data
// TODO: This is a temporary solution, we might use a larger commitment in the future
lastBlock := b.Blocks[len(b.Blocks)-1]
commitment := lastBlock.Header.Hash()
// Batch header field sizes
const (
versionSize = 1
indexSize = 8
parentHashSize = 32
stateRootSize = 32
withdrawRootSize = 32
commitmentSize = 32 // TODO: 32 bytes for now, might use larger commitment in the future
// Total size of validium batch header
validiumBatchHeaderSize = versionSize + indexSize + parentHashSize + stateRootSize + withdrawRootSize + commitmentSize
)
batchBytes := make([]byte, validiumBatchHeaderSize)
// Define offsets for each field
var (
versionOffset = 0
indexOffset = versionOffset + versionSize
parentHashOffset = indexOffset + indexSize
stateRootOffset = parentHashOffset + parentHashSize
withdrawRootOffset = stateRootOffset + stateRootSize
commitmentOffset = withdrawRootOffset + withdrawRootSize
)
batchBytes[versionOffset] = uint8(codecVersion) // version
binary.BigEndian.PutUint64(batchBytes[indexOffset:indexOffset+indexSize], b.Index) // batch index
copy(batchBytes[parentHashOffset:parentHashOffset+parentHashSize], b.ParentBatchHash[0:parentHashSize]) // parentBatchHash
copy(batchBytes[stateRootOffset:stateRootOffset+stateRootSize], b.StateRoot().Bytes()[0:stateRootSize]) // postStateRoot
copy(batchBytes[withdrawRootOffset:withdrawRootOffset+withdrawRootSize], b.WithdrawRoot().Bytes()[0:withdrawRootSize]) // postWithdrawRoot
copy(batchBytes[commitmentOffset:commitmentOffset+commitmentSize], commitment[0:commitmentSize]) // data commitment
hash := crypto.Keccak256Hash(batchBytes)
return batchBytes, hash, nil
}
// GetBatchMetadata retrieves the metadata of a batch.
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetadata, error) {
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion, validiumMode bool) (*BatchMetadata, error) {
codec, err := encoding.CodecFromVersion(codecVersion)
if err != nil {
return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
@@ -139,9 +195,17 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
ChallengeDigest: daBatch.ChallengeDigest(),
}
// In validium mode, the batch header is encoded differently.
if validiumMode {
batchMeta.BatchBytes, batchMeta.BatchHash, err = encodeBatchHeaderValidium(batch, codecVersion)
if err != nil {
return nil, fmt.Errorf("failed to encode batch header for validium, version: %v, index: %v, err: %w", codecVersion, batch.Index, err)
}
}
batchMeta.BatchBlobDataProof, err = daBatch.BlobDataProofForPointEvaluation()
if err != nil {
return nil, fmt.Errorf("failed to get blob data proof, version: %v, err: %w", codecVersion, err)
return nil, fmt.Errorf("failed to get blob data proof, version: %v, index: %v, err: %w", codecVersion, batch.Index, err)
}
numChunks := len(batch.Chunks)

View File
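`encodeBatchHeaderValidium` above lays the header out as 1 + 8 + 32 + 32 + 32 + 32 = 137 fixed bytes and hashes them with keccak256, so any change to the state root, withdraw root, or data commitment changes the batch hash. A decode sketch for the same layout, written only to illustrate the byte offsets (the function name and struct are hypothetical):

```go
// Hypothetical decoder for the 137-byte validium header encoded above;
// assumes the encoding/binary, fmt and go-ethereum common imports used there.
type validiumHeader struct {
	Version      uint8
	Index        uint64
	ParentHash   common.Hash
	StateRoot    common.Hash
	WithdrawRoot common.Hash
	Commitment   common.Hash // last block hash, per the TODO in the encoder
}

func decodeValidiumHeader(b []byte) (*validiumHeader, error) {
	const size = 1 + 8 + 32 + 32 + 32 + 32 // 137 bytes total
	if len(b) != size {
		return nil, fmt.Errorf("invalid validium header length: %d, want %d", len(b), size)
	}
	h := &validiumHeader{Version: b[0], Index: binary.BigEndian.Uint64(b[1:9])}
	copy(h.ParentHash[:], b[9:41])
	copy(h.StateRoot[:], b[41:73])
	copy(h.WithdrawRoot[:], b[73:105])
	copy(h.Commitment[:], b[105:137])
	return h, nil
}
```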

@@ -128,7 +128,7 @@ func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, false /* rollup mode */, nil)
bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
MaxBatchNumPerBundle: 2,

View File

@@ -3,7 +3,7 @@
RUST_MIN_STACK ?= 16777216
export RUST_MIN_STACK
CIRCUIT_STUFF = .work/chunk/app.vmexe .work/batch/app.vmexe .work/bundle/app.vmexe
CIRCUIT_STUFF = .work/euclid/chunk/app.vmexe .work/feynman/chunk/app.vmexe
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
PLONKY3_VERSION=$(shell grep -m 1 "Plonky3.git" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
@@ -21,7 +21,7 @@ endif
ZKVM_COMMIT=$(shell echo ${ZKVM_VERSION} | cut -d " " -f2)
$(info ZKVM_COMMIT is ${ZKVM_COMMIT})
#PLONKY3_GPU_VERSION=$(shell ./print_plonky3gpu_version.sh | sed -n '2p')
$(info PLONKY3_VERSION is ${PLONKY3_VERSION})
GIT_REV=$(shell git rev-parse --short HEAD)
GO_TAG=$(shell grep "var tag = " ../common/version/version.go | cut -d "\"" -f2)
@@ -33,25 +33,15 @@ else
endif
ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
#ifeq (${PLONKY3_GPU_VERSION},)
# # use plonky3 with CPU
# ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
#else
# # use gpu
# ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION}
#endif
prover_gpu:
cd ../crates/gpu_override && cargo tree >/dev/null
$(eval PLONKY3_GPU_VERSION:=$(shell ./print_plonky3gpu_version.sh | sed -n '2p'))
$(eval ZK_VERSION:=${ZKVM_COMMIT}-${PLONKY3_GPU_VERSION})
@echo "Updated ZK_VERSION to ${ZK_VERSION} after prover_gpu"
E2E_HANDLE_SET = ../tests/prover-e2e/testset.json
DUMP_DIR = .work
prover: prover_gpu
cd ../crates/gpu_override && GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release -p prover
prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZKVM_COMMIT=${ZKVM_COMMIT} $(MAKE) -C ../crates/gpu_override build
prover_cpu:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release -p prover
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release -p prover
tests_binary:
@@ -64,7 +54,15 @@ lint:
cargo fmt --all
$(CIRCUIT_STUFF):
bash .work/download-release.sh
@echo "Download stuff with download-release.sh, and put them into correct directory";
@exit 1;
test_run: $(CIRCUIT_STUFF)
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json
test_e2e_run: $(CIRCUIT_STUFF) ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}
gen_verifier_stuff:
mkdir -p ${DUMP_DIR}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json --forkname feynman dump ${DUMP_DIR}

View File

@@ -3,7 +3,7 @@
# Define version mapping
declare -A VERSION_MAP
VERSION_MAP["euclid"]="0.4.3"
VERSION_MAP["feynman"]="0.5.0rc0"
VERSION_MAP["feynman"]="0.5.0rc1"
# release version
if [ -z "${SCROLL_ZKVM_VERSION}" ]; then

View File

@@ -0,0 +1,14 @@
#!/bin/bash
# release version
SCROLL_ZKVM_STUFFDIR="${SCROLL_ZKVM_STUFFDIR:-$(realpath .work)}"
SCROLL_ZKVM_VERSION="${SCROLL_ZKVM_VERSION:-0.5.0rc1}"
DIR_OUTPUT="releases/${SCROLL_ZKVM_VERSION}/verifier"
STUFF_FILES=('root-verifier-committed-exe' 'root-verifier-vm-config' 'verifier.bin' 'openVmVk.json')
for stuff_file in "${STUFF_FILES[@]}"; do
SRC="${SCROLL_ZKVM_STUFFDIR}/${stuff_file}"
TARGET="${DIR_OUTPUT}/${stuff_file}"
aws --profile default s3 cp "$SRC" "s3://circuit-release/scroll-zkvm/$TARGET"
done