Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 15:08:09 -05:00)

Compare commits: morty-feat...develop (13 commits)
Commits in this range:
7de388ef1a, 27dd62eac3, 22479a7952, 690bc01c41, e75d6c16a9, 752e4e1117, 2ecc42e2f5, de72e2dccb, edb51236e2, 15a23478d1, 9100a0bd4a, 0ede0cd41f, 9dceae1ca2
Cargo.lock (generated, 967 lines changed): file diff suppressed because it is too large.
Cargo.toml (45 lines changed):

@@ -14,16 +14,16 @@ edition = "2021"
 homepage = "https://scroll.io"
 readme = "README.md"
 repository = "https://github.com/scroll-tech/scroll"
-version = "4.6.3"
+version = "4.7.1"

 [workspace.dependencies]
-scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "360f364" }
-scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "360f364" }
-scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "360f364" }
+scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
+scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
+scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }

-sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master", features = ["scroll", "rkyv"] }
-sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master" }
-sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", branch = "master", features = ["scroll"] }
+sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll", "rkyv"] }
+sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2" }
+sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll"] }

 metrics = "0.23.0"
 metrics-util = "0.17"
@@ -31,14 +31,14 @@ metrics-tracing-context = "0.16.0"

 anyhow = "1.0"
 alloy = { version = "1", default-features = false }
-alloy-primitives = { version = "1.3", default-features = false, features = ["tiny-keccak"] }
+alloy-primitives = { version = "1.4.1", default-features = false, features = ["tiny-keccak"] }
 # also use this to trigger "serde" feature for primitives
 alloy-serde = { version = "1", default-features = false }

 serde = { version = "1", default-features = false, features = ["derive"] }
 serde_json = { version = "1.0" }
 serde_derive = "1.0"
-serde_with = "3.11.0"
+serde_with = "3"
 itertools = "0.14"
 tiny-keccak = "2.0"
 tracing = "0.1"
@@ -46,21 +46,20 @@ eyre = "0.6"
 once_cell = "1.20"
 base64 = "0.22"

-[patch.crates-io]
-revm = { git = "https://github.com/scroll-tech/revm" }
-revm-bytecode = { git = "https://github.com/scroll-tech/revm" }
-revm-context = { git = "https://github.com/scroll-tech/revm" }
-revm-context-interface = { git = "https://github.com/scroll-tech/revm" }
-revm-database = { git = "https://github.com/scroll-tech/revm" }
-revm-database-interface = { git = "https://github.com/scroll-tech/revm" }
-revm-handler = { git = "https://github.com/scroll-tech/revm" }
-revm-inspector = { git = "https://github.com/scroll-tech/revm" }
-revm-interpreter = { git = "https://github.com/scroll-tech/revm" }
-revm-precompile = { git = "https://github.com/scroll-tech/revm" }
-revm-primitives = { git = "https://github.com/scroll-tech/revm" }
-revm-state = { git = "https://github.com/scroll-tech/revm" }
-
-alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "feat/rkyv" }
+[patch.crates-io]
+revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-bytecode = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-context = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-context-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-database = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-database-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-handler = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-inspector = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-interpreter = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-precompile = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-primitives = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
+revm-state = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }

 [profile.maxperf]
 inherits = "release"
Makefile (2 lines changed):

@@ -1,6 +1,6 @@
 .PHONY: fmt dev_docker build_test_docker run_test_docker clean update

-L2GETH_TAG=scroll-v5.9.7
+L2GETH_TAG=scroll-v5.9.17

 help: ## Display this help message
 	@grep -h \
go.mod:

@@ -10,8 +10,8 @@ require (
 	github.com/go-redis/redis/v8 v8.11.5
 	github.com/pressly/goose/v3 v3.16.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/da-codec v0.9.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20251113125950-906b730d541d
+	github.com/scroll-tech/da-codec v0.10.0
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
 	github.com/stretchr/testify v1.10.0
 	github.com/urfave/cli/v2 v2.25.7
 	golang.org/x/sync v0.11.0

@@ -21,7 +21,7 @@ require (
 // Hotfix for header hash incompatibility issue.
 // PR: https://github.com/scroll-tech/go-ethereum/pull/1133/
 // CAUTION: Requires careful handling. When upgrading go-ethereum, ensure this fix remains up-to-date in this branch.
-replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e
+replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b

 require (
 	dario.cat/mergo v1.0.0 // indirect
go.sum:

@@ -309,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
-github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e h1:7U1/JilCPIqJTYNgKzlfBEkRNORRwJ8+PCmdR8/XK6A=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
+github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
+github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b h1:pMQKnroJoS/FeL1aOWkz7/u1iBHUP8PWjZstNuzoUGE=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
Go source:

@@ -361,7 +361,6 @@ func getTxHistoryInfoFromBridgeBatchDepositMessage(message *orm.BridgeBatchDepos
 func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pageNum, pageSize uint64) ([]*types.TxHistoryInfo, uint64, bool, error) {
 	start := int64((pageNum - 1) * pageSize)
 	end := start + int64(pageSize) - 1
-
 	total, err := h.redis.ZCard(ctx, cacheKey).Result()
 	if err != nil {
 		log.Error("failed to get zcard result", "error", err)

@@ -372,6 +371,10 @@ func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pa
 		return nil, 0, false, nil
 	}

+	if start >= total {
+		return nil, 0, false, nil
+	}
+
 	values, err := h.redis.ZRevRange(ctx, cacheKey, start, end).Result()
 	if err != nil {
 		log.Error("failed to get zrange result", "error", err)

@@ -450,5 +453,6 @@ func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKe
 		log.Error("cache miss after write, expect hit", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
 		return nil, 0, err
 	}
+
 	return pagedTxs, total, nil
 }
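The added guard keeps ZRevRange from being queried with a window that starts past the end of the cached sorted set. A minimal, self-contained Go sketch of the index arithmetic (helper names are illustrative, not from the repo):

```go
// Pages are 1-based; sorted-set indexes are 0-based. The guard returns an
// empty page instead of issuing a range query beyond the cached total.
package main

import "fmt"

func pageWindow(pageNum, pageSize uint64, total int64) (start, end int64, ok bool) {
	start = int64((pageNum - 1) * pageSize)
	end = start + int64(pageSize) - 1
	if start >= total {
		return 0, 0, false // request begins past the end of the cache
	}
	return start, end, true
}

func main() {
	// 25 cached entries, pages of 10: page 3 covers indexes 20..24, page 4 is empty.
	for page := uint64(1); page <= 4; page++ {
		start, end, ok := pageWindow(page, 10, 25)
		fmt.Println(page, start, end, ok)
	}
}
```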
Go source:

@@ -157,7 +157,7 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
 	db = db.Where("tx_status in (?)", []types.TxStatusType{types.TxStatusTypeSent, types.TxStatusTypeFailedRelayed, types.TxStatusTypeRelayTxReverted})
 	db = db.Where("sender = ?", sender)
 	db = db.Order("block_timestamp desc")
-	db = db.Limit(500)
+	db = db.Limit(10000)
 	if err := db.Find(&messages).Error; err != nil {
 		return nil, fmt.Errorf("failed to get L2 claimable withdrawal messages by sender address, sender: %v, error: %w", sender, err)
 	}
go.mod:

@@ -15,7 +15,7 @@ require (
 	github.com/modern-go/reflect2 v1.0.2
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
 	github.com/stretchr/testify v1.10.0
 	github.com/testcontainers/testcontainers-go v0.30.0
 	github.com/testcontainers/testcontainers-go/modules/compose v0.30.0

@@ -184,7 +184,7 @@ require (
 	github.com/rjeczalik/notify v0.9.1 // indirect
 	github.com/rs/cors v1.7.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/scroll-tech/da-codec v0.9.0 // indirect
+	github.com/scroll-tech/da-codec v0.10.0 // indirect
 	github.com/scroll-tech/zktrie v0.8.4 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
 	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
go.sum:

@@ -636,10 +636,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
-github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
+github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
+github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
Docker Compose config:

@@ -34,7 +34,7 @@ services:

   # Sets up the genesis configuration for the go-ethereum client from a JSON file.
   geth-genesis:
-    image: "ethereum/client-go:v1.13.14"
+    image: "ethereum/client-go:v1.14.0"
     command: --datadir=/data/execution init /data/execution/genesis.json
     volumes:
       - data:/data

@@ -80,7 +80,7 @@ services:
   # Runs the go-ethereum execution client with the specified, unlocked account and necessary
   # APIs to allow for proof-of-stake consensus via Prysm.
   geth:
-    image: "ethereum/client-go:v1.13.14"
+    image: "ethereum/client-go:v1.14.0"
     command:
       - --http
      - --http.api=eth,net,web3
Dockerfile:

@@ -1,4 +1,4 @@
-FROM ethereum/client-go:v1.13.14
+FROM ethereum/client-go:v1.14.0

 COPY password /l1geth/
 COPY genesis.json /l1geth/
Go source (package testcontainers):

@@ -167,13 +167,13 @@ func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) {
 	return contrainer.PortEndpoint(context.Background(), "8545/tcp", "http")
 }

-// GetPoSL1Client returns a ethclient by dialing running PoS L1 client
-func (t *TestcontainerApps) GetPoSL1Client() (*ethclient.Client, error) {
+// GetPoSL1Client returns a raw rpc client by dialing the L1 node
+func (t *TestcontainerApps) GetPoSL1Client() (*rpc.Client, error) {
 	endpoint, err := t.GetPoSL1EndPoint()
 	if err != nil {
 		return nil, err
 	}
-	return ethclient.Dial(endpoint)
+	return rpc.Dial(endpoint)
 }

 // GetDBEndPoint returns the endpoint of the running postgres container

@@ -221,7 +221,6 @@ func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {

 // GetL2GethClient returns a ethclient by dialing running L2Geth
 func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
-
 	rpcCli, err := t.GetL2Client()
 	if err != nil {
 		return nil, err

@@ -3,7 +3,6 @@ package testcontainers
 import (
 	"testing"

-	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/stretchr/testify/assert"
 	"gorm.io/gorm"
 )

@@ -14,7 +13,6 @@ func TestNewTestcontainerApps(t *testing.T) {
 		err          error
 		endpoint     string
 		gormDBclient *gorm.DB
-		ethclient    *ethclient.Client
 	)

 	testApps := NewTestcontainerApps()

@@ -32,17 +30,17 @@ func TestNewTestcontainerApps(t *testing.T) {
 	endpoint, err = testApps.GetL2GethEndPoint()
 	assert.NoError(t, err)
 	assert.NotEmpty(t, endpoint)
-	ethclient, err = testApps.GetL2GethClient()
+	l2RawClient, err := testApps.GetL2Client()
 	assert.NoError(t, err)
-	assert.NotNil(t, ethclient)
+	assert.NotNil(t, l2RawClient)

 	assert.NoError(t, testApps.StartPoSL1Container())
 	endpoint, err = testApps.GetPoSL1EndPoint()
 	assert.NoError(t, err)
 	assert.NotEmpty(t, endpoint)
-	ethclient, err = testApps.GetPoSL1Client()
+	l1RawClient, err := testApps.GetPoSL1Client()
 	assert.NoError(t, err)
-	assert.NotNil(t, ethclient)
+	assert.NotNil(t, l1RawClient)

 	assert.NoError(t, testApps.StartWeb3SignerContainer(1))
 	endpoint, err = testApps.GetWeb3SignerEndpoint()
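GetPoSL1Client now hands back the raw *rpc.Client instead of an *ethclient.Client. Callers that still want the typed eth API can wrap the raw client; a hedged sketch, assuming the scroll-tech go-ethereum fork keeps upstream's ethclient.NewClient (it does in upstream go-ethereum):

```go
// Dial once, keep both views of the connection: the raw JSON-RPC client for
// arbitrary methods and the typed wrapper for the standard eth namespace.
package main

import (
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rpc"
)

func dialBoth(endpoint string) (*rpc.Client, *ethclient.Client, error) {
	rpcCli, err := rpc.Dial(endpoint) // what GetPoSL1Client now returns
	if err != nil {
		return nil, nil, err
	}
	return rpcCli, ethclient.NewClient(rpcCli), nil // typed wrapper when needed
}

func main() {
	// endpoint is a placeholder; in the tests it comes from GetPoSL1EndPoint().
	if rpcCli, ethCli, err := dialBoth("http://127.0.0.1:8545"); err == nil {
		defer rpcCli.Close()
		_ = ethCli
	}
}
```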
Go source:

@@ -17,7 +17,6 @@ var (
 		&MetricsPort,
 		&ServicePortFlag,
 		&Genesis,
-		&RevertFlag,
 	}
 	// RollupRelayerFlags contains flags only used in rollup-relayer
 	RollupRelayerFlags = []cli.Flag{

@@ -27,10 +26,6 @@ var (
 	ProposerToolFlags = []cli.Flag{
 		&StartL2BlockFlag,
 	}
-	RevertFlag = cli.BoolFlag{
-		Name:  "revert",
-		Usage: "To revert the batch",
-	}
 	// ConfigFileFlag load json type config file.
 	ConfigFileFlag = cli.StringFlag{
 		Name: "config",
Go source:

@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )

-var tag = "v4.6.3"
+var tag = "v4.7.10"

 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
Makefile (coordinator):

@@ -37,7 +37,12 @@ coordinator_tool:
 localsetup: coordinator_api ## Local setup: build coordinator_api, copy config, and setup releases
 	mkdir -p build/bin/conf
 	@echo "Copying configuration files..."
-	cp -fL $(CURDIR)/conf/config.json $(CURDIR)/build/bin/conf/config.template.json
+	@if [ -f "$(PWD)/conf/config.template.json" ]; then \
+		SRC="$(PWD)/conf/config.template.json"; \
+	else \
+		SRC="$(CURDIR)/conf/config.json"; \
+	fi; \
+	cp -fL "$$SRC" "$(CURDIR)/build/bin/conf/config.template.json"
 	@echo "Setting up releases..."
 	cd $(CURDIR)/build && bash setup_releases.sh
Shell script:

@@ -6,6 +6,9 @@ if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
 	exit 1
 fi

+# default fork name from env or "galileov2"
+SCROLL_FORK_NAME="${SCROLL_FORK_NAME:-galileov2}"
+
 # set ASSET_DIR by reading from config.json
 CONFIG_FILE="bin/conf/config.template.json"
 if [ ! -f "$CONFIG_FILE" ]; then

@@ -28,7 +31,13 @@ for ((i=0; i<$VERIFIER_COUNT; i++)); do
 	# extract assets_path for current verifier
 	ASSETS_PATH=$(jq -r ".prover_manager.verifier.verifiers[$i].assets_path" "$CONFIG_FILE")
+	FORK_NAME=$(jq -r ".prover_manager.verifier.verifiers[$i].fork_name" "$CONFIG_FILE")

+	# skip if this verifier's fork doesn't match the target fork
+	if [ "$FORK_NAME" != "$SCROLL_FORK_NAME" ]; then
+		echo "Expect $SCROLL_FORK_NAME, skip current fork ($FORK_NAME)"
+		continue
+	fi
+
 	if [ "$ASSETS_PATH" = "null" ]; then
 		echo "Warning: Could not find assets_path for verifier $i, skipping..."
 		continue
JSON config:

@@ -10,13 +10,18 @@
       "min_prover_version": "v4.4.45",
       "verifiers": [
         {
-          "assets_path": "assets",
+          "features": "legacy_witness:openvm_13",
+          "assets_path": "assets_feynman",
           "fork_name": "feynman"
         },
         {
           "assets_path": "assets",
           "fork_name": "galileo"
-        }
+        },
+        {
+          "assets_path": "assets_v2",
+          "fork_name": "galileoV2"
+        }
       ]
     }
   },
go.mod:

@@ -9,8 +9,8 @@ require (
 	github.com/google/uuid v1.6.0
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/da-codec v0.9.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
+	github.com/scroll-tech/da-codec v0.10.0
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
 	github.com/shopspring/decimal v1.3.1
 	github.com/stretchr/testify v1.10.0
 	github.com/urfave/cli/v2 v2.25.7
go.sum:

@@ -253,10 +253,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
-github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
+github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
+github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
Go source:

@@ -69,12 +69,12 @@ type AssetConfig struct {
 	ForkName         string `json:"fork_name"`
 	Vkfile           string `json:"vk_file,omitempty"`
 	MinProverVersion string `json:"min_prover_version,omitempty"`
+	Features         string `json:"features,omitempty"`
 }

 // VerifierConfig load zk verifier config.
 type VerifierConfig struct {
 	MinProverVersion string        `json:"min_prover_version"`
-	Features         string        `json:"features,omitempty"`
 	Verifiers        []AssetConfig `json:"verifiers"`
 }
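Moving Features from the global VerifierConfig onto the per-fork AssetConfig, with `omitempty`, means entries that set no features serialize exactly as before, so existing config files stay valid. A minimal sketch (struct trimmed to fields shown above):

```go
// Marshal one entry with features and one without; omitempty drops the
// empty field so the second entry's wire form is unchanged from before.
package main

import (
	"encoding/json"
	"fmt"
)

type AssetConfig struct {
	ForkName   string `json:"fork_name"`
	AssetsPath string `json:"assets_path"`
	Features   string `json:"features,omitempty"`
}

func main() {
	with, _ := json.Marshal(AssetConfig{ForkName: "feynman", AssetsPath: "assets_feynman", Features: "legacy_witness:openvm_13"})
	without, _ := json.Marshal(AssetConfig{ForkName: "galileo", AssetsPath: "assets"})
	fmt.Println(string(with))
	fmt.Println(string(without))
}
```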
Go source (cgo bindings):

@@ -141,13 +141,6 @@ func DumpVk(forkName, filePath string) error {
 	return nil
 }

-// Set dynamic feature flags that control libzkp runtime behavior
-func SetDynamicFeature(feats string) {
-	cFeats := goToCString(feats)
-	defer freeCString(cFeats)
-	C.set_dynamic_feature(cFeats)
-}
-
 // UnivTaskCompatibilityFix calls the universal task compatibility fix function
 func UniversalTaskCompatibilityFix(taskJSON string) (string, error) {
 	cTaskJSON := goToCString(taskJSON)
C header:

@@ -56,8 +56,6 @@ char* gen_wrapped_proof(char* proof_json, char* metadata, char* vk, size_t vk_le
 // Release memory allocated for a string returned by gen_wrapped_proof
 void release_string(char* string_ptr);

-void set_dynamic_feature(const char* feats);
-
 // Universal task compatibility fix function
 char* univ_task_compatibility_fix(char* task_json);
Go source:

@@ -311,9 +311,12 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkProofs []
 	if !bp.validiumMode() {
 		dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
 		switch dbBatchCodecVersion {
-		case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8, encoding.CodecV9:
-		default:
+		case 0:
+			log.Warn("the codec version is 0, if it is not under integration test we have encountered an error here")
+			return taskDetail, nil
+		case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
+		default:
 			return nil, fmt.Errorf("Unsupported codec version <%d>", dbBatchCodecVersion)
 		}

 		codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))

@@ -335,7 +338,7 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkProofs []
 		taskDetail.KzgProof = &message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[112:160]))}
 		taskDetail.KzgCommitment = &message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[64:112]))}
 	} else {
-		log.Debug("Apply validium mode for batch proving task")
+		log.Info("Apply validium mode for batch proving task")
 		codec := cutils.FromVersion(version)
 		batchHeader, decodeErr := codec.DABatchForTaskFromBytes(dbBatch.BatchHeader)
 		if decodeErr != nil {
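Go switch cases do not fall through, so the empty body after the long `case` above acts as a whitelist of accepted codec versions, while the new `case 0` short-circuits with a warning instead of an error. A tiny standalone illustration (plain ints stand in for encoding.CodecVersion):

```go
// An empty case body accepts the value; only default produces an error.
package main

import "fmt"

func checkCodec(v int) error {
	switch v {
	case 0:
		fmt.Println("codec version is 0: tolerated (integration-test path)")
		return nil
	case 3, 4, 6, 7, 8, 9, 10: // accepted versions; no fallthrough in Go
		return nil
	default:
		return fmt.Errorf("unsupported codec version <%d>", v)
	}
}

func main() {
	fmt.Println(checkCodec(0), checkCodec(7), checkCodec(5))
}
```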
Go source:

@@ -155,7 +155,7 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.Cha
 // HandleZkProof handle a ZkProof submitted from a prover.
 // For now only proving/verifying error will lead to setting status as skipped.
 // db/unmarshal errors will not because they are errors on the business logic side.
-func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) error {
+func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) (rerr error) {
 	m.proofReceivedTotal.Inc()
 	pk := ctx.GetString(coordinatorType.PublicKey)
 	if len(pk) == 0 {

@@ -172,6 +172,18 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
 		return ErrValidatorFailureProverTaskEmpty
 	}

+	defer func() {
+		if rerr != nil && types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverAssigned {
+			// trigger a last-chance closing of current task if some routine had missed it
+			log.Warn("last chance proof recover triggerred",
+				"proofID", proofParameter.TaskID,
+				"err", rerr,
+			)
+			m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeUndefined, proofParameter)
+		}
+	}()
+
 	proofTime := time.Since(proverTask.CreatedAt)
 	proofTimeSec := uint64(proofTime.Seconds())
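The signature change to a named return value (rerr) is what lets the new deferred closure observe the error HandleZkProof ultimately returns and run recovery exactly once on the failure path. A small standalone sketch of the pattern (names illustrative):

```go
// With a named return, the deferred closure sees the final value of rerr,
// including errors set by plain `return err` statements later in the body.
package main

import "fmt"

func handle(fail bool) (rerr error) {
	defer func() {
		if rerr != nil {
			fmt.Println("last-chance recovery for:", rerr) // e.g. close the task
		}
	}()
	if fail {
		return fmt.Errorf("validation failed")
	}
	return nil
}

func main() {
	_ = handle(true)  // triggers the deferred recovery
	_ = handle(false) // deferred closure sees rerr == nil, does nothing
}
```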
@@ -311,6 +323,20 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
 		}
 	}()

+	// Internally we override the timeout failure:
+	// if prover task FailureType is SessionInfoFailureTimeout, the submit proof is timeout, but we still accept it
+	if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofInvalid &&
+		types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
+		m.validateFailureProverTaskTimeout.Inc()
+		proverTask.ProvingStatus = int16(types.ProverAssigned)
+
+		proofTime := time.Since(proverTask.CreatedAt)
+		proofTimeSec := uint64(proofTime.Seconds())
+		log.Warn("proof submit proof have timeout", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType,
+			"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
+	}
+
 	// Ensure this prover is eligible to participate in the prover task.
 	if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid ||
 		types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofInvalid {

@@ -328,9 +354,6 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
 		return ErrValidatorFailureProverTaskCannotSubmitTwice
 	}

-	proofTime := time.Since(proverTask.CreatedAt)
-	proofTimeSec := uint64(proofTime.Seconds())
-
 	if proofParameter.Status != int(coordinatorType.StatusOk) {
 		// Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs.
 		failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1)

@@ -346,14 +369,6 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
 		return ErrValidatorFailureProofMsgStatusNotOk
 	}

-	// if prover task FailureType is SessionInfoFailureTimeout, the submit proof is timeout, need skip it
-	if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
-		m.validateFailureProverTaskTimeout.Inc()
-		log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType,
-			"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
-		return ErrValidatorFailureProofTimeout
-	}
-
 	// store the proof to prover task
 	if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofParameter); updateTaskProofErr != nil {
 		log.Warn("update prover task proof failure", "hash", proofParameter.TaskID, "proverPublicKey", pk,

@@ -368,6 +383,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
 			"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
 		return ErrValidatorFailureTaskHaveVerifiedSuccess
 	}
+
 	return nil
 }

@@ -384,7 +400,7 @@ func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm
 	log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
 		"taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskVerified.String())

-	if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil {
+	if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureType(proverTask.FailureType), proofTimeSec); err != nil {
 		log.Error("failed to updated proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err)
 		return err
 	}

@@ -445,6 +461,9 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *
 	if err != nil {
 		return err
 	}
+	// sync status and failure type into proverTask
+	proverTask.ProvingStatus = int16(status)
+	proverTask.FailureType = int16(failureType)

 	if status == types.ProverProofValid && message.ProofType(proofParameter.TaskType) == message.ProofTypeChunk {
 		if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil {
Go source:

@@ -29,6 +29,7 @@ type rustCircuitConfig struct {
 	Version    uint   `json:"version"`
 	ForkName   string `json:"fork_name"`
 	AssetsPath string `json:"assets_path"`
+	Features   string `json:"features,omitempty"`
 }

 var validiumMode bool

@@ -47,6 +48,7 @@ func newRustCircuitConfig(cfg config.AssetConfig) *rustCircuitConfig {
 		Version:    uint(ver),
 		AssetsPath: cfg.AssetsPath,
 		ForkName:   cfg.ForkName,
+		Features:   cfg.Features,
 	}
 }

@@ -82,9 +84,6 @@ func NewVerifier(cfg *config.VerifierConfig, useValidiumMode bool) (*Verifier, e
 		return nil, err
 	}

-	if cfg.Features != "" {
-		libzkp.SetDynamicFeature(cfg.Features)
-	}
 	libzkp.InitVerifier(string(configBytes))

 	v := &Verifier{
Go source:

@@ -31,6 +31,8 @@ func Version(hardForkName string, ValidiumMode bool) (uint8, error) {
 		stfVersion = 8
 	case "galileo":
 		stfVersion = 9
+	case "galileov2":
+		stfVersion = 10
 	default:
 		return 0, errors.New("unknown fork name " + canonicalName)
 	}
Cargo.toml:

@@ -9,7 +9,7 @@ scroll-zkvm-types = { workspace = true, features = ["scroll"] }
 scroll-zkvm-verifier.workspace = true

 alloy-primitives.workspace = true #depress the effect of "native-keccak"
-sbv-primitives = {workspace = true, features = ["scroll-compress-ratio", "scroll"]}
+sbv-primitives = {workspace = true, features = ["scroll-compress-info", "scroll"]}
 sbv-core = { workspace = true, features = ["scroll"] }
 base64.workspace = true
 serde.workspace = true
Rust source (crate root):

@@ -1,35 +1,57 @@
 pub mod proofs;
 pub mod tasks;
+pub use tasks::ProvingTaskExt;
 pub mod verifier;
+use verifier::HardForkName;
 pub use verifier::{TaskType, VerifierConfig};
 mod utils;

 use sbv_primitives::B256;
-use scroll_zkvm_types::utils::vec_as_base64;
+use scroll_zkvm_types::{utils::vec_as_base64, version::Version};
 use serde::{Deserialize, Serialize};
 use serde_json::value::RawValue;
-use std::path::Path;
+use std::{collections::HashMap, path::Path, sync::OnceLock};
 use tasks::chunk_interpreter::{ChunkInterpreter, TryFromWithInterpreter};

-/// global features: use legacy encoding for witness
-static mut LEGACY_WITNESS_ENCODING: bool = false;
-
-pub(crate) fn witness_use_legacy_mode() -> bool {
-    unsafe { LEGACY_WITNESS_ENCODING }
+pub(crate) fn witness_use_legacy_mode(fork_name: &str) -> eyre::Result<bool> {
+    ADDITIONAL_FEATURES
+        .get()
+        .and_then(|features| features.get(fork_name))
+        .map(|cfg| cfg.legacy_witness_encoding)
+        .ok_or_else(|| {
+            eyre::eyre!(
+                "can not find features setting for unrecognized fork {}",
+                fork_name
+            )
+        })
 }

-pub fn set_dynamic_feature(feats: &str) {
-    for feat_s in feats.split(':') {
-        match feat_s.trim().to_lowercase().as_str() {
-            "legacy_witness" => {
-                tracing::info!("set witness encoding for legacy mode");
-                unsafe {
-                    // the function is only called while initialize step
-                    LEGACY_WITNESS_ENCODING = true;
+#[derive(Debug, Default, Clone)]
+struct FeatureOptions {
+    legacy_witness_encoding: bool,
+    for_openvm_13_prover: bool,
+}
+
+static ADDITIONAL_FEATURES: OnceLock<HashMap<HardForkName, FeatureOptions>> = OnceLock::new();
+
+impl FeatureOptions {
+    pub fn new(feats: &str) -> Self {
+        let mut ret: Self = Default::default();
+
+        for feat_s in feats.split(':') {
+            match feat_s.trim().to_lowercase().as_str() {
+                "legacy_witness" => {
+                    tracing::info!("set witness encoding for legacy mode");
+                    ret.legacy_witness_encoding = true;
                 }
+                "openvm_13" => {
+                    tracing::info!("set prover should use openvm 13");
+                    ret.for_openvm_13_prover = true;
+                }
+                s => tracing::warn!("unrecognized dynamic feature: {s}"),
             }
-            s => tracing::warn!("unrecognized dynamic feature: {s}"),
         }
+        ret
     }
 }
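The FeatureOptions parser above splits the per-fork feature string on ':' and matches lowercased, trimmed tokens. For reference when composing the coordinator config, a standalone Go sketch of the same wire format (token names are the ones in the diff; everything else is illustrative):

```go
// Parse "legacy_witness:openvm_13"-style strings the way the Rust side does:
// split on ':', trim, lowercase, warn on unknown tokens.
package main

import (
	"fmt"
	"strings"
)

type featureOptions struct {
	legacyWitnessEncoding bool
	forOpenvm13Prover     bool
}

func parseFeatures(feats string) featureOptions {
	var ret featureOptions
	for _, tok := range strings.Split(feats, ":") {
		switch strings.ToLower(strings.TrimSpace(tok)) {
		case "legacy_witness":
			ret.legacyWitnessEncoding = true
		case "openvm_13":
			ret.forOpenvm13Prover = true
		default:
			fmt.Println("unrecognized dynamic feature:", tok)
		}
	}
	return ret
}

func main() {
	fmt.Printf("%+v\n", parseFeatures("legacy_witness:openvm_13"))
}
```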
@@ -112,35 +134,56 @@ pub fn gen_universal_task(
             let mut task = serde_json::from_str::<ChunkProvingTask>(task_json)?;
             // normalize fork name field in task
             task.fork_name = task.fork_name.to_lowercase();
+            let version = Version::from(task.version);
             // always respect the fork_name_str (which has been normalized) being passed
             // if the fork_name wrapped in task is not match, consider it a malformed task
             if fork_name_str != task.fork_name.as_str() {
                 eyre::bail!("fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
             }
+            if fork_name_str != version.fork.as_str() {
+                eyre::bail!(
+                    "given task version, expected fork={fork_name_str}, got={version_fork}",
+                    version_fork = version.fork.as_str()
+                );
+            }
             let (pi_hash, metadata, u_task) =
-                utils::panic_catch(move || gen_universal_chunk_task(task, fork_name_str.into()))
+                utils::panic_catch(move || gen_universal_chunk_task(task))
                     .map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
             (pi_hash, AnyMetaData::Chunk(metadata), u_task)
         }
         x if x == TaskType::Batch as i32 => {
             let mut task = serde_json::from_str::<BatchProvingTask>(task_json)?;
             task.fork_name = task.fork_name.to_lowercase();
+            let version = Version::from(task.version);
             if fork_name_str != task.fork_name.as_str() {
                 eyre::bail!("fork name in batch task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
             }
+            if fork_name_str != version.fork.as_str() {
+                eyre::bail!(
+                    "given task version, expected fork={fork_name_str}, got={version_fork}",
+                    version_fork = version.fork.as_str()
+                );
+            }
             let (pi_hash, metadata, u_task) =
-                utils::panic_catch(move || gen_universal_batch_task(task, fork_name_str.into()))
+                utils::panic_catch(move || gen_universal_batch_task(task))
                     .map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
             (pi_hash, AnyMetaData::Batch(metadata), u_task)
         }
         x if x == TaskType::Bundle as i32 => {
             let mut task = serde_json::from_str::<BundleProvingTask>(task_json)?;
             task.fork_name = task.fork_name.to_lowercase();
+            let version = Version::from(task.version);
             if fork_name_str != task.fork_name.as_str() {
                 eyre::bail!("fork name in bundle task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
             }
+            if fork_name_str != version.fork.as_str() {
+                eyre::bail!(
+                    "given task version, expected fork={fork_name_str}, got={version_fork}",
+                    version_fork = version.fork.as_str()
+                );
+            }
             let (pi_hash, metadata, u_task) =
-                utils::panic_catch(move || gen_universal_bundle_task(task, fork_name_str.into()))
+                utils::panic_catch(move || gen_universal_bundle_task(task))
                     .map_err(|e| eyre::eyre!("caught panic in chunk task{e}"))??;
             (pi_hash, AnyMetaData::Bundle(metadata), u_task)
         }

@@ -148,11 +191,26 @@
     };

     u_task.vk = Vec::from(expected_vk);
+    let fork_name = u_task.fork_name.clone();
+    let mut u_task_ext = ProvingTaskExt::new(u_task);
+
+    // set additional settings from global features
+    if let Some(cfg) = ADDITIONAL_FEATURES
+        .get()
+        .and_then(|features| features.get(&fork_name))
+    {
+        u_task_ext.use_openvm_13 = cfg.for_openvm_13_prover;
+    } else {
+        tracing::warn!(
+            "can not found features setting for unrecognized fork {}",
+            fork_name
+        );
+    }
+
     Ok((
         pi_hash,
         serde_json::to_string(&metadata)?,
-        serde_json::to_string(&u_task)?,
+        serde_json::to_string(&u_task_ext)?,
     ))
 }
@@ -183,7 +241,26 @@ pub fn gen_wrapped_proof(proof_json: &str, metadata: &str, vk: &[u8]) -> eyre::R
 /// init verifier
 pub fn verifier_init(config: &str) -> eyre::Result<()> {
     let cfg: VerifierConfig = serde_json::from_str(config)?;
+    ADDITIONAL_FEATURES
+        .set(HashMap::from_iter(cfg.circuits.iter().map(|config| {
+            tracing::info!(
+                "start setting features [{:?}] for fork {}",
+                config.features,
+                config.fork_name
+            );
+            (
+                config.fork_name.to_lowercase(),
+                config
+                    .features
+                    .as_ref()
+                    .map(|features| FeatureOptions::new(features.as_str()))
+                    .unwrap_or_default(),
+            )
+        })))
+        .map_err(|c| eyre::eyre!("Fail to init additional features: {c:?}"))?;
+
     verifier::init(cfg);

     Ok(())
 }
Rust source (proofs):

@@ -140,8 +140,6 @@ impl ProofMetadata for ChunkProofMetadata {
 pub struct BatchProofMetadata {
     /// The batch information describing the list of chunks.
     pub batch_info: BatchInfo,
-    /// The [`scroll_zkvm_types::batch::BatchHeader`]'s digest.
-    pub batch_hash: B256,
 }

 impl ProofMetadata for BatchProofMetadata {

@@ -217,7 +215,7 @@ impl<Metadata: ProofMetadata> PersistableProof for WrappedProof<Metadata> {
 mod tests {
     use base64::{prelude::BASE64_STANDARD, Engine};
     use sbv_primitives::B256;
-    use scroll_zkvm_types::{bundle::BundleInfo, proof::EvmProof, public_inputs::ForkName};
+    use scroll_zkvm_types::{bundle::BundleInfo, proof::EvmProof};

     use super::*;

@@ -255,7 +253,7 @@ mod tests {
             msg_queue_hash: B256::repeat_byte(6),
             encryption_key: None,
         };
-        let bundle_pi_hash = bundle_info.pi_hash(ForkName::EuclidV1);
+        let bundle_pi_hash = bundle_info.pi_hash_euclidv1();
         BundleProofMetadata {
             bundle_info,
             bundle_pi_hash,
Rust source (tasks):

@@ -14,7 +14,7 @@ use crate::{
     utils::panic_catch,
 };
 use sbv_primitives::B256;
-use scroll_zkvm_types::public_inputs::{ForkName, MultiVersionPublicInputs, Version};
+use scroll_zkvm_types::public_inputs::{MultiVersionPublicInputs, Version};

 fn encode_task_to_witness<T: serde::Serialize>(task: &T) -> eyre::Result<Vec<u8>> {
     let config = bincode::config::standard();

@@ -35,17 +35,37 @@ fn check_aggregation_proofs<Metadata: MultiVersionPublicInputs>(
     Ok(())
 }

+#[derive(serde::Deserialize, serde::Serialize)]
+pub struct ProvingTaskExt {
+    #[serde(flatten)]
+    task: ProvingTask,
+    #[serde(default)]
+    pub use_openvm_13: bool,
+}
+
+impl From<ProvingTaskExt> for ProvingTask {
+    fn from(wrap_t: ProvingTaskExt) -> Self {
+        wrap_t.task
+    }
+}
+
+impl ProvingTaskExt {
+    pub fn new(task: ProvingTask) -> Self {
+        Self {
+            task,
+            use_openvm_13: false,
+        }
+    }
+}
+
 /// Generate required stuff for chunk proving
 pub fn gen_universal_chunk_task(
     task: ChunkProvingTask,
-    fork_name: ForkName,
 ) -> eyre::Result<(B256, ChunkProofMetadata, ProvingTask)> {
     let chunk_total_gas = task.stats().total_gas_used;
-    let chunk_info = task.precheck_and_build_metadata()?;
-    let proving_task = task.try_into()?;
-    let expected_pi_hash = chunk_info.pi_hash_by_fork(fork_name);
+    let (proving_task, chunk_info, chunk_pi_hash) = task.into_proving_task_with_precheck()?;
     Ok((
-        expected_pi_hash,
+        chunk_pi_hash,
         ChunkProofMetadata {
             chunk_info,
             chunk_total_gas,

@@ -57,18 +77,11 @@ pub fn gen_universal_chunk_task(
 /// Generate required stuff for batch proving
 pub fn gen_universal_batch_task(
     task: BatchProvingTask,
-    fork_name: ForkName,
 ) -> eyre::Result<(B256, BatchProofMetadata, ProvingTask)> {
-    let batch_info = task.precheck_and_build_metadata()?;
-    let proving_task = task.try_into()?;
-    let expected_pi_hash = batch_info.pi_hash_by_fork(fork_name);
-
+    let (proving_task, batch_info, batch_pi_hash) = task.into_proving_task_with_precheck()?;
     Ok((
-        expected_pi_hash,
-        BatchProofMetadata {
-            batch_info,
-            batch_hash: expected_pi_hash,
-        },
+        batch_pi_hash,
+        BatchProofMetadata { batch_info },
         proving_task,
     ))
 }

@@ -76,17 +89,13 @@ pub fn gen_universal_batch_task(
 /// Generate required stuff for bundle proving
 pub fn gen_universal_bundle_task(
     task: BundleProvingTask,
-    fork_name: ForkName,
 ) -> eyre::Result<(B256, BundleProofMetadata, ProvingTask)> {
-    let bundle_info = task.precheck_and_build_metadata()?;
-    let proving_task = task.try_into()?;
-    let expected_pi_hash = bundle_info.pi_hash_by_fork(fork_name);
-
+    let (proving_task, bundle_info, bundle_pi_hash) = task.into_proving_task_with_precheck()?;
     Ok((
-        expected_pi_hash,
+        bundle_pi_hash,
         BundleProofMetadata {
             bundle_info,
-            bundle_pi_hash: expected_pi_hash,
+            bundle_pi_hash,
         },
         proving_task,
     ))
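ProvingTaskExt flattens the wrapped ProvingTask and gives use_openvm_13 a default, so task JSON produced before this change still deserializes. A hedged Go sketch of the equivalent wire shape using struct embedding (identifier and fork_name are ProvingTask fields shown in the diff; the rest is illustrative):

```go
// Embedding promotes the inner fields to the top level of the JSON object,
// analogous to serde(flatten); a missing use_openvm_13 decodes to false,
// analogous to serde(default).
package main

import (
	"encoding/json"
	"fmt"
)

type ProvingTask struct {
	Identifier string `json:"identifier"`
	ForkName   string `json:"fork_name"`
}

type ProvingTaskExt struct {
	ProvingTask      // embedded: fields appear at the top level
	UseOpenvm13 bool `json:"use_openvm_13"`
}

func main() {
	var ext ProvingTaskExt
	// an old-style payload without the flag still decodes; flag defaults to false
	_ = json.Unmarshal([]byte(`{"identifier":"0xabc","fork_name":"feynman"}`), &ext)
	out, _ := json.Marshal(ext)
	fmt.Println(ext.UseOpenvm13, string(out))
}
```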
Rust source (batch task):

@@ -3,15 +3,15 @@ use eyre::Result;
 use sbv_primitives::{B256, U256};
 use scroll_zkvm_types::{
     batch::{
-        build_point_eval_witness, BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderV8,
-        BatchHeaderValidium, BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, EnvelopeV8,
-        LegacyBatchWitness, ReferenceHeader, N_BLOB_BYTES,
+        build_point_eval_witness, BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderValidium,
+        BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, LegacyBatchWitness,
+        ReferenceHeader, N_BLOB_BYTES,
     },
     chunk::ChunkInfo,
-    public_inputs::{ForkName, Version},
+    public_inputs::{ForkName, MultiVersionPublicInputs, Version},
     task::ProvingTask,
     utils::{to_rkyv_bytes, RancorError},
-    version::{Domain, STFVersion},
+    version::{Codec, Domain, STFVersion},
 };

 use crate::proofs::ChunkProof;
@@ -26,23 +26,32 @@ pub struct BatchHeaderValidiumWithHash {
     batch_hash: B256,
 }

-/// Define variable batch header type, since BatchHeaderV6 can not
-/// be decoded as V7 we can always has correct deserialization
-/// Notice: V6 header MUST be put above V7 since untagged enum
-/// try to decode each defination in order
+/// Parse header types passed from golang side and adapt to the
+/// definition in zkvm-prover's types
+/// We distinguish the header type in golang side according to the STF
+/// version, i.e. v6, v7-v10 (current), and validium
+/// And adapt it to the corresponding batch header type used in zkvm-prover's witness
+/// definition, i.e. v6, v7 (current), and validium
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 #[serde(untagged)]
+#[allow(non_camel_case_types)]
 pub enum BatchHeaderV {
+    /// Header for validium mode.
     Validium(BatchHeaderValidiumWithHash),
+    /// Header for scroll's STF version v6.
     V6(BatchHeaderV6),
-    V7_8(BatchHeaderV7),
+    /// Header for scroll's STF versions v7 - v10.
+    ///
+    /// Since the codec essentially is unchanged for the above STF versions, we do not define new
+    /// variants, instead re-using the [`BatchHeaderV7`] variant.
+    V7_to_V10(BatchHeaderV7),
 }

 impl core::fmt::Display for BatchHeaderV {
     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
         match self {
             BatchHeaderV::V6(_) => write!(f, "V6"),
-            BatchHeaderV::V7_8(_) => write!(f, "V7_8"),
+            BatchHeaderV::V7_to_V10(_) => write!(f, "V7 - V10"),
             BatchHeaderV::Validium(_) => write!(f, "Validium"),
         }
     }
@@ -52,33 +61,29 @@ impl BatchHeaderV {
     pub fn batch_hash(&self) -> B256 {
         match self {
             BatchHeaderV::V6(h) => h.batch_hash(),
-            BatchHeaderV::V7_8(h) => h.batch_hash(),
+            BatchHeaderV::V7_to_V10(h) => h.batch_hash(),
             BatchHeaderV::Validium(h) => h.header.batch_hash(),
         }
     }

-    pub fn must_v6_header(&self) -> &BatchHeaderV6 {
+    pub fn to_zkvm_batch_header_v6(&self) -> &BatchHeaderV6 {
         match self {
             BatchHeaderV::V6(h) => h,
             _ => unreachable!("A header of {} is considered to be v6", self),
         }
     }

-    pub fn must_v7_header(&self) -> &BatchHeaderV7 {
+    pub fn to_zkvm_batch_header_v7_to_v10(&self) -> &BatchHeaderV7 {
         match self {
-            BatchHeaderV::V7_8(h) => h,
-            _ => unreachable!("A header of {} is considered to be v7", self),
+            BatchHeaderV::V7_to_V10(h) => h,
+            _ => unreachable!(
+                "A header of {} is considered to be in [v7, v8, v9, v10]",
+                self
+            ),
         }
     }

-    pub fn must_v8_header(&self) -> &BatchHeaderV8 {
-        match self {
-            BatchHeaderV::V7_8(h) => h,
-            _ => unreachable!("A header of {} is considered to be v8", self),
-        }
-    }
-
-    pub fn must_validium_header(&self) -> &BatchHeaderValidium {
+    pub fn to_zkvm_batch_header_validium(&self) -> &BatchHeaderValidium {
         match self {
             BatchHeaderV::Validium(h) => &h.header,
             _ => unreachable!("A header of {} is considered to be validium", self),
@@ -110,35 +115,55 @@ pub struct BatchProvingTask {
     pub fork_name: String,
 }

-impl TryFrom<BatchProvingTask> for ProvingTask {
-    type Error = eyre::Error;
-
-    fn try_from(value: BatchProvingTask) -> Result<Self> {
-        let witness = value.build_guest_input();
-        let serialized_witness = if crate::witness_use_legacy_mode() {
+impl BatchProvingTask {
+    pub fn into_proving_task_with_precheck(self) -> Result<(ProvingTask, BatchInfo, B256)> {
+        let (witness, metadata, batch_pi_hash) = self.precheck()?;
+        let serialized_witness = if crate::witness_use_legacy_mode(&self.fork_name)? {
             let legacy_witness = LegacyBatchWitness::from(witness);
             to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
         } else {
             super::encode_task_to_witness(&witness)?
         };

-        Ok(ProvingTask {
-            identifier: value.batch_header.batch_hash().to_string(),
-            fork_name: value.fork_name,
-            aggregated_proofs: value
+        let proving_task = ProvingTask {
+            identifier: self.batch_header.batch_hash().to_string(),
+            fork_name: self.fork_name,
+            aggregated_proofs: self
                 .chunk_proofs
                 .into_iter()
                 .map(|w_proof| w_proof.proof.into_stark_proof().expect("expect root proof"))
                 .collect(),
             serialized_witness: vec![serialized_witness],
             vk: Vec::new(),
-        })
-    }
-}
+        };

-impl BatchProvingTask {
-    fn build_guest_input(&self) -> BatchWitness {
-        let version = Version::from(self.version);
+        Ok((proving_task, metadata, batch_pi_hash))
+    }
+
+    fn build_guest_input(&self, version: Version) -> BatchWitness {
+        tracing::info!(
+            "Handling batch task for input, version byte {}, Version data: {:?}",
+            self.version,
+            version
+        );
+        // sanity check for if result of header type parsing match to version
+        match &self.batch_header {
+            BatchHeaderV::Validium(_) => assert!(
+                version.is_validium(),
+                "version {:?} is not match with parsed header, get validium header but version is not validium", version,
+            ),
+            BatchHeaderV::V6(_) => assert_eq!(version.fork, ForkName::EuclidV1,
+                "hardfork mismatch for da-codec@v6 header: found={:?}, expected={:?}",
+                version.fork,
+                ForkName::EuclidV1,
+            ),
+            BatchHeaderV::V7_to_V10(_) => assert!(
+                matches!(version.fork, ForkName::EuclidV2 | ForkName::Feynman | ForkName::Galileo | ForkName::GalileoV2),
+                "hardfork mismatch for da-codec@v7/8/9/10 header: found={}, expected={:?}",
+                version.fork,
+                [ForkName::EuclidV2, ForkName::Feynman, ForkName::Galileo, ForkName::GalileoV2],
+            ),
+        }

         let point_eval_witness = if !version.is_validium() {
             // sanity check: calculate point eval needed and compare with task input
@@ -146,44 +171,21 @@ impl BatchProvingTask {
             let blob = point_eval::to_blob(&self.blob_bytes);
             let commitment = point_eval::blob_to_kzg_commitment(&blob);
             let versioned_hash = point_eval::get_versioned_hash(&commitment);
-            let challenge_digest = match &self.batch_header {
-                BatchHeaderV::V6(_) => {
-                    assert_eq!(
-                        version.fork,
-                        ForkName::EuclidV1,
-                        "hardfork mismatch for da-codec@v6 header: found={:?}, expected={:?}",
-                        version.fork,
-                        ForkName::EuclidV1,
-                    );
-                    EnvelopeV6::from_slice(self.blob_bytes.as_slice())
+
+            let padded_blob_bytes = {
+                let mut padded_blob_bytes = self.blob_bytes.to_vec();
+                padded_blob_bytes.resize(N_BLOB_BYTES, 0);
+                padded_blob_bytes
+            };
+            let challenge_digest = match version.codec {
+                Codec::V6 => {
+                    // notice v6 do not use padded blob bytes
+                    <EnvelopeV6 as Envelope>::from_slice(self.blob_bytes.as_slice())
                         .challenge_digest(versioned_hash)
                 }
-                BatchHeaderV::V7_8(_) => {
-                    let padded_blob_bytes = {
-                        let mut padded_blob_bytes = self.blob_bytes.to_vec();
-                        padded_blob_bytes.resize(N_BLOB_BYTES, 0);
-                        padded_blob_bytes
-                    };
-
-                    match version.fork {
-                        ForkName::EuclidV2 => {
-                            <EnvelopeV7 as Envelope>::from_slice(padded_blob_bytes.as_slice())
-                                .challenge_digest(versioned_hash)
-                        }
-                        ForkName::Feynman => {
-                            <EnvelopeV8 as Envelope>::from_slice(padded_blob_bytes.as_slice())
-                                .challenge_digest(versioned_hash)
-                        }
-                        fork_name => unreachable!(
-                            "hardfork mismatch for da-codec@v7 header: found={}, expected={:?}",
-                            fork_name,
-                            [ForkName::EuclidV2, ForkName::Feynman],
-                        ),
-                    }
-                }
-                BatchHeaderV::Validium(_) => unreachable!("version!=validium"),
+                Codec::V7 => <EnvelopeV7 as Envelope>::from_slice(padded_blob_bytes.as_slice())
+                    .challenge_digest(versioned_hash),
             };

             let (proof, _) = point_eval::get_kzg_proof(&blob, challenge_digest);

             (commitment.to_bytes(), proof.to_bytes(), challenge_digest)
@@ -229,16 +231,25 @@ impl BatchProvingTask {
         let reference_header = match (version.domain, version.stf_version) {
             (Domain::Scroll, STFVersion::V6) => {
-                ReferenceHeader::V6(*self.batch_header.must_v6_header())
-            }
-            (Domain::Scroll, STFVersion::V7) => {
-                ReferenceHeader::V7(*self.batch_header.must_v7_header())
-            }
-            (Domain::Scroll, STFVersion::V8) => {
-                ReferenceHeader::V8(*self.batch_header.must_v8_header())
+                ReferenceHeader::V6(*self.batch_header.to_zkvm_batch_header_v6())
             }
+            // The da-codec for STF versions v7, v8, v9, v10 is identical. In zkvm-prover we do not
+            // create additional variants to indicate the identical behaviour of codec. Instead we
+            // add a separate variant for the STF version.
+            //
+            // We handle the different STF versions here however build the same batch header since
+            // that type does not change. The batch header's version byte constructed in the
+            // coordinator actually defines the STF version (v7, v8 or v9, v10) and we can derive
+            // the hard-fork (e.g. feynman or galileo) and the codec from the version
+            // byte.
+            //
+            // Refer [`scroll_zkvm_types::public_inputs::Version`].
+            (
+                Domain::Scroll,
+                STFVersion::V7 | STFVersion::V8 | STFVersion::V9 | STFVersion::V10,
+            ) => ReferenceHeader::V7_V8_V9(*self.batch_header.to_zkvm_batch_header_v7_to_v10()),
             (Domain::Validium, STFVersion::V1) => {
-                ReferenceHeader::Validium(*self.batch_header.must_validium_header())
+                ReferenceHeader::Validium(*self.batch_header.to_zkvm_batch_header_validium())
             }
             (domain, stf_version) => {
                 unreachable!("unsupported domain={domain:?},stf-version={stf_version:?}")
@@ -273,18 +284,20 @@ impl BatchProvingTask {
         }
     }

-    pub fn precheck_and_build_metadata(&self) -> Result<BatchInfo> {
+    pub fn precheck(&self) -> Result<(BatchWitness, BatchInfo, B256)> {
         // for every aggregation task, there are two steps needed to build the metadata:
         // 1. generate data for metadata from the witness
         // 2. validate every adjacent proof pair
-        let witness = self.build_guest_input();
+        let version = Version::from(self.version);
+        let witness = self.build_guest_input(version);
         let metadata = BatchInfo::from(&witness);
         super::check_aggregation_proofs(
             witness.chunk_infos.as_slice(),
             Version::from(self.version),
         )?;
+        let pi_hash = metadata.pi_hash_by_version(version);

-        Ok(metadata)
+        Ok((witness, metadata, pi_hash))
     }
 }
Rust source (bundle task):

@@ -1,7 +1,8 @@
 use eyre::Result;
+use sbv_primitives::B256;
 use scroll_zkvm_types::{
     bundle::{BundleInfo, BundleWitness, LegacyBundleWitness},
-    public_inputs::Version,
+    public_inputs::{MultiVersionPublicInputs, Version},
     task::ProvingTask,
     utils::{to_rkyv_bytes, RancorError},
 };
@@ -24,6 +25,30 @@ pub struct BundleProvingTask {
 }

 impl BundleProvingTask {
+    pub fn into_proving_task_with_precheck(self) -> Result<(ProvingTask, BundleInfo, B256)> {
+        let (witness, bundle_info, bundle_pi_hash) = self.precheck()?;
+        let serialized_witness = if crate::witness_use_legacy_mode(&self.fork_name)? {
+            let legacy = LegacyBundleWitness::from(witness);
+            to_rkyv_bytes::<RancorError>(&legacy)?.into_vec()
+        } else {
+            super::encode_task_to_witness(&witness)?
+        };
+
+        let proving_task = ProvingTask {
+            identifier: self.identifier(),
+            fork_name: self.fork_name,
+            aggregated_proofs: self
+                .batch_proofs
+                .into_iter()
+                .map(|w_proof| w_proof.proof.into_stark_proof().expect("expect root proof"))
+                .collect(),
+            serialized_witness: vec![serialized_witness],
+            vk: Vec::new(),
+        };
+
+        Ok((proving_task, bundle_info, bundle_pi_hash))
+    }
+
     fn identifier(&self) -> String {
         assert!(!self.batch_proofs.is_empty(), "{BUNDLE_SANITY_MSG}",);
@@ -32,19 +57,20 @@ impl BundleProvingTask {
|
||||
.first()
|
||||
.expect(BUNDLE_SANITY_MSG)
|
||||
.metadata
|
||||
.batch_info
|
||||
.batch_hash,
|
||||
self.batch_proofs
|
||||
.last()
|
||||
.expect(BUNDLE_SANITY_MSG)
|
||||
.metadata
|
||||
.batch_info
|
||||
.batch_hash,
|
||||
);
|
||||
|
||||
format!("{first}-{last}")
|
||||
}
|
||||
|
||||
fn build_guest_input(&self) -> BundleWitness {
|
||||
let version = Version::from(self.version);
|
||||
fn build_guest_input(&self, version: Version) -> BundleWitness {
|
||||
BundleWitness {
|
||||
version: version.as_version_byte(),
|
||||
batch_proofs: self.batch_proofs.iter().map(|proof| proof.into()).collect(),
|
||||
@@ -57,43 +83,19 @@ impl BundleProvingTask {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn precheck_and_build_metadata(&self) -> Result<BundleInfo> {
|
||||
fn precheck(&self) -> Result<(BundleWitness, BundleInfo, B256)> {
|
||||
// for every aggregation task, there are two steps needed to build the metadata:
|
||||
// 1. generate data for metadata from the witness
|
||||
// 2. validate every adjacent proof pair
|
||||
let witness = self.build_guest_input();
|
||||
let version = Version::from(self.version);
|
||||
let witness = self.build_guest_input(version);
|
||||
let metadata = BundleInfo::from(&witness);
|
||||
super::check_aggregation_proofs(
|
||||
witness.batch_infos.as_slice(),
|
||||
Version::from(self.version),
|
||||
)?;
|
||||
let pi_hash = metadata.pi_hash_by_version(version);
|
||||
|
||||
Ok(metadata)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<BundleProvingTask> for ProvingTask {
|
||||
type Error = eyre::Error;
|
||||
|
||||
fn try_from(value: BundleProvingTask) -> Result<Self> {
|
||||
let witness = value.build_guest_input();
|
||||
let serialized_witness = if crate::witness_use_legacy_mode() {
|
||||
let legacy = LegacyBundleWitness::from(witness);
|
||||
to_rkyv_bytes::<RancorError>(&legacy)?.into_vec()
|
||||
} else {
|
||||
super::encode_task_to_witness(&witness)?
|
||||
};
|
||||
|
||||
Ok(ProvingTask {
|
||||
identifier: value.identifier(),
|
||||
fork_name: value.fork_name,
|
||||
aggregated_proofs: value
|
||||
.batch_proofs
|
||||
.into_iter()
|
||||
.map(|w_proof| w_proof.proof.into_stark_proof().expect("expect root proof"))
|
||||
.collect(),
|
||||
serialized_witness: vec![serialized_witness],
|
||||
vk: Vec::new(),
|
||||
})
|
||||
Ok((witness, metadata, pi_hash))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,9 +3,9 @@ use sbv_core::BlockWitness;
use sbv_primitives::{types::consensus::BlockHeader, B256};
use scroll_zkvm_types::{
chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness, ValidiumInputs},
public_inputs::{MultiVersionPublicInputs, Version},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
version::Version,
};

use super::chunk_interpreter::*;
@@ -94,28 +94,6 @@ pub struct ChunkDetails {
pub total_gas_used: u64,
}

impl TryFrom<ChunkProvingTask> for ProvingTask {
type Error = eyre::Error;

fn try_from(value: ChunkProvingTask) -> Result<Self> {
let witness = value.build_guest_input();
let serialized_witness = if crate::witness_use_legacy_mode() {
let legacy_witness = LegacyChunkWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};

Ok(ProvingTask {
identifier: value.identifier(),
fork_name: value.fork_name,
aggregated_proofs: Vec::new(),
serialized_witness: vec![serialized_witness],
vk: Vec::new(),
})
}
}

impl ChunkProvingTask {
pub fn stats(&self) -> ChunkDetails {
let num_blocks = self.block_witnesses.len();
@@ -137,6 +115,26 @@ impl ChunkProvingTask {
}
}

pub fn into_proving_task_with_precheck(self) -> Result<(ProvingTask, ChunkInfo, B256)> {
let (witness, chunk_info, chunk_pi_hash) = self.precheck()?;
let serialized_witness = if crate::witness_use_legacy_mode(&self.fork_name)? {
let legacy_witness = LegacyChunkWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};

let proving_task = ProvingTask {
identifier: self.identifier(),
fork_name: self.fork_name,
aggregated_proofs: Vec::new(),
serialized_witness: vec![serialized_witness],
vk: Vec::new(),
};

Ok((proving_task, chunk_info, chunk_pi_hash))
}

fn identifier(&self) -> String {
assert!(!self.block_witnesses.is_empty(), "{CHUNK_SANITY_MSG}",);

@@ -156,9 +154,7 @@ impl ChunkProvingTask {
format!("{first}-{last}")
}

fn build_guest_input(&self) -> ChunkWitness {
let version = Version::from(self.version);

fn build_guest_input(&self, version: Version) -> ChunkWitness {
if version.is_validium() {
assert!(self.validium_inputs.is_some());
ChunkWitness::new(
@@ -182,11 +178,13 @@ impl ChunkProvingTask {
self.block_witnesses[0].states.push(node);
}

pub fn precheck_and_build_metadata(&self) -> Result<ChunkInfo> {
let witness = self.build_guest_input();
let ret = ChunkInfo::try_from(witness).map_err(|e| eyre::eyre!("{e}"))?;
assert_eq!(ret.post_msg_queue_hash, self.post_msg_queue_hash);
Ok(ret)
fn precheck(&self) -> Result<(ChunkWitness, ChunkInfo, B256)> {
let version = Version::from(self.version);
let witness = self.build_guest_input(version);
let chunk_info = ChunkInfo::try_from(witness.clone()).map_err(|e| eyre::eyre!("{e}"))?;
assert_eq!(chunk_info.post_msg_queue_hash, self.post_msg_queue_hash);
let chunk_pi_hash = chunk_info.pi_hash_by_version(version);
Ok((witness, chunk_info, chunk_pi_hash))
}

/// This method checks the validity of the current task (there may be missing storage nodes).
@@ -214,7 +212,7 @@ impl ChunkProvingTask {
let err_parse_re = regex::Regex::new(pattern)?;
let mut attempts = 0;
loop {
let witness = self.build_guest_input();
let witness = self.build_guest_input(Version::euclid_v2());

match execute(witness) {
Ok(_) => return Ok(()),

@@ -44,6 +44,8 @@ pub struct CircuitConfig {
pub version: u8,
pub fork_name: String,
pub assets_path: String,
#[serde(default)]
pub features: Option<String>,
}

#[derive(Debug, Serialize, Deserialize)]
@@ -51,7 +53,7 @@ pub struct VerifierConfig {
pub circuits: Vec<CircuitConfig>,
}

type HardForkName = String;
pub(crate) type HardForkName = String;

type VerifierType = Arc<Mutex<dyn ProofVerifier + Send>>;
static VERIFIERS: OnceLock<HashMap<HardForkName, VerifierType>> = OnceLock::new();

@@ -275,10 +275,3 @@ pub unsafe extern "C" fn release_string(ptr: *mut c_char) {
let _ = CString::from_raw(ptr);
}
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn set_dynamic_feature(feats: *const c_char) {
let feats_str = c_char_to_str(feats);
libzkp::set_dynamic_feature(feats_str);
}

@@ -8,6 +8,7 @@ edition.workspace = true
[dependencies]
scroll-zkvm-types.workspace = true
scroll-zkvm-prover.workspace = true
libzkp = { path = "../libzkp"}
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "05648db" }
serde.workspace = true
serde_json.workspace = true

@@ -12,6 +12,7 @@ use scroll_proving_sdk::{
ProvingService,
},
};
use scroll_zkvm_types::ProvingTask;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
@@ -143,7 +144,6 @@ impl LocalProverConfig {

#[derive(Clone, Serialize, Deserialize)]
pub struct CircuitConfig {
pub hard_fork_name: String,
/// The path to save assets for a specified hard fork phase
pub workspace_path: String,
#[serde(flatten)]
@@ -273,6 +273,8 @@ impl LocalProver {
let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;

let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
let is_openvm_13 = prover_task.use_openvm_13;
let prover_task: ProvingTask = prover_task.into();
let vk = hex::encode(&prover_task.vk);
let handler = if let Some(handler) = self.handlers.get(&vk) {
handler.clone()
@@ -300,7 +302,7 @@ impl LocalProver {
.await?;
let circuits_handler = Arc::new(Mutex::new(UniversalHandler::new(
&asset_path,
req.proof_type,
is_openvm_13,
)?));
self.handlers.insert(vk, circuits_handler.clone());
circuits_handler

@@ -3,7 +3,7 @@ use std::path::Path;
use super::CircuitsHandler;
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::prover::ProofType;
use libzkp::ProvingTaskExt;
use scroll_zkvm_prover::{Prover, ProverConfig};
use scroll_zkvm_types::ProvingTask;
use tokio::sync::Mutex;
@@ -16,14 +16,15 @@ pub struct UniversalHandler {
unsafe impl Send for UniversalHandler {}

impl UniversalHandler {
pub fn new(workspace_path: impl AsRef<Path>, _proof_type: ProofType) -> Result<Self> {
pub fn new(workspace_path: impl AsRef<Path>, is_openvm_v13: bool) -> Result<Self> {
let path_app_exe = workspace_path.as_ref().join("app.vmexe");
let path_app_config = workspace_path.as_ref().join("openvm.toml");
let segment_len = Some((1 << 22) - 100);
let segment_len = Some((1 << 21) - 100);
let config = ProverConfig {
path_app_config,
path_app_exe,
segment_len,
is_openvm_v13,
};

let prover = Prover::setup(config, None)?;
@@ -36,7 +37,7 @@ impl UniversalHandler {
&mut self.prover
}

pub fn get_task_from_input(input: &str) -> Result<ProvingTask> {
pub fn get_task_from_input(input: &str) -> Result<ProvingTaskExt> {
Ok(serde_json::from_str(input)?)
}
}

@@ -1,5 +1,5 @@
{
"dsn": "postgres://postgres:postgres@localhost:5432/scroll?sslmode=disable",
"dsn": "postgres://localhost/scroll?sslmode=disable",
"driver_name": "postgres",
"maxOpenNum": 200,
"maxIdleNum": 20

@@ -8,7 +8,7 @@ require (
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.9
github.com/pressly/goose/v3 v3.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.25.7
)

@@ -121,8 +121,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=

@@ -1413,16 +1413,14 @@ github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148 h1:cyK1ifU2
github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148/go.mod h1:gz5x3CsLy5htNTbv4PWRPBU9nSAujfx1U2XtFcXoFuk=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
github.com/scroll-tech/ecies-go/v2 v2.0.10-beta.1/go.mod h1:A+pHaITd+ogBm4Rk35xebF9OPiyMYlFlgqBOiY5PSjg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250206083728-ea43834c198f/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939/go.mod h1:AgU8JJxC7+nfs7R7ma35AU7dMAGW7wCw3dRZRefIKyQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e h1:7U1/JilCPIqJTYNgKzlfBEkRNORRwJ8+PCmdR8/XK6A=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b h1:pMQKnroJoS/FeL1aOWkz7/u1iBHUP8PWjZstNuzoUGE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=

File diff suppressed because one or more lines are too long
@@ -66,17 +66,26 @@ func action(ctx *cli.Context) error {
registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)

l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
// Init L1 connection
l1RpcClient, err := rpc.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
log.Crit("failed to dial raw RPC client to L1 endpoint", "endpoint", cfg.L1Config.Endpoint, "error", err)
}
l1client := ethclient.NewClient(l1RpcClient)

// sanity check config
if cfg.L1Config.RelayerConfig.GasOracleConfig.L1BaseFeeLimit == 0 || cfg.L1Config.RelayerConfig.GasOracleConfig.L1BlobBaseFeeLimit == 0 {
log.Crit("gas-oracle `l1_base_fee_limit` and `l1_blob_base_fee_limit` configs must be set")
}

l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, db, registry)
// Init watcher and relayer
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1RpcClient, cfg.L1Config.StartHeight, db, registry)

l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, relayer.ServiceTypeL1GasOracle, registry)
if err != nil {
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
}

// Start l1 watcher process
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
// Fetch the latest block number to decrease the delay when fetching gas prices

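The hunk above replaces a single `ethclient.Dial` with `rpc.Dial` plus `ethclient.NewClient`, so one underlying connection serves both the typed Go SDK client and raw JSON-RPC calls (the watcher uses the raw handle for `eth_blobBaseFee` later in this diff). A minimal stand-alone sketch of the pattern; the endpoint URL is a placeholder, not from this repo:

```go
package main

import (
	"context"
	"log"

	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rpc"
)

func main() {
	// One underlying connection, two views of it.
	rawClient, err := rpc.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	typed := ethclient.NewClient(rawClient)

	// Typed call through the Go SDK wrapper.
	head, err := typed.HeaderByNumber(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}

	// Raw call for methods the typed client does not expose.
	var chainID hexutil.Big
	if err := rawClient.CallContext(context.Background(), &chainID, "eth_chainId"); err != nil {
		log.Fatal(err)
	}
	log.Printf("head=%v chainID=%v", head.Number, chainID.String())
}
```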
@@ -144,14 +144,6 @@ func action(ctx *cli.Context) error {
return nil
}

if ctx.Bool(utils.RevertFlag.Name) {
err = l2relayer.RevertBatch(7)
if err != nil {
log.Crit("failed to revert batch", "error", err)
}
os.Exit(0)
}

// Watcher loop to fetch missing blocks
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
number, loopErr := rutils.GetLatestConfirmedBlockNumber(ctx, l2ethClient, cfg.L2Config.Confirmations)

@@ -21,7 +21,9 @@
"check_committed_batches_window_minutes": 5,
"l1_base_fee_default": 15000000000,
"l1_blob_base_fee_default": 1,
"l1_blob_base_fee_threshold": 0
"l1_blob_base_fee_threshold": 0,
"l1_base_fee_limit": 20000000000,
"l1_blob_base_fee_limit": 20000000000
},
"gas_oracle_sender_signer_config": {
"signer_type": "PrivateKey",
@@ -56,7 +58,8 @@
"min_batches": 1,
"max_batches": 6,
"timeout": 7200,
"backlog_max": 75
"backlog_max": 75,
"blob_fee_tolerance": 500000000
},
"gas_oracle_config": {
"min_gas_price": 0,

@@ -1,86 +0,0 @@
{
"l2_config": {
"confirmations": "0x1",
"endpoint": "http://localhost:8545",
"l2_message_queue_address": "0x5300000000000000000000000000000000000000",
"relayer_config": {
"validium_mode": false,
"rollup_contract_address": "0x5FC8d32690cc91D4c39d9d3abcBD16989F875707",
"sender_config": {
"endpoint": "http://localhost:8544",
"escalate_blocks": 1,
"confirmations": "0x0",
"escalate_multiple_num": 2,
"escalate_multiple_den": 1,
"max_gas_price": 1000000000000,
"max_blob_gas_price": 10000000000000,
"tx_type": "DynamicFeeTx",
"check_pending_time": 1,
"min_gas_tip": 100000000,
"max_pending_blob_txs": 3,
"fusaka_timestamp": 9999999999999
},
"batch_submission": {
"min_batches": 1,
"max_batches": 1,
"timeout": 8400,
"backlog_max": 0
},
"gas_oracle_config": {
"min_gas_price": 0,
"gas_price_diff": 50000
},
"chain_monitor": {
"enabled": false,
"timeout": 3,
"try_times": 5,
"base_url": "http://localhost:8750"
},
"enable_test_env_bypass_features": true,
"test_env_bypass_only_until_fork_boundary": false,
"finalize_batch_without_proof_timeout_sec": 200,
"finalize_bundle_without_proof_timeout_sec": 1000000,
"gas_oracle_sender_signer_config": {
"signer_type": "PrivateKey",
"private_key_signer_config": {
"private_key": "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
}
},
"commit_sender_signer_config": {
"signer_type": "PrivateKey",
"private_key_signer_config": {
"private_key": "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
}
},
"finalize_sender_signer_config": {
"signer_type": "PrivateKey",
"private_key_signer_config": {
"private_key": "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
}
}
},
"chunk_proposer_config": {
"propose_interval_milliseconds": 100,
"max_l2_gas_per_chunk": 20000000,
"chunk_timeout_sec": 30,
"max_uncompressed_batch_bytes_size": 4194304
},
"batch_proposer_config": {
"propose_interval_milliseconds": 1000,
"batch_timeout_sec": 300,
"max_chunks_per_batch": 1,
"max_uncompressed_batch_bytes_size": 4194304
},
"bundle_proposer_config": {
"max_batch_num_per_bundle": 1,
"bundle_timeout_sec": 300
}
},
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://postgres:postgres@localhost:5432/scroll?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
}
}

@@ -1,136 +0,0 @@
{
"config": {
"chainId": 534351,
"homesteadBlock": 0,
"eip150Block": 0,
"eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0,
"archimedesBlock": 0,
"shanghaiBlock": 0,
"bernoulliBlock": 0,
"curieBlock": 0,
"darwinTime": 0,
"darwinV2Time": 0,
"euclidTime": 0,
"euclidV2Time": 0,
"feynmanTime": 0,
"clique": {
"period": 3,
"epoch": 30000
},
"systemContract": {
"period": 1,
"blocks_per_second": 2,
"system_contract_address": "0xC706Ba9fa4fedF4507CB7A898b4766c1bbf9be57",
"system_contract_slot": "0x0000000000000000000000000000000000000000000000000000000000000067"
},
"scroll": {
"useZktrie": true,
"maxTxPayloadBytesPerBlock": 122880,
"feeVaultAddress": "0x5300000000000000000000000000000000000005",
"l1Config": {
"l1ChainId": "11155111",
"l1MessageQueueAddress": "0xF0B2293F5D834eAe920c6974D50957A1732de763",
"l1MessageQueueV2Address": "0xA0673eC0A48aa924f067F1274EcD281A10c5f19F",
"l1MessageQueueV2DeploymentBlock": 7773746,
"scrollChainAddress": "0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0",
"l2SystemConfigAddress": "0xF444cF06A3E3724e20B35c2989d3942ea8b59124",
"numL1MessagesPerBlock": "10"
},
"genesisStateRoot": "0x20695989e9038823e35f0e88fbc44659ffdbfa1fe89fbeb2689b43f15fa64cb5",
"missingHeaderFieldsSHA256": "0xa02354c12ca0f918bf4768255af9ed13c137db7e56252348f304b17bb4088924"
}
},
"nonce": "0x0",
"timestamp": "0x6490fdd2",
"extraData": "0x",
"gasLimit": "0x1312D00",
"baseFeePerGas": "0x0",
"difficulty": "0x0",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x0000000000000000000000000000000000000000",
"alloc": {
"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0x70997970C51812dc3A010C7d01b50e0d17dc79C8": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0x90F79bf6EB2c4f870365E785982E1f101E93b906": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0x976EA74026E726554dB657fA54763abd0C3a0aa9": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0x14dC79964da2C08b23698B3D3cc7Ca32193d9955": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0xa0Ee7A142d267C1f36714E4a8F75612F20a79720": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0xBcd4042DE499D14e55001CcbB24a551F3b954096": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0x71bE63f3384f5fb98995898A86B02Fb2426c5788": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0xFABB0ac9d68B0B445fB7357272Ff202C5651694a": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0x1CBd3b2770909D4e10f157cABC84C7264073C9Ec": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0xdF3e18d64BC6A983f673Ab319CCaE4f1a57C7097": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0xcd3B766CCDd6AE721141F452C550Ca635964ce71": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0x2546BcD3c84621e976D8185a91A922aE77ECEc30": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0xbDA5747bFD65F08deb54cb465eB87D40e51B197E": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0xdD2FD4581271e230360230F9337D5c0430Bf44C0": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0x8626f6940E2eb28930eFb4CeF49B2d1F2C9C1199": {
"balance": "0xD3C21BCECCEDA1000000"
},
"0x5300000000000000000000000000000000000002": {
"balance": "0xd3c21bcecceda1000000",
"storage": {
"0x01": "0x000000000000000000000000000000000000000000000000000000003758e6b0",
"0x02": "0x0000000000000000000000000000000000000000000000000000000000000038",
"0x03": "0x000000000000000000000000000000000000000000000000000000003e95ba80",
"0x04": "0x0000000000000000000000005300000000000000000000000000000000000003",
"0x05": "0x000000000000000000000000000000000000000000000000000000008390c2c1",
"0x06": "0x00000000000000000000000000000000000000000000000000000069cf265bfe",
"0x07": "0x00000000000000000000000000000000000000000000000000000000168b9aa3"
}
}
},
"number": "0x0",
"gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
@@ -15,8 +15,8 @@ require (
github.com/holiman/uint256 v1.3.2
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/da-codec v0.9.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/smartystreets/goconvey v1.8.0
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.10.0
@@ -51,7 +51,7 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
github.com/fjl/memsize v0.0.2 // indirect

@@ -88,8 +88,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
@@ -287,10 +287,10 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

@@ -48,6 +48,10 @@ type BatchSubmission struct {
TimeoutSec int64 `json:"timeout"`
// The maximum number of pending batches to keep in the backlog.
BacklogMax int64 `json:"backlog_max"`
// BlobFeeTolerance is the absolute tolerance (in wei) added to the target blob fee.
// If the current fee is below target + tolerance, we proceed with submission.
// This prevents skipping submission when the price difference is negligible.
BlobFeeTolerance uint64 `json:"blob_fee_tolerance"`
}

// ChainMonitor this config is used to get batch status from chain_monitor API.
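A small sketch of how this tolerance is meant to combine with the target price, mirroring the `skipSubmitByFee` change later in this diff; the wei amounts are illustrative only:

```go
package main

import (
	"fmt"
	"math/big"
)

// shouldSkip mirrors the tolerance rule: skip only while the current blob fee
// exceeds target + tolerance (the real code additionally requires that the
// submission window has not yet timed out).
func shouldSkip(current, target *big.Int, toleranceWei uint64) bool {
	threshold := new(big.Int).Add(target, new(big.Int).SetUint64(toleranceWei))
	return current.Cmp(threshold) > 0
}

func main() {
	target := big.NewInt(1_000_000_000)  // 1 gwei target
	current := big.NewInt(1_400_000_000) // 1.4 gwei observed
	// With the 500000000 wei tolerance from the sample config above, the
	// 0.4 gwei overshoot stays inside the threshold, so we submit.
	fmt.Println(shouldSkip(current, target, 500_000_000)) // false
}
```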
@@ -109,6 +113,10 @@ type GasOracleConfig struct {
L1BaseFeeDefault uint64 `json:"l1_base_fee_default"`
L1BlobBaseFeeDefault uint64 `json:"l1_blob_base_fee_default"`

// Upper limit values for gas oracle updates
L1BaseFeeLimit uint64 `json:"l1_base_fee_limit"`
L1BlobBaseFeeLimit uint64 `json:"l1_blob_base_fee_limit"`

// L1BlobBaseFeeThreshold the threshold of L1 blob base fee to enter the default gas price mode
L1BlobBaseFeeThreshold uint64 `json:"l1_blob_base_fee_threshold"`
}

@@ -167,7 +167,7 @@ func (b *BlobUploader) constructBlobCodec(dbBatch *orm.Batch) (*kzg4844.Blob, er
Chunks: chunks,
}

case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9:
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
encodingBatch = &encoding.Batch{
Index: dbBatch.Index,
ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash),
@@ -242,10 +242,12 @@ func (b *BlobUploader) GetFirstUnuploadedBatchByPlatform(ctx context.Context, st
break
}

if len(batch.CommitTxHash) == 0 {
log.Debug("got batch not committed for blob uploading", "batch_index", batchIndex, "platform", platform.String())
return nil, nil
}
// disable this check to upload blobs before they're committed. This is to
// alleviate the case where nodes try to fetch the blob from S3 before it's uploaded.
// if len(batch.CommitTxHash) == 0 {
// log.Debug("got batch not committed for blob uploading", "batch_index", batchIndex, "platform", platform.String())
// return nil, nil
// }

return batch, nil
}

@@ -173,6 +173,18 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
} else if err != nil {
return
}
// Cap base fee update at the configured upper limit
if limit := r.cfg.GasOracleConfig.L1BaseFeeLimit; baseFee > limit {
log.Error("L1 base fee exceed max limit, set to max limit", "baseFee", baseFee, "maxLimit", limit)
r.metrics.rollupL1RelayerGasPriceOracleFeeOverLimitTotal.Inc()
baseFee = limit
}
// Cap blob base fee update at the configured upper limit
if limit := r.cfg.GasOracleConfig.L1BlobBaseFeeLimit; blobBaseFee > limit {
log.Error("L1 blob base fee exceed max limit, set to max limit", "blobBaseFee", blobBaseFee, "maxLimit", limit)
r.metrics.rollupL1RelayerGasPriceOracleFeeOverLimitTotal.Inc()
blobBaseFee = limit
}
data, err := r.l1GasOracleABI.Pack("setL1BaseFeeAndBlobBaseFee", new(big.Int).SetUint64(baseFee), new(big.Int).SetUint64(blobBaseFee))
if err != nil {
log.Error("Failed to pack setL1BaseFeeAndBlobBaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "err", err)

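The two capping branches above share one shape; a hypothetical helper distilling it (a sketch, not how the relayer actually factors the code):

```go
package main

import "fmt"

// capFee clamps a proposed oracle update to the configured upper limit and
// reports whether clamping occurred, so the caller can log and bump the
// over-limit metric, as the relayer does above.
func capFee(fee, limit uint64) (uint64, bool) {
	if fee > limit {
		return limit, true
	}
	return fee, false
}

func main() {
	fee, capped := capFee(30_000_000_000, 20_000_000_000) // 30 gwei vs the 20 gwei limit
	fmt.Println(fee, capped)                              // 20000000000 true
}
```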
@@ -8,11 +8,12 @@ import (
)

type l1RelayerMetrics struct {
rollupL1RelayerGasPriceOraclerRunTotal prometheus.Counter
rollupL1RelayerLatestBaseFee prometheus.Gauge
rollupL1RelayerLatestBlobBaseFee prometheus.Gauge
rollupL1UpdateGasOracleConfirmedTotal prometheus.Counter
rollupL1UpdateGasOracleConfirmedFailedTotal prometheus.Counter
rollupL1RelayerGasPriceOraclerRunTotal prometheus.Counter
rollupL1RelayerLatestBaseFee prometheus.Gauge
rollupL1RelayerLatestBlobBaseFee prometheus.Gauge
rollupL1UpdateGasOracleConfirmedTotal prometheus.Counter
rollupL1UpdateGasOracleConfirmedFailedTotal prometheus.Counter
rollupL1RelayerGasPriceOracleFeeOverLimitTotal prometheus.Counter
}

var (
@@ -43,6 +44,10 @@ func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
Name: "rollup_layer1_update_gas_oracle_confirmed_failed_total",
Help: "The total number of updating layer1 gas oracle confirmed failed",
}),
rollupL1RelayerGasPriceOracleFeeOverLimitTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_gas_price_oracle_fee_over_limit_total",
Help: "The total number of times when a gas price oracle fee update went over the configured limit",
}),
}
})
return l1RelayerMetric

@@ -123,19 +123,18 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.

switch serviceType {
case ServiceTypeL2RollupRelayer:
// commitSenderAddr, err := addrFromSignerConfig(cfg.CommitSenderSignerConfig)
// if err != nil {
// return nil, fmt.Errorf("failed to parse addr from commit sender config, err: %v", err)
// }
// finalizeSenderAddr, err := addrFromSignerConfig(cfg.FinalizeSenderSignerConfig)
// if err != nil {
// return nil, fmt.Errorf("failed to parse addr from finalize sender config, err: %v", err)
// }
// if commitSenderAddr == finalizeSenderAddr {
// return nil, fmt.Errorf("commit and finalize sender addresses must be different. Got: Commit=%s, Finalize=%s", commitSenderAddr.Hex(), finalizeSenderAddr.Hex())
// }
commitSenderAddr, err := addrFromSignerConfig(cfg.CommitSenderSignerConfig)
if err != nil {
return nil, fmt.Errorf("failed to parse addr from commit sender config, err: %v", err)
}
finalizeSenderAddr, err := addrFromSignerConfig(cfg.FinalizeSenderSignerConfig)
if err != nil {
return nil, fmt.Errorf("failed to parse addr from finalize sender config, err: %v", err)
}
if commitSenderAddr == finalizeSenderAddr {
return nil, fmt.Errorf("commit and finalize sender addresses must be different. Got: Commit=%s, Finalize=%s", commitSenderAddr.Hex(), finalizeSenderAddr.Hex())
}

var err error
commitSender, err = sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderSignerConfig, "l2_relayer", "commit_sender", types.SenderTypeCommitBatch, db, reg)
if err != nil {
return nil, fmt.Errorf("new commit sender failed, err: %w", err)
@@ -340,28 +339,6 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
}
}

func (r *Layer2Relayer) RevertBatch(batchIndex uint64) error {
batch, err := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex)
if err != nil {
return fmt.Errorf("failed to get batch header by index: %v", err)
}

calldata, packErr := r.l1RollupABI.Pack("revertBatch", batch.BatchHeader)
if packErr != nil {
return fmt.Errorf("failed to pack rollup revertBatch with batch header: %v. error: %v", common.Bytes2Hex(batch.BatchHeader), packErr)
}

// submit genesis batch to L1 rollup contract
log.Info("--------------Morty------------", "calldata", common.Bytes2Hex(calldata))
txHash, _, err := r.commitSender.SendTransaction("revertBatch_"+batch.Hash, &r.cfg.RollupContractAddress, calldata, nil)
if err != nil {
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
}
log.Info("RevertBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchIndex", batch.Index, "validium", r.cfg.ValidiumMode)

return nil
}

// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
// Pending batches are submitted if one of the following conditions is met:
// - the first batch is too old -> forceSubmit
@@ -475,6 +452,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
// The next call of ProcessPendingBatches will then start with the batch with the different codec version.
batchesToSubmitLen := len(batchesToSubmit)
if batchesToSubmitLen > 0 && batchesToSubmit[batchesToSubmitLen-1].Batch.CodecVersion != dbBatch.CodecVersion {
forceSubmit = true
break
}

@@ -511,7 +489,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {

codecVersion := encoding.CodecVersion(firstBatch.CodecVersion)
switch codecVersion {
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9:
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
if r.cfg.ValidiumMode {
if len(batchesToSubmit) != 1 {
log.Error("validium mode only supports committing one batch at a time", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "batches count", len(batchesToSubmit))
@@ -770,7 +748,7 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error

var calldata []byte
switch encoding.CodecVersion(bundle.CodecVersion) {
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9:
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
if r.cfg.ValidiumMode {
calldata, err = r.constructFinalizeBundlePayloadValidium(dbBatch, endChunk, aggProof)
if err != nil {
@@ -1073,7 +1051,7 @@ func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithCh
commitment := common.HexToHash(lastChunk.EndBlockHash)

var version uint8
if encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV8 || encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV9 {
if encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV8 || encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV9 || encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV10 {
// Validium version line starts with v1,
// but rollup-relayer behavior follows v8.
version = 1
@@ -1277,16 +1255,20 @@ func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time, metrics *l2RelayerMetr
target := calculateTargetPrice(windowSec, r.batchStrategy, oldest, hist)
current := hist[len(hist)-1]

// apply absolute tolerance offset to target
tolerance := new(big.Int).SetUint64(r.cfg.BatchSubmission.BlobFeeTolerance)
threshold := new(big.Int).Add(target, tolerance)

currentFloat, _ := current.Float64()
targetFloat, _ := target.Float64()
metrics.rollupL2RelayerCurrentBlobPrice.Set(currentFloat)
metrics.rollupL2RelayerTargetBlobPrice.Set(targetFloat)

// if current fee > target and still inside the timeout window, skip
if current.Cmp(target) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
// if current fee > threshold (target + tolerance) and still inside the timeout window, skip
if current.Cmp(threshold) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
return true, fmt.Errorf(
"blob-fee above target & window not yet passed; current=%s target=%s age=%s",
current.String(), target.String(), time.Since(oldest),
"blob-fee above threshold & window not yet passed; current=%s target=%s threshold=%s tolerance=%s age=%s",
current.String(), target.String(), threshold.String(), tolerance.String(), time.Since(oldest),
)
}

@@ -2,6 +2,7 @@ package sender

import (
"errors"
"fmt"
"math/big"

"github.com/scroll-tech/go-ethereum"
@@ -118,7 +119,7 @@ func (s *Sender) estimateGasLimit(to *common.Address, data []byte, sidecar *type

gasLimitWithoutAccessList, err := s.client.EstimateGas(s.ctx, msg)
if err != nil {
log.Error("estimateGasLimit EstimateGas failure without access list", "error", err)
log.Error("estimateGasLimit EstimateGas failure without access list", "error", err, "msg", fmt.Sprintf("%+v", msg))
return 0, nil, err
}

@@ -13,7 +13,7 @@ import (
"github.com/holiman/uint256"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/consensus/misc"
"github.com/scroll-tech/go-ethereum/common/hexutil"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/ethclient"
@@ -28,7 +28,6 @@ import (
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
cutils "scroll-tech/common/utils"
)

const (
@@ -68,7 +67,8 @@ type FeeData struct {
// Sender Transaction sender to send transaction to l1/l2
type Sender struct {
config *config.SenderConfig
gethClient *gethclient.Client
rpcClient *rpc.Client // Raw RPC client
gethClient *gethclient.Client // Client to use for CreateAccessList
client *ethclient.Client // The client to retrieve on chain data (read-only)
writeClients []*ethclient.Client // The clients to send transactions to (write operations)
transactionSigner *TransactionSigner
@@ -142,6 +142,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, signerConfig *c
sender := &Sender{
ctx: ctx,
config: config,
rpcClient: rpcClient,
gethClient: gethclient.New(rpcClient),
client: client,
writeClients: writeClients,
@@ -321,13 +322,6 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
version = gethTypes.BlobSidecarVersion1
}

versionedBlobHash, err := cutils.CalculateVersionedBlobHash(*blobs[0])
if err != nil {
log.Error("failed to calculate versioned blob hash", "err", err)
return common.Hash{}, 0, fmt.Errorf("failed to calculate versioned blob hash, err: %w", err)
}
log.Info("--------------Morty------------", "versionedBlobHash", common.Bytes2Hex(versionedBlobHash[:]))

sidecar, err = makeSidecar(version, blobs)
if err != nil {
log.Error("failed to make sidecar for blob transaction", "error", err)
@@ -356,13 +350,6 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
return common.Hash{}, 0, fmt.Errorf("failed to insert transaction, err: %w", err)
}

rawTx, err := signedTx.MarshalBinary()
if err != nil {
log.Error("failed to marshal signed tx", "err", err)
return common.Hash{}, 0, fmt.Errorf("failed to marshal signed tx, err: %w", err)
}
log.Info("--------------Morty------------", "rawTx", common.Bytes2Hex(rawTx))

if err := s.sendTransactionToMultipleClients(signedTx); err != nil {
// Delete the transaction from the pending transaction table if it fails to send.
if updateErr := s.pendingTransactionOrm.DeleteTransactionByTxHash(s.ctx, signedTx.Hash()); updateErr != nil {
@@ -601,7 +588,6 @@ func (s *Sender) createReplacingTransaction(tx *gethTypes.Transaction, baseFee,

// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
log.Info("adjusted gas fee cap to max gas price", "original", originalGasFeeCap.Uint64(), "gasFeeCap", gasFeeCap.Uint64(), "maxGasPrice", maxGasPrice.Uint64())
gasFeeCap = maxGasPrice
}

@@ -618,7 +604,6 @@ func (s *Sender) createReplacingTransaction(tx *gethTypes.Transaction, baseFee,

// but don't exceed maxBlobGasPrice
if blobGasFeeCap.Cmp(maxBlobGasPrice) > 0 {
log.Info("adjusted blob gas fee cap to max blob gas price", "original", originalBlobGasFeeCap.Uint64(), "blobGasFeeCap", blobGasFeeCap.Uint64(), "maxBlobGasPrice", maxBlobGasPrice.Uint64())
blobGasFeeCap = maxBlobGasPrice
}

@@ -695,8 +680,6 @@ func (s *Sender) checkPendingTransaction() {
receipt, err := s.client.TransactionReceipt(s.ctx, originalTx.Hash())
if err == nil { // tx confirmed.
if receipt.BlockNumber.Uint64() <= confirmed {
// Record metrics before updating the database

if dbTxErr := s.db.Transaction(func(dbTX *gorm.DB) error {
// Update the status of the transaction to TxStatusConfirmed.
if updateErr := s.pendingTransactionOrm.UpdateTransactionStatusByTxHash(s.ctx, originalTx.Hash(), types.TxStatusConfirmed, dbTX); updateErr != nil {
@@ -860,8 +843,19 @@ func (s *Sender) getBlockNumberAndTimestampAndBaseFeeAndBlobFee(ctx context.Cont

var blobBaseFee uint64
if excess := header.ExcessBlobGas; excess != nil {
blobBaseFee = misc.CalcBlobFee(*excess).Uint64()
// Leave it up to the L1 node to compute the correct blob base fee.
// Previously we would compute it locally using `CalcBlobFee`, but
// that approach requires syncing any future L1 configuration changes.
// Note: The fetched blob base fee might not correspond to the block
// that we fetched in the previous step, but this is acceptable.
var blobBaseFeeHex hexutil.Big
if err := s.rpcClient.CallContext(ctx, &blobBaseFeeHex, "eth_blobBaseFee"); err != nil {
return 0, 0, 0, 0, fmt.Errorf("failed to call eth_blobBaseFee, err: %w", err)
}
// A correct L1 node cannot return a value that overflows uint64
blobBaseFee = blobBaseFeeHex.ToInt().Uint64()
}

// header.Number.Uint64() returns the pendingBlockNumber, so we subtract 1 to get the latestBlockNumber.
return header.Number.Uint64() - 1, header.Time, baseFee, blobBaseFee, nil
}

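The same query-the-node approach appears again in the L1 watcher below. A stand-alone sketch of just the `eth_blobBaseFee` fetch, under the same assumption the diff's comment makes (a correct node returns a value that fits in uint64); the endpoint is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/rpc"
)

// fetchBlobBaseFee asks the L1 node for the current blob base fee instead of
// recomputing it locally, so future fee-schedule changes on L1 need no
// client-side updates.
func fetchBlobBaseFee(ctx context.Context, client *rpc.Client) (uint64, error) {
	var feeHex hexutil.Big
	if err := client.CallContext(ctx, &feeHex, "eth_blobBaseFee"); err != nil {
		return 0, fmt.Errorf("failed to call eth_blobBaseFee, err: %w", err)
	}
	// Assumes a well-behaved node: the fee fits in uint64.
	return feeHex.ToInt().Uint64(), nil
}

func main() {
	client, err := rpc.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	fee, err := fetchBlobBaseFee(context.Background(), client)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("blob base fee (wei):", fee)
}
```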
@@ -19,8 +19,6 @@ type senderMetrics struct {
currentGasPrice *prometheus.GaugeVec
currentBlobGasFeeCap *prometheus.GaugeVec
currentGasLimit *prometheus.GaugeVec
txConfirmationLatency *prometheus.HistogramVec
txResendCount *prometheus.HistogramVec
}

var (

@@ -21,6 +21,7 @@ import (
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
@@ -94,8 +95,9 @@ func setupEnv(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

l1Client, err := testApps.GetPoSL1Client()
l1RawClient, err := testApps.GetPoSL1Client()
assert.NoError(t, err)
l1Client := ethclient.NewClient(l1RawClient)

chainID, err := l1Client.ChainID(context.Background())
assert.NoError(t, err)

@@ -245,11 +245,13 @@ func (p *ChunkProposer) ProposeChunk() error {
// Ensure all blocks in the same chunk use the same hardfork name
// If a different hardfork name is found, truncate the blocks slice at that point
hardforkName := encoding.GetHardforkName(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time)
hardforkBoundary := false
for i := 1; i < len(blocks); i++ {
currentHardfork := encoding.GetHardforkName(p.chainCfg, blocks[i].Header.Number.Uint64(), blocks[i].Header.Time)
if currentHardfork != hardforkName {
blocks = blocks[:i]
// Truncate blocks at hardfork boundary
blocks = blocks[:i]
hardforkBoundary = true
break
}
}
@@ -321,6 +323,19 @@ func (p *ChunkProposer) ProposeChunk() error {
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
}

// No breaking condition met, but hardfork boundary reached
if hardforkBoundary {
log.Info("hardfork boundary reached, proposing chunk",
"block count", len(chunk.Blocks),
"codec version", codecVersion,
"start block number", chunk.Blocks[0].Header.Number,
"end block number", chunk.Blocks[len(chunk.Blocks)-1].Header.Number)

p.recordAllChunkMetrics(metrics)
return p.updateDBChunkInfo(&chunk, codecVersion, metrics)
}

// No breaking condition met, check for timeout
currentTimeSec := uint64(time.Now().Unix())
if metrics.FirstBlockTimestamp+p.cfg.ChunkTimeoutSec < currentTimeSec {
log.Info("first block timeout reached",

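Reading the two hunks together: the first truncates the candidate block window at the first block whose hardfork differs from the first block's, and the second turns that truncation into an immediate chunk proposal even when no size or gas limit fired. A condensed sketch of the control flow, with `Block` and `ForkOf` as stand-ins for the real block type and `encoding.GetHardforkName`:

```go
package main

import "fmt"

// Block is a stand-in for the proposer's block type; ForkOf is a stand-in
// for encoding.GetHardforkName.
type Block struct{ Number uint64 }

func ForkOf(b Block) string {
	if b.Number >= 5 { // hypothetical fork activation at block 5
		return "galileo"
	}
	return "feynman"
}

// truncateAtForkBoundary keeps only the leading run of blocks that share the
// first block's hardfork, and reports whether a boundary was hit, which the
// proposer treats as a propose-now signal.
func truncateAtForkBoundary(blocks []Block) ([]Block, bool) {
	if len(blocks) == 0 {
		return blocks, false
	}
	fork := ForkOf(blocks[0])
	for i := 1; i < len(blocks); i++ {
		if ForkOf(blocks[i]) != fork {
			return blocks[:i], true
		}
	}
	return blocks, false
}

func main() {
	blocks := []Block{{3}, {4}, {5}, {6}}
	kept, boundary := truncateAtForkBoundary(blocks)
	fmt.Println(len(kept), boundary) // 2 true
}
```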
@@ -19,6 +19,8 @@ import (
"scroll-tech/rollup/internal/utils"
)

func newUint64(val uint64) *uint64 { return &val }

func testChunkProposerLimitsCodecV7(t *testing.T) {
tests := []struct {
name string
@@ -26,6 +28,7 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
chunkTimeoutSec uint64
expectedChunksLen int
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
GalileoTime *uint64
}{
{
name: "NoLimitReached",
@@ -62,6 +65,14 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "SingleBlockByForkBoundary",
maxL2Gas: 20_000_000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
GalileoTime: newUint64(1669364525), // timestamp of `block2`
},
}

for _, tt := range tests {
@@ -78,11 +89,26 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
_, err = chunkOrm.InsertChunk(context.Background(), &encoding.Chunk{Blocks: []*encoding.Block{{Header: &gethTypes.Header{Number: big.NewInt(0)}}}}, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)

// Initialize the chunk proposer.
chainConfig := &params.ChainConfig{
LondonBlock: big.NewInt(0),
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
FeynmanTime: new(uint64),
GalileoTime: tt.GalileoTime,
}

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxL2GasPerChunk: tt.maxL2Gas,
ChunkTimeoutSec: tt.chunkTimeoutSec,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}, db, nil)
}, encoding.CodecV7, chainConfig, db, nil)

// Run one round of chunk proposing.
cp.TryProposeChunk()

chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 1, 0)

@@ -3,13 +3,15 @@ package watcher
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/consensus/misc"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
@@ -20,7 +22,8 @@ import (
|
||||
// L1WatcherClient will listen for smart contract events from Eth L1.
|
||||
type L1WatcherClient struct {
|
||||
ctx context.Context
|
||||
client *ethclient.Client
|
||||
rpcClient *rpc.Client // Raw RPC client
|
||||
client *ethclient.Client // Go SDK RPC client
|
||||
l1BlockOrm *orm.L1Block
|
||||
|
||||
// The height of the block that the watcher has retrieved header rlp
|
||||
@@ -30,7 +33,7 @@ type L1WatcherClient struct {
|
||||
}
|
||||
|
||||
// NewL1WatcherClient returns a new instance of L1WatcherClient.
|
||||
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
|
||||
func NewL1WatcherClient(ctx context.Context, rpcClient *rpc.Client, startHeight uint64, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
|
||||
l1BlockOrm := orm.NewL1Block(db)
|
||||
savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(ctx)
|
||||
if err != nil {
|
||||
@@ -43,7 +46,8 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
|
||||
|
||||
return &L1WatcherClient{
|
||||
ctx: ctx,
|
||||
client: client,
|
||||
rpcClient: rpcClient,
|
||||
client: ethclient.NewClient(rpcClient),
|
||||
l1BlockOrm: l1BlockOrm,
|
||||
|
||||
processedBlockHeight: savedL1BlockHeight,
|
||||
@@ -80,7 +84,17 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {

 	var blobBaseFee uint64
 	if excess := block.ExcessBlobGas; excess != nil {
-		blobBaseFee = misc.CalcBlobFee(*excess).Uint64()
+		// Leave it up to the L1 node to compute the correct blob base fee.
+		// Previously we would compute it locally using `CalcBlobFee`, but
+		// that approach requires syncing any future L1 configuration changes.
+		// Note: The fetched blob base fee might not correspond to the block
+		// that we fetched in the previous step, but this is acceptable.
+		var blobBaseFeeHex hexutil.Big
+		if err := w.rpcClient.CallContext(w.ctx, &blobBaseFeeHex, "eth_blobBaseFee"); err != nil {
+			return fmt.Errorf("failed to call eth_blobBaseFee, err: %w", err)
+		}
+		// A correct L1 node should never return a value that overflows uint64.
+		blobBaseFee = blobBaseFeeHex.ToInt().Uint64()
 	}

 	l1Block := orm.L1Block{
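In isolation, the new fee lookup is a single raw JSON-RPC call decoded into a hexutil.Big. A self-contained sketch of that call, assuming any reachable L1 endpoint (the URL is illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/rpc"
)

func main() {
	// Ask the node for its current blob base fee via eth_blobBaseFee.
	rawClient, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		log.Crit("failed to dial L1 node", "err", err)
	}
	var feeHex hexutil.Big
	if err := rawClient.CallContext(context.Background(), &feeHex, "eth_blobBaseFee"); err != nil {
		log.Crit("eth_blobBaseFee call failed", "err", err)
	}
	fmt.Println("blob base fee (wei):", feeHex.ToInt())
}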
@@ -21,10 +21,10 @@ import (

 func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
 	db := setupDB(t)
-	client, err := testApps.GetPoSL1Client()
+	l1RawClient, err := testApps.GetPoSL1Client()
 	assert.NoError(t, err)
 	l1Cfg := cfg.L1Config
-	watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, db, nil)
+	watcher := NewL1WatcherClient(context.Background(), l1RawClient, l1Cfg.StartHeight, db, nil)
 	return watcher, db
 }
@@ -251,32 +251,3 @@ func (o *PendingTransaction) GetMaxNonceBySenderAddress(ctx context.Context, sen

 	return result.Nonce, nil
 }
-
-// GetTransactionByHash retrieves a transaction by its hash.
-func (o *PendingTransaction) GetTransactionByHash(ctx context.Context, hash common.Hash) (*PendingTransaction, error) {
-	var transaction PendingTransaction
-	db := o.db.WithContext(ctx)
-	db = db.Model(&PendingTransaction{})
-	db = db.Where("hash = ?", hash.String())
-	if err := db.First(&transaction).Error; err != nil {
-		if errors.Is(err, gorm.ErrRecordNotFound) {
-			return nil, fmt.Errorf("transaction not found with hash: %s", hash.String())
-		}
-		return nil, fmt.Errorf("failed to get transaction by hash, hash: %v, err: %w", hash, err)
-	}
-	return &transaction, nil
-}
-
-// CountTransactionsByContextIDAndNonce counts the number of transactions with the same context_id and nonce.
-// This is useful for tracking how many times a transaction has been resent.
-func (o *PendingTransaction) CountTransactionsByContextIDAndNonce(ctx context.Context, contextID string, nonce uint64) (int64, error) {
-	var count int64
-	db := o.db.WithContext(ctx)
-	db = db.Model(&PendingTransaction{})
-	db = db.Where("context_id = ?", contextID)
-	db = db.Where("nonce = ?", nonce)
-	if err := db.Count(&count).Error; err != nil {
-		return 0, fmt.Errorf("failed to count transactions by context_id and nonce, context_id: %s, nonce: %d, err: %w", contextID, nonce, err)
-	}
-	return count, nil
-}
@@ -186,7 +186,7 @@ func encodeBatchHeaderValidium(b *encoding.Batch, codecVersion encoding.CodecVer
 	)

 	var version uint8
-	if codecVersion == encoding.CodecV8 || codecVersion == encoding.CodecV9 {
+	if codecVersion == encoding.CodecV8 || codecVersion == encoding.CodecV9 || codecVersion == encoding.CodecV10 {
 		// Validium header versions start at v1,
 		// but rollup-relayer behavior follows codec v8.
 		version = 1
@@ -19,6 +19,7 @@ import (
 	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
+	"github.com/scroll-tech/go-ethereum/rpc"
 	"github.com/stretchr/testify/assert"
 	"gorm.io/gorm"
@@ -37,8 +38,9 @@ var (
 	rollupApp *bcmd.MockApp

 	// clients
-	l1Client *ethclient.Client
-	l2Client *ethclient.Client
+	l1RawClient *rpc.Client
+	l1Client    *ethclient.Client
+	l2Client    *ethclient.Client

 	l1Auth *bind.TransactOpts
 	l2Auth *bind.TransactOpts
@@ -91,8 +93,9 @@ func setupEnv(t *testing.T) {
 	assert.NoError(t, testApps.StartPoSL1Container())
 	rollupApp = bcmd.NewRollupApp(testApps, "../conf/config.json")

-	l1Client, err = testApps.GetPoSL1Client()
+	l1RawClient, err = testApps.GetPoSL1Client()
 	assert.NoError(t, err)
+	l1Client = ethclient.NewClient(l1RawClient)
 	l2Client, err = testApps.GetL2GethClient()
 	assert.NoError(t, err)
 	l1GethChainID, err = l1Client.ChainID(context.Background())
@@ -36,7 +36,7 @@ func testImportL1GasPrice(t *testing.T) {
 	// Create L1Watcher
 	startHeight, err := l1Client.BlockNumber(context.Background())
 	assert.NoError(t, err)
-	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, db, nil)
+	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1RawClient, startHeight-1, db, nil)

 	// fetch new blocks
 	number, err := l1Client.BlockNumber(context.Background())
@@ -110,7 +110,7 @@ func testImportDefaultL1GasPriceDueToL1GasPriceSpike(t *testing.T) {
 	// Create L1Watcher
 	startHeight, err := l1Client.BlockNumber(context.Background())
 	assert.NoError(t, err)
-	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-2, db, nil)
+	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1RawClient, startHeight-2, db, nil)

 	// fetch new blocks
 	number, err := l1Client.BlockNumber(context.Background())
rollup/tests/integration_tool/block_fetching.go (new file, 84 lines)
@@ -0,0 +1,84 @@
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/rpc"
)

func fetchAndStoreBlocks(ctx context.Context, from, to uint64) ([]*encoding.Block, error) {
	validiumMode := cfg.ValidiumMode
	cfg := cfg.FetchConfig
	client, err := rpc.Dial(cfg.Endpoint)
	if err != nil {
		return nil, fmt.Errorf("failed to connect l2 geth, endpoint %s, err %v", cfg.Endpoint, err)
	}
	defer client.Close()

	ethCli := ethclient.NewClient(client)
	var blocks []*encoding.Block
	for number := from; number <= to; number++ {
		log.Debug("retrieving block", "height", number)
		block, err := ethCli.BlockByNumber(ctx, new(big.Int).SetUint64(number))
		if err != nil {
			return nil, fmt.Errorf("failed to BlockByNumber: %v. number: %v", err, number)
		}

		blockTxs := block.Transactions()

		var count int
		for _, tx := range blockTxs {
			if tx.IsL1MessageTx() {
				count++
			}
		}
		log.Info("retrieved block", "height", block.Header().Number, "hash", block.Header().Hash().String(), "L1 message count", count)

		// use original (encrypted) L1 message txs in validium mode
		if validiumMode {
			var txs []*types.Transaction

			if count > 0 {
				log.Info("Fetching encrypted messages in validium mode")
				err = client.CallContext(ctx, &txs, "scroll_getL1MessagesInBlock", block.Hash(), "synced")
				if err != nil {
					return nil, fmt.Errorf("failed to get L1 messages: %v, block hash: %v", err, block.Hash().Hex())
				}
			}

			// sanity check
			if len(txs) != count {
				return nil, fmt.Errorf("L1 message count mismatch: expected %d, got %d", count, len(txs))
			}

			for ii := 0; ii < count; ii++ {
				// sanity check
				if blockTxs[ii].AsL1MessageTx().QueueIndex != txs[ii].AsL1MessageTx().QueueIndex {
					return nil, fmt.Errorf("L1 message queue index mismatch at index %d: expected %d, got %d", ii, blockTxs[ii].AsL1MessageTx().QueueIndex, txs[ii].AsL1MessageTx().QueueIndex)
				}

				log.Info("Replacing L1 message tx in validium mode", "index", ii, "queueIndex", txs[ii].AsL1MessageTx().QueueIndex, "decryptedTxHash", blockTxs[ii].Hash().Hex(), "originalTxHash", txs[ii].Hash().Hex())
				blockTxs[ii] = txs[ii]
			}
		}

		withdrawRoot, err3 := ethCli.StorageAt(ctx, cfg.L2MessageQueueAddress, cfg.WithdrawTrieRootSlot, big.NewInt(int64(number)))
		if err3 != nil {
			return nil, fmt.Errorf("failed to get withdrawRoot: %v. number: %v", err3, number)
		}
		blocks = append(blocks, &encoding.Block{
			Header:       block.Header(),
			Transactions: encoding.TxsToTxsData(blockTxs),
			WithdrawRoot: common.BytesToHash(withdrawRoot),
		})
	}

	return blocks, nil
}
@@ -42,13 +42,21 @@ func randomPickKfromN(n, k int, rng *rand.Rand) []int {
 	return ret
 }

-func importData(ctx context.Context, beginBlk, endBlk uint64, chkNum, batchNum, bundleNum int, seed int64) (*importRecord, error) {
+func importData(ctx context.Context, beginBlk, endBlk uint64, blocks []*encoding.Block, chkNum, batchNum, bundleNum int, seed int64) (*importRecord, error) {

 	db, err := database.InitDB(cfg.DBConfig)
 	if err != nil {
 		return nil, err
 	}

+	if len(blocks) > 0 {
+		log.Info("import block")
+		blockOrm := orm.NewL2Block(db)
+		if err := blockOrm.InsertL2Blocks(ctx, blocks); err != nil {
+			return nil, err
+		}
+	}
+
 	ret := &importRecord{}
 	// Create a new random source with the provided seed
 	source := rand.NewSource(seed)
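Taken together with block_fetching.go above, the intended flow is: fetch the block range over RPC (optionally swapping in the original encrypted L1 message txs in validium mode), then hand the slice to importData, which persists it before generating chunks, batches, and bundles. A hedged usage fragment as it might appear inside this package's action handler; cliCtx stands for the *cli.Context argument, and the block numbers and counts are illustrative:

	// Fetch blocks 100..110, then import them while producing
	// 3 chunks, 2 batches, and 1 bundle from a fixed seed.
	blocks, err := fetchAndStoreBlocks(cliCtx.Context, 100, 110)
	if err != nil {
		return err
	}
	ret, err := importData(cliCtx.Context, 100, 110, blocks, 3, 2, 1, 0)
	if err != nil {
		return err
	}
	_ = ret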
@@ -10,6 +10,7 @@ import (
 	"strings"

 	"github.com/scroll-tech/da-codec/encoding"
+	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/urfave/cli/v2"
@@ -40,12 +41,6 @@ var seedFlag = cli.Int64Flag{
 	Value: 0,
 }

-var codecFlag = cli.IntFlag{
-	Name:  "codec",
-	Usage: "codec version, valid from 6, default(auto) is 0",
-	Value: 0,
-}
-
 func parseThreeIntegers(value string) (int, int, int, error) {
 	// Split the input string by comma
 	parts := strings.Split(value, ",")
@@ -84,10 +79,21 @@ func parseThreeIntegers(value string) (int, int, int, error) {
 	return values[0], values[1], values[2], nil
 }

+type fetchConfig struct {
+	// node url.
+	Endpoint string `json:"endpoint"`
+	// The L2MessageQueue contract address deployed on the layer 2 chain.
+	L2MessageQueueAddress common.Address `json:"l2_message_queue_address"`
+	// The WithdrawTrieRootSlot in the L2MessageQueue contract.
+	WithdrawTrieRootSlot common.Hash `json:"withdraw_trie_root_slot,omitempty"`
+}
+
 // load a compatible type of config for rollup
 type config struct {
 	DBConfig     *database.Config `json:"db_config"`
+	FetchConfig  *fetchConfig     `json:"fetch_config,omitempty"`
 	ValidiumMode bool             `json:"validium_mode"`
+	CodecVersion int              `json:"codec_version"`
 }

 func init() {
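For reference, a hedged sketch of a config file these structs would accept; the endpoint and address values are illustrative (the sepolia-galileoV2 config.json later in this diff is a real instance):

	// Illustrative only: a config instance matching the structs above.
	const exampleConfig = `{
	  "db_config": {
	    "driver_name": "postgres",
	    "dsn": "postgres://dev:dev@localhost:5432/scroll?sslmode=disable"
	  },
	  "fetch_config": {
	    "endpoint": "http://localhost:8545",
	    "l2_message_queue_address": "0x5300000000000000000000000000000000000000"
	  },
	  "validium_mode": false,
	  "codec_version": 10
	}`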
@@ -97,7 +103,7 @@ func init() {
 	app.Name = "integration-test-tool"
 	app.Usage = "The Scroll L2 Integration Test Tool"
 	app.Version = version.Version
-	app.Flags = append(app.Flags, &codecFlag, &seedFlag, &outputNumFlag, &outputPathFlag)
+	app.Flags = append(app.Flags, &seedFlag, &outputNumFlag, &outputPathFlag)
 	app.Flags = append(app.Flags, utils.CommonFlags...)
 	app.Before = func(ctx *cli.Context) error {
 		if err := utils.LogSetup(ctx); err != nil {
@@ -120,13 +126,13 @@ func newConfig(file string) (*config, error) {
 		return nil, err
 	}

-	cfg := &config{}
-	err = json.Unmarshal(buf, cfg)
+	loadCfg := &config{}
+	err = json.Unmarshal(buf, loadCfg)
 	if err != nil {
 		return nil, err
 	}

-	return cfg, nil
+	return loadCfg, nil
 }

 func action(ctx *cli.Context) error {
@@ -135,9 +141,8 @@ func action(ctx *cli.Context) error {
 		return fmt.Errorf("specify begin and end block number")
 	}

-	codecFl := ctx.Int(codecFlag.Name)
-	if codecFl != 0 {
-		switch codecFl {
+	if cfg.CodecVersion != 0 {
+		switch cfg.CodecVersion {
 		case 6:
 			codecCfg = encoding.CodecV6
 		case 7:
@@ -146,8 +151,10 @@ func action(ctx *cli.Context) error {
 			codecCfg = encoding.CodecV8
 		case 9:
 			codecCfg = encoding.CodecV9
+		case 10:
+			codecCfg = encoding.CodecV10
 		default:
-			return fmt.Errorf("invalid codec version %d", codecFl)
+			return fmt.Errorf("invalid codec version %d", cfg.CodecVersion)
 		}
 		log.Info("set codec", "version", codecCfg)
 	}
@@ -161,6 +168,14 @@ func action(ctx *cli.Context) error {
 		return fmt.Errorf("invalid begin block number: %w", err)
 	}

+	var import_blocks []*encoding.Block
+	if cfg.FetchConfig != nil {
+		import_blocks, err = fetchAndStoreBlocks(ctx.Context, beginBlk, endBlk)
+		if err != nil {
+			return err
+		}
+	}
+
 	chkNum, batchNum, bundleNum, err := parseThreeIntegers(ctx.String(outputNumFlag.Name))
 	if err != nil {
 		return err
@@ -174,7 +189,7 @@ func action(ctx *cli.Context) error {

 	outputPath := ctx.String(outputPathFlag.Name)
 	log.Info("output", "Seed", seed, "file", outputPath)
-	ret, err := importData(ctx.Context, beginBlk, endBlk, chkNum, batchNum, bundleNum, seed)
+	ret, err := importData(ctx.Context, beginBlk, endBlk, import_blocks, chkNum, batchNum, bundleNum, seed)
 	if err != nil {
 		return err
 	}
@@ -5,8 +5,8 @@ go 1.22
 toolchain go1.22.2

 require (
-	github.com/scroll-tech/da-codec v0.9.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
+	github.com/scroll-tech/da-codec v0.10.0
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
 	github.com/stretchr/testify v1.10.0
 	gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
 )
@@ -93,10 +93,10 @@ github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeC
 github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
-github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
+github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
+github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
@@ -9,6 +9,9 @@ ifndef END_BLOCK
 $(error END_BLOCK is not set. Define it in .make.env or pass END_BLOCK=<end_block>)
 endif

+BLOCK_PRE_MIGRATIONS := $(wildcard conf/*.sql)
+.OPTIONAL: $(BLOCK_PRE_MIGRATIONS)
+
 all: setup_db test_tool import_data

 clean:
@@ -25,6 +28,11 @@ check_vars: | conf
 		exit 1; \
 	fi

+migration_blocks: $(BLOCK_PRE_MIGRATIONS)
+ifneq ($(strip $(BLOCK_PRE_MIGRATIONS)),)
+	GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} up-to 100
+endif
+
 setup_db: clean
 	docker compose up --detach
 	@echo "Waiting for PostgreSQL to be ready..."
@@ -42,19 +50,21 @@ setup_db: clean
 		fi; \
 	done
 	${GOOSE_CMD} up
+	GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} up-to 100

 reset_db:
 	${GOOSE_CMD} down-to 0
 	${GOOSE_CMD} up

 test_tool:
 	go build -o $(PWD)/build/bin/e2e_tool ../../rollup/tests/integration_tool

 build/bin/e2e_tool: test_tool

-import_data_euclid: build/bin/e2e_tool check_vars
-	build/bin/e2e_tool --config conf/config.json --codec 7 ${BEGIN_BLOCK} ${END_BLOCK}
-
-import_data: build/bin/e2e_tool check_vars
-	build/bin/e2e_tool --config conf/config.json --codec 8 ${BEGIN_BLOCK} ${END_BLOCK}
+import_data: build/bin/e2e_tool check_vars migration_blocks
+	build/bin/e2e_tool --config conf/config.json ${BEGIN_BLOCK} ${END_BLOCK}

 reimport_data: reset_db import_data

 coordinator_setup:
-	$(MAKE) -C ../../coordinator localsetup
+	SCROLL_FORK_NAME=${SCROLL_FORK_NAME} $(MAKE) -C ../../coordinator localsetup
 	cp -f conf/genesis.json ../../coordinator/build/bin/conf
@@ -1,2 +1,3 @@
 BEGIN_BLOCK?=35
 END_BLOCK?=49
+SCROLL_FORK_NAME=feynman
@@ -5,5 +5,6 @@
     "maxOpenNum": 5,
     "maxIdleNum": 1
   },
-  "validium_mode": true
+  "validium_mode": true,
+  "codec_version": 8
 }
tests/prover-e2e/cloak-xen/config.template.json (new file, 40 lines)
@@ -0,0 +1,40 @@
{
  "prover_manager": {
    "provers_per_session": 1,
    "session_attempts": 5,
    "external_prover_threshold": 32,
    "bundle_collection_time_sec": 180,
    "batch_collection_time_sec": 180,
    "chunk_collection_time_sec": 180,
    "verifier": {
      "min_prover_version": "v4.4.45",
      "verifiers": [
        {
          "assets_path": "assets",
          "fork_name": "feynman"
        }
      ]
    }
  },
  "db": {
    "driver_name": "postgres",
    "dsn": "postgres://dev:dev@localhost/scroll?sslmode=disable",
    "maxOpenNum": 200,
    "maxIdleNum": 20
  },
  "l2": {
    "validium_mode": true,
    "chain_id": 5343513301,
    "l2geth": {
      "endpoint": "http://cloak-xen-sequencer.sepolia.scroll.tech:8545/"
    }
  },
  "auth": {
    "secret": "prover secret key",
    "challenge_expire_duration_sec": 3600,
    "login_expire_duration_sec": 3600
  },
  "sequencer": {
    "decryption_key": "<decryption key>"
  }
}
@@ -25,7 +25,7 @@ SELECT 'INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
 quote_literal(transactions) ||
 ');'
 FROM l2_block
-WHERE number >= 1 and number <= 49
+WHERE number >= 15206780 and number <= 15206809
 ORDER BY number ASC;

 -- Write footer
@@ -1,2 +1,3 @@
 BEGIN_BLOCK?=10973711
 END_BLOCK?=10973721
+SCROLL_FORK_NAME=feynman
@@ -5,5 +5,6 @@
     "maxOpenNum": 5,
     "maxIdleNum": 1
   },
-  "validium_mode": false
+  "validium_mode": false,
+  "codec_version": 8
 }
tests/prover-e2e/sepolia-feynman/config.template.json (new file, 41 lines)
@@ -0,0 +1,41 @@
{
  "prover_manager": {
    "provers_per_session": 1,
    "session_attempts": 5,
    "external_prover_threshold": 32,
    "bundle_collection_time_sec": 180,
    "batch_collection_time_sec": 180,
    "chunk_collection_time_sec": 180,
    "verifier": {
      "min_prover_version": "v4.4.33",
      "verifiers": [
        {
          "features": "legacy_witness:openvm_13",
          "assets_path": "assets_feynman",
          "fork_name": "feynman"
        }
      ]
    }
  },
  "db": {
    "driver_name": "postgres",
    "dsn": "postgres://dev:dev@localhost/scroll?sslmode=disable",
    "maxOpenNum": 200,
    "maxIdleNum": 20
  },
  "l2": {
    "validium_mode": false,
    "chain_id": 534351,
    "l2geth": {
      "endpoint": "<search for a public rpc endpoint like alchemy>"
    }
  },
  "auth": {
    "secret": "prover secret key",
    "challenge_expire_duration_sec": 3600,
    "login_expire_duration_sec": 3600
  },
  "sequencer": {
    "decryption_key": "not needed"
  }
}
tests/prover-e2e/sepolia-feynman/genesis.json (new file, 109 lines; diff suppressed because one or more lines are too long)
tests/prover-e2e/sepolia-galileo/.make.env (new file, 3 lines)
@@ -0,0 +1,3 @@
BEGIN_BLOCK?=15206785
END_BLOCK?=15206794
SCROLL_FORK_NAME=galileo
tests/prover-e2e/sepolia-galileo/00100_import_blocks.sql (new file, 129 lines; diff suppressed because one or more lines are too long)
tests/prover-e2e/sepolia-galileo/config.json (new file, 10 lines)
@@ -0,0 +1,10 @@
{
  "db_config": {
    "driver_name": "postgres",
    "dsn": "postgres://dev:dev@localhost:5432/scroll?sslmode=disable",
    "maxOpenNum": 5,
    "maxIdleNum": 1
  },
  "validium_mode": false,
  "codec_version": 9
}
tests/prover-e2e/sepolia-galileo/config.template.json (new file, 40 lines)
@@ -0,0 +1,40 @@
{
  "prover_manager": {
    "provers_per_session": 1,
    "session_attempts": 5,
    "external_prover_threshold": 32,
    "bundle_collection_time_sec": 180,
    "batch_collection_time_sec": 180,
    "chunk_collection_time_sec": 180,
    "verifier": {
      "min_prover_version": "v4.4.33",
      "verifiers": [
        {
          "assets_path": "assets",
          "fork_name": "galileo"
        }
      ]
    }
  },
  "db": {
    "driver_name": "postgres",
    "dsn": "postgres://dev:dev@localhost/scroll?sslmode=disable",
    "maxOpenNum": 200,
    "maxIdleNum": 20
  },
  "l2": {
    "validium_mode": false,
    "chain_id": 534351,
    "l2geth": {
      "endpoint": "<search for a public rpc endpoint like alchemy>"
    }
  },
  "auth": {
    "secret": "prover secret key",
    "challenge_expire_duration_sec": 3600,
    "login_expire_duration_sec": 3600
  },
  "sequencer": {
    "decryption_key": "not needed"
  }
}
tests/prover-e2e/sepolia-galileo/genesis.json (new file, 110 lines; diff suppressed because one or more lines are too long)
tests/prover-e2e/sepolia-galileoV2/.make.env (new file, 3 lines)
@@ -0,0 +1,3 @@
BEGIN_BLOCK?=20239245
END_BLOCK?=20239250
SCROLL_FORK_NAME=galileoV2
tests/prover-e2e/sepolia-galileoV2/config.json (new file, 15 lines)
@@ -0,0 +1,15 @@
{
  "db_config": {
    "driver_name": "postgres",
    "dsn": "postgres://dev:dev@localhost:5432/scroll?sslmode=disable",
    "maxOpenNum": 5,
    "maxIdleNum": 1
  },
  "fetch_config": {
    "endpoint": "http://l2-sequencer-galileo-6.devnet.scroll.tech:8545",
    "l2_message_queue_address": "0x5300000000000000000000000000000000000000"
  },
  "validium_mode": false,
  "codec_version": 10
}
tests/prover-e2e/sepolia-galileoV2/config.template.json (new file, 40 lines)
@@ -0,0 +1,40 @@
{
  "prover_manager": {
    "provers_per_session": 1,
    "session_attempts": 5,
    "external_prover_threshold": 32,
    "bundle_collection_time_sec": 180,
    "batch_collection_time_sec": 180,
    "chunk_collection_time_sec": 180,
    "verifier": {
      "min_prover_version": "v4.4.33",
      "verifiers": [
        {
          "assets_path": "assets",
          "fork_name": "galileoV2"
        }
      ]
    }
  },
  "db": {
    "driver_name": "postgres",
    "dsn": "postgres://dev:dev@localhost/scroll?sslmode=disable",
    "maxOpenNum": 200,
    "maxIdleNum": 20
  },
  "l2": {
    "validium_mode": false,
    "chain_id": 534351,
    "l2geth": {
      "endpoint": "<search for a public rpc endpoint like alchemy>"
    }
  },
  "auth": {
    "secret": "prover secret key",
    "challenge_expire_duration_sec": 3600,
    "login_expire_duration_sec": 3600
  },
  "sequencer": {
    "decryption_key": "not needed"
  }
}
tests/prover-e2e/sepolia-galileoV2/genesis.json (new file, 111 lines; diff suppressed because one or more lines are too long)
@@ -62,4 +62,6 @@ test_run:
 test_e2e_run: ${E2E_HANDLE_SET}
 	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}

+test_e2e_run_gpu: ${E2E_HANDLE_SET}
+	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release --features cuda -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}
@@ -8,11 +8,7 @@
     "retry_wait_time_sec": 10,
     "connection_timeout_sec": 1800
   },
-  "l2geth": {
-    "endpoint": "<the url of rpc endpoint>"
-  },
   "prover": {
     "circuit_type": 2,
     "supported_proof_types": [
       1,
       2,
@@ -25,9 +21,16 @@
   },
   "circuits": {
     "feynman": {
-      "hard_fork_name": "feynman",
-      "base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/feynman/",
-      "workspace_path": ".work/feynman"
-    }
+      "workspace_path": ".work/feynman",
+      "base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/feynman/"
+    },
+    "galileo": {
+      "workspace_path": ".work/galileo",
+      "base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/galileo/"
+    },
+    "galileoV2": {
+      "base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/galileov2/",
+      "workspace_path": ".work/galileo"
+    }
   }
 }