Compare commits

..

54 Commits

Author SHA1 Message Date
lightsing
42221c9b2a fix 2026-01-06 13:00:05 +08:00
lightsing
6b4c106eb5 Merge branch 'develop' into feat/axiom
# Conflicts:
#	Cargo.lock
#	Cargo.toml
#	crates/libzkp/src/tasks/batch.rs
#	crates/prover-bin/src/prover.rs
#	tests/prover-e2e/sepolia-galileo/.make.env
#	tests/prover-e2e/sepolia-galileo/00100_import_blocks.sql
#	zkvm-prover/Makefile
2026-01-06 11:05:08 +08:00
Ho
7de388ef1a [Fix] Accept proof submission even it has been timeout (#1764) 2025-12-12 12:18:34 +09:00
Morty
27dd62eac3 feat(rollup-relayer): add blob fee tolerance (#1773) 2025-12-03 21:49:17 +08:00
Ho
22479a7952 [Feat] Galileo v2 (#1771)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-12-02 11:04:57 +01:00
lightsing
43f1895ecf fix 2025-12-02 10:51:22 +08:00
Péter Garamvölgyi
690bc01c41 feat: force commit batches at hardfork boundary (#1768) 2025-11-30 20:36:53 +01:00
Péter Garamvölgyi
e75d6c16a9 feat: propose chunk at hardfork boundary (#1767) 2025-11-28 17:21:51 +01:00
Péter Garamvölgyi
752e4e1117 fix: Fix blob fee overflow on rollup-relayer and gas-oracle (#1772) 2025-11-28 15:44:37 +01:00
lightsing
d169990168 update json template 2025-11-28 14:55:25 +08:00
lightsing
d7d29f52a9 fix ssl 2025-11-28 14:42:31 +08:00
lightsing
b5c398a711 use builder 2025-11-28 14:09:29 +08:00
lightsing
5dba980cea fix 2025-11-28 14:05:18 +08:00
lightsing
ea275cbb8a test refactor 2025-11-28 13:56:50 +08:00
lightsing
763d2b7d7d add test_work_run_raw 2025-11-27 19:02:11 +08:00
lightsing
1f263bb730 update 2025-11-27 18:45:14 +08:00
lightsing
d3837daf3a update 2025-11-27 16:04:49 +08:00
lightsing
69a20b610a update 2025-11-27 16:04:42 +08:00
lightsing
48ecc66de8 update 2025-11-27 15:53:39 +08:00
lightsing
9923856c9d add randomized_delay_sec to config template 2025-11-27 15:51:01 +08:00
lightsing
612212da4d add randomized_delay_sec 2025-11-27 15:50:42 +08:00
lightsing
b9156ab149 fix typo 2025-11-27 15:21:16 +08:00
lightsing
056326ee3e ready cloud proving 2025-11-27 15:12:28 +08:00
lightsing
54c9e278c8 also download axiom_program_ids.json 2025-11-27 13:09:04 +08:00
lightsing
7d6aea89aa Merge branch 'refs/heads/fix/macos' into feat/axiom 2025-11-27 13:08:10 +08:00
lightsing
c950ecc213 lock 2025-11-26 17:32:20 +08:00
lightsing
4cc4cc1064 revert 2025-11-26 17:32:08 +08:00
lightsing
ee8e4a39be bump 2025-11-26 17:16:08 +08:00
lightsing
a450fad09d enable full tokio 2025-11-26 17:01:41 +08:00
lightsing
3f95ffc3d7 upgrade, fmt and clippy for edition 2024 2025-11-26 16:37:56 +08:00
lightsing
49c0b1a844 update README 2025-11-26 15:49:34 +08:00
lightsing
970f8d488e debug this 2025-11-26 15:48:57 +08:00
lightsing
bf60d16ea8 fix 2025-11-26 15:37:02 +08:00
lightsing
e962e713d8 log 2025-11-26 15:28:13 +08:00
lightsing
785ce615d5 fix 2025-11-26 15:27:14 +08:00
lightsing
b64015c54d fix 2025-11-26 15:16:39 +08:00
lightsing
4e0573a820 update 2025-11-26 15:11:51 +08:00
lightsing
4bc57bb95e too verbose 2025-11-26 15:05:25 +08:00
lightsing
4bed6845c3 wtf 2025-11-26 15:03:20 +08:00
lightsing
b9846293b2 more logging 2025-11-26 15:00:02 +08:00
lightsing
397f327776 logging 2025-11-26 14:55:54 +08:00
lightsing
fe9ce35249 update 2025-11-26 14:47:37 +08:00
lightsing
4062c554a3 add logging 2025-11-26 13:58:12 +08:00
lightsing
2b38078e02 add test_axiom_e2e_run 2025-11-26 13:49:08 +08:00
lightsing
ab6490ef35 fix 2025-11-26 13:48:40 +08:00
lightsing
d5f9d55075 fix merge 2025-11-26 13:24:35 +08:00
Ho
ecfdbb342d update readme 2025-11-26 12:49:20 +09:00
Ho
98775e0bbb update test stuff 2025-11-26 10:56:10 +09:00
lightsing
6a17d2c715 Merge branch 'develop' into feat/axiom
# Conflicts:
#	Cargo.lock
#	Cargo.toml
#	crates/libzkp/Cargo.toml
#	crates/prover-bin/src/prover.rs
#	crates/prover-bin/src/zk_circuits_handler/universal.rs
#	zkvm-prover/config.json.template
2025-11-25 15:04:25 +08:00
lightsing
22b8ac7204 update 2025-11-18 14:07:38 +08:00
lightsing
8f3346c738 Merge branch 'develop' into feat/axiom
# Conflicts:
#	Cargo.lock
#	Cargo.toml
2025-11-18 13:52:23 +08:00
lightsing
2c7117ebc3 update config template 2025-11-11 15:07:32 +08:00
lightsing
3b3bd5f9ee fix 2025-11-11 14:35:50 +08:00
lightsing
2d12839a8c wip 2025-11-10 15:35:46 +08:00
83 changed files with 3135 additions and 2681 deletions

.dockerignore Normal file (1 changed line)
View File

@@ -0,0 +1 @@
target/

Cargo.lock generated (3365 changed lines)

File diff suppressed because it is too large.

View File

@@ -10,40 +10,33 @@ resolver = "2"
[workspace.package]
authors = ["Scroll developers"]
edition = "2021"
edition = "2024"
homepage = "https://scroll.io"
readme = "README.md"
repository = "https://github.com/scroll-tech/scroll"
version = "4.7.1"
[workspace.dependencies]
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" }
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91", features = ["scroll", "rkyv"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91" }
sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91", features = ["scroll"] }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll", "rkyv"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2" }
sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll"] }
axiom-sdk = { git = "https://github.com/axiom-crypto/axiom-api-cli.git", tag = "v1.0.9" }
metrics = "0.23.0"
metrics-util = "0.17"
metrics-tracing-context = "0.16.0"
anyhow = "1.0"
alloy = { version = "1", default-features = false }
alloy-primitives = { version = "1.4.1", default-features = false, features = ["tiny-keccak"] }
# also use this to trigger "serde" feature for primitives
alloy-serde = { version = "1", default-features = false }
jiff = "0.2"
serde = { version = "1", default-features = false, features = ["derive"] }
serde_json = { version = "1.0" }
serde_derive = "1.0"
serde_with = "3"
itertools = "0.14"
tiny-keccak = "2.0"
tokio = "1"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
eyre = "0.6"
once_cell = "1.20"
base64 = "0.22"

View File

@@ -1,6 +1,6 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update
L2GETH_TAG=scroll-v5.9.7
L2GETH_TAG=scroll-v5.9.17
help: ## Display this help message
@grep -h \

View File

@@ -10,8 +10,8 @@ require (
github.com/go-redis/redis/v8 v8.11.5
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.9.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251113125950-906b730d541d
github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.11.0
@@ -21,7 +21,7 @@ require (
// Hotfix for header hash incompatibility issue.
// PR: https://github.com/scroll-tech/go-ethereum/pull/1133/
// CAUTION: Requires careful handling. When upgrading go-ethereum, ensure this fix remains up-to-date in this branch.
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b
require (
dario.cat/mergo v1.0.0 // indirect

View File

@@ -309,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e h1:7U1/JilCPIqJTYNgKzlfBEkRNORRwJ8+PCmdR8/XK6A=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b h1:pMQKnroJoS/FeL1aOWkz7/u1iBHUP8PWjZstNuzoUGE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=

View File

@@ -15,7 +15,7 @@ require (
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0
github.com/testcontainers/testcontainers-go v0.30.0
github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
@@ -184,7 +184,7 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/da-codec v0.9.0 // indirect
github.com/scroll-tech/da-codec v0.10.0 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect

View File

@@ -636,10 +636,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=

View File

@@ -34,7 +34,7 @@ services:
# Sets up the genesis configuration for the go-ethereum client from a JSON file.
geth-genesis:
image: "ethereum/client-go:v1.13.14"
image: "ethereum/client-go:v1.14.0"
command: --datadir=/data/execution init /data/execution/genesis.json
volumes:
- data:/data
@@ -80,7 +80,7 @@ services:
# Runs the go-ethereum execution client with the specified, unlocked account and necessary
# APIs to allow for proof-of-stake consensus via Prysm.
geth:
image: "ethereum/client-go:v1.13.14"
image: "ethereum/client-go:v1.14.0"
command:
- --http
- --http.api=eth,net,web3

View File

@@ -1,4 +1,4 @@
FROM ethereum/client-go:v1.13.14
FROM ethereum/client-go:v1.14.0
COPY password /l1geth/
COPY genesis.json /l1geth/

View File

@@ -167,13 +167,13 @@ func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) {
return contrainer.PortEndpoint(context.Background(), "8545/tcp", "http")
}
// GetPoSL1Client returns a ethclient by dialing running PoS L1 client
func (t *TestcontainerApps) GetPoSL1Client() (*ethclient.Client, error) {
// GetPoSL1Client returns a raw rpc client by dialing the L1 node
func (t *TestcontainerApps) GetPoSL1Client() (*rpc.Client, error) {
endpoint, err := t.GetPoSL1EndPoint()
if err != nil {
return nil, err
}
return ethclient.Dial(endpoint)
return rpc.Dial(endpoint)
}
// GetDBEndPoint returns the endpoint of the running postgres container
@@ -221,7 +221,6 @@ func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
// GetL2GethClient returns a ethclient by dialing running L2Geth
func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
rpcCli, err := t.GetL2Client()
if err != nil {
return nil, err
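
Note on the hunk above: GetPoSL1Client now returns a raw *rpc.Client instead of an *ethclient.Client. A minimal sketch of how a caller that still needs the typed client could adapt, assuming the same scroll-tech/go-ethereum ethclient and rpc packages already imported in this package (the helper below is illustrative, not part of this change):

package testcontainers

import (
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rpc"
)

// wrapPoSL1Client adapts the raw RPC client now returned by GetPoSL1Client
// back into a typed ethclient.Client for callers that still need one.
func wrapPoSL1Client(raw *rpc.Client) *ethclient.Client {
	return ethclient.NewClient(raw)
}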

View File

@@ -3,7 +3,6 @@ package testcontainers
import (
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
)
@@ -14,7 +13,6 @@ func TestNewTestcontainerApps(t *testing.T) {
err error
endpoint string
gormDBclient *gorm.DB
ethclient *ethclient.Client
)
testApps := NewTestcontainerApps()
@@ -32,17 +30,17 @@ func TestNewTestcontainerApps(t *testing.T) {
endpoint, err = testApps.GetL2GethEndPoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
ethclient, err = testApps.GetL2GethClient()
l2RawClient, err := testApps.GetL2Client()
assert.NoError(t, err)
assert.NotNil(t, ethclient)
assert.NotNil(t, l2RawClient)
assert.NoError(t, testApps.StartPoSL1Container())
endpoint, err = testApps.GetPoSL1EndPoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
ethclient, err = testApps.GetPoSL1Client()
l1RawClient, err := testApps.GetPoSL1Client()
assert.NoError(t, err)
assert.NotNil(t, ethclient)
assert.NotNil(t, l1RawClient)
assert.NoError(t, testApps.StartWeb3SignerContainer(1))
endpoint, err = testApps.GetWeb3SignerEndpoint()

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.7.5"
var tag = "v4.7.10"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -49,6 +49,8 @@ localsetup: coordinator_api ## Local setup: build coordinator_api, copy config,
@echo "Setting up releases..."
cd $(CURDIR)/build && bash setup_releases.sh
run_coordinator_api: coordinator_api
cd build/bin && ./coordinator_api
#coordinator_api_skip_libzkp:
# go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api

View File

@@ -7,7 +7,7 @@ if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
fi
# default fork name from env or "galileo"
SCROLL_FORK_NAME="${SCROLL_FORK_NAME:-galileo}"
SCROLL_FORK_NAME="${SCROLL_FORK_NAME:-galileov2}"
# set ASSET_DIR by reading from config.json
CONFIG_FILE="bin/conf/config.template.json"
@@ -64,9 +64,10 @@ for ((i=0; i<$VERIFIER_COUNT; i++)); do
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/verifier.bin -O ${ASSET_DIR}/verifier.bin
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/root_verifier_vk -O ${ASSET_DIR}/root_verifier_vk
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/openVmVk.json -O ${ASSET_DIR}/openVmVk.json
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/axiom_program_ids.json -O ${ASSET_DIR}/axiom_program_ids.json
echo "Completed downloading assets for $FORK_NAME"
echo "---"
done
echo "All verifier assets downloaded successfully"
echo "All verifier assets downloaded successfully"

View File

@@ -17,7 +17,11 @@
{
"assets_path": "assets",
"fork_name": "galileo"
}
},
{
"assets_path": "assets_v2",
"fork_name": "galileoV2"
}
]
}
},

View File

@@ -9,8 +9,8 @@ require (
github.com/google/uuid v1.6.0
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.9.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/shopspring/decimal v1.3.1
github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.25.7

View File

@@ -253,10 +253,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

View File

@@ -314,7 +314,7 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkProofs []
case 0:
log.Warn("the codec version is 0, if it is not under integration test we have encountered an error here")
return taskDetail, nil
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8, encoding.CodecV9:
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
default:
return nil, fmt.Errorf("Unsupported codec version <%d>", dbBatchCodecVersion)
}

View File

@@ -155,7 +155,7 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.Cha
// HandleZkProof handle a ZkProof submitted from a prover.
// For now only proving/verifying error will lead to setting status as skipped.
// db/unmarshal errors will not because they are errors on the business logic side.
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) error {
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) (rerr error) {
m.proofReceivedTotal.Inc()
pk := ctx.GetString(coordinatorType.PublicKey)
if len(pk) == 0 {
@@ -172,6 +172,18 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
return ErrValidatorFailureProverTaskEmpty
}
defer func() {
if rerr != nil && types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverAssigned {
// trigger a last-chance closing of current task if some routine had missed it
log.Warn("last chance proof recover triggerred",
"proofID", proofParameter.TaskID,
"err", rerr,
)
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeUndefined, proofParameter)
}
}()
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
@@ -311,6 +323,20 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
}
}()
// Internally we overide the timeout failure:
// if prover task FailureType is SessionInfoFailureTimeout, the submit proof is timeout, but we still accept it
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofInvalid &&
types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc()
proverTask.ProvingStatus = int16(types.ProverAssigned)
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
log.Warn("proof submit proof have timeout", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
}
// Ensure this prover is eligible to participate in the prover task.
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid ||
types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofInvalid {
@@ -328,9 +354,6 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
return ErrValidatorFailureProverTaskCannotSubmitTwice
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
if proofParameter.Status != int(coordinatorType.StatusOk) {
// Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs.
failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1)
@@ -346,14 +369,6 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
return ErrValidatorFailureProofMsgStatusNotOk
}
// if prover task FailureType is SessionInfoFailureTimeout, the submit proof is timeout, need skip it
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc()
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
return ErrValidatorFailureProofTimeout
}
// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofParameter); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofParameter.TaskID, "proverPublicKey", pk,
@@ -368,6 +383,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
return ErrValidatorFailureTaskHaveVerifiedSuccess
}
return nil
}
@@ -384,7 +400,7 @@ func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm
log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
"taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskVerified.String())
if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil {
if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureType(proverTask.FailureType), proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err)
return err
}
@@ -445,6 +461,9 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *
if err != nil {
return err
}
// sync status and failture type into proverTask
proverTask.ProvingStatus = int16(status)
proverTask.FailureType = int16(failureType)
if status == types.ProverProofValid && message.ProofType(proofParameter.TaskType) == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil {
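
For readability of the flattened hunks above: PR #1764 makes the coordinator accept a proof submission even when the prover task has already been marked as timed out, and adds a deferred last-chance proofRecover call in HandleZkProof. A minimal illustrative helper (not part of the actual change) expressing the new acceptance condition, using only names visible in this diff:

// Illustrative only: a prover task that was marked invalid solely because of a
// timeout is treated as still assigned, so its late proof submission is
// validated instead of being skipped with ErrValidatorFailureProofTimeout.
func acceptTimedOutSubmission(task *orm.ProverTask) bool {
	return types.ProverProveStatus(task.ProvingStatus) == types.ProverProofInvalid &&
		types.ProverTaskFailureType(task.FailureType) == types.ProverTaskFailureTypeTimeout
}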

View File

@@ -31,6 +31,8 @@ func Version(hardForkName string, ValidiumMode bool) (uint8, error) {
stfVersion = 8
case "galileo":
stfVersion = 9
case "galileov2":
stfVersion = 10
default:
return 0, errors.New("unknown fork name " + canonicalName)
}
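
The hunk above extends the hard-fork name to STF version mapping with "galileov2" -> 10. A self-contained sketch mirroring the switch in the diff, with a tiny usage example (this is a standalone illustration, not the actual libzkp Go source):

package main

import (
	"errors"
	"fmt"
)

// stfVersionFor mirrors the fork-name switch shown in the diff above;
// only the two newest Scroll forks are reproduced here.
func stfVersionFor(canonicalName string) (uint8, error) {
	switch canonicalName {
	case "galileo":
		return 9, nil
	case "galileov2":
		return 10, nil
	default:
		return 0, errors.New("unknown fork name " + canonicalName)
	}
}

func main() {
	v, _ := stfVersionFor("galileov2")
	fmt.Println(v) // prints 10
}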

View File

@@ -5,7 +5,7 @@ use alloy::{
};
use eyre::Result;
use libzkp::tasks::ChunkInterpreter;
use sbv_primitives::types::{consensus::TxL1Message, Network};
use sbv_primitives::types::{Network, consensus::TxL1Message};
use serde::{Deserialize, Serialize};
fn default_max_retry() -> u32 {

View File

@@ -11,7 +11,7 @@ crate-type = ["rlib", "cdylib"]
scroll-zkvm-types = { workspace = true, features = ["scroll"] }
scroll-zkvm-verifier.workspace = true
alloy-primitives.workspace = true #depress the effect of "native-keccak"
alloy-primitives.workspace = true # depress the effect of "native-keccak"
sbv-primitives = {workspace = true, features = ["scroll-compress-info", "scroll"]}
sbv-core = { workspace = true, features = ["scroll"] }
base64.workspace = true

View File

@@ -138,7 +138,10 @@ pub fn gen_universal_task(
// always respect the fork_name_str (which has been normalized) being passed
// if the fork_name wrapped in task is not match, consider it a malformed task
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
eyre::bail!(
"fork name in chunk task not match the calling arg, expected {fork_name_str}, get {}",
task.fork_name
);
}
if fork_name_str != version.fork.as_str() {
eyre::bail!(
@@ -156,7 +159,10 @@ pub fn gen_universal_task(
task.fork_name = task.fork_name.to_lowercase();
let version = Version::from(task.version);
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in batch task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
eyre::bail!(
"fork name in batch task not match the calling arg, expected {fork_name_str}, get {}",
task.fork_name
);
}
if fork_name_str != version.fork.as_str() {
eyre::bail!(
@@ -174,7 +180,10 @@ pub fn gen_universal_task(
task.fork_name = task.fork_name.to_lowercase();
let version = Version::from(task.version);
if fork_name_str != task.fork_name.as_str() {
eyre::bail!("fork name in bundle task not match the calling arg, expected {fork_name_str}, get {}", task.fork_name);
eyre::bail!(
"fork name in bundle task not match the calling arg, expected {fork_name_str}, get {}",
task.fork_name
);
}
if fork_name_str != version.fork.as_str() {
eyre::bail!(

View File

@@ -13,7 +13,7 @@ use scroll_zkvm_types::{
utils::{serialize_vk, vec_as_base64},
version,
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde::{Deserialize, Serialize, de::DeserializeOwned};
/// A wrapper around the actual inner proof.
#[derive(Clone, Serialize, Deserialize)]
@@ -213,7 +213,7 @@ impl<Metadata: ProofMetadata> PersistableProof for WrappedProof<Metadata> {
#[cfg(test)]
mod tests {
use base64::{prelude::BASE64_STANDARD, Engine};
use base64::{Engine, prelude::BASE64_STANDARD};
use sbv_primitives::B256;
use scroll_zkvm_types::{bundle::BundleInfo, proof::EvmProof};

View File

@@ -3,14 +3,14 @@ use eyre::Result;
use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
build_point_eval_witness, BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderValidium,
BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, LegacyBatchWitness,
ReferenceHeader, N_BLOB_BYTES,
BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderValidium, BatchInfo, BatchWitness,
Envelope, EnvelopeV6, EnvelopeV7, LegacyBatchWitness, N_BLOB_BYTES, ReferenceHeader,
build_point_eval_witness,
},
chunk::ChunkInfo,
public_inputs::{ForkName, MultiVersionPublicInputs, Version},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
utils::{RancorError, to_rkyv_bytes},
version::{Codec, Domain, STFVersion},
};
@@ -28,10 +28,10 @@ pub struct BatchHeaderValidiumWithHash {
/// Parse header types passed from golang side and adapt to the
/// definition in zkvm-prover's types
/// We distinguish the header type in golang side according to the codec
/// version, i.e. v7 - v9 (current), and validium
/// And adapt it to the corresponding header version used in zkvm-prover's witness
/// definition, i.e. v7- v8 (current), and validium
/// We distinguish the header type in golang side according to the STF
/// version, i.e. v6, v7-v10 (current), and validium
/// And adapt it to the corresponding batch header type used in zkvm-prover's witness
/// definition, i.e. v6, v7 (current), and validium
#[derive(Clone, serde::Deserialize, serde::Serialize)]
#[serde(untagged)]
#[allow(non_camel_case_types)]
@@ -40,18 +40,18 @@ pub enum BatchHeaderV {
Validium(BatchHeaderValidiumWithHash),
/// Header for scroll's STF version v6.
V6(BatchHeaderV6),
/// Header for scroll's STF versions v7, v8, v9.
/// Header for scroll's STF versions v7 - v10.
///
/// Since the codec essentially is unchanged for the above STF versions, we do not define new
/// variants, instead re-using the [`BatchHeaderV7`] variant.
V7_V8_V9(BatchHeaderV7),
V7_to_V10(BatchHeaderV7),
}
impl core::fmt::Display for BatchHeaderV {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
BatchHeaderV::V6(_) => write!(f, "V6"),
BatchHeaderV::V7_V8_V9(_) => write!(f, "V7_V8_V9"),
BatchHeaderV::V7_to_V10(_) => write!(f, "V7 - V10"),
BatchHeaderV::Validium(_) => write!(f, "Validium"),
}
}
@@ -61,26 +61,29 @@ impl BatchHeaderV {
pub fn batch_hash(&self) -> B256 {
match self {
BatchHeaderV::V6(h) => h.batch_hash(),
BatchHeaderV::V7_V8_V9(h) => h.batch_hash(),
BatchHeaderV::V7_to_V10(h) => h.batch_hash(),
BatchHeaderV::Validium(h) => h.header.batch_hash(),
}
}
pub fn must_v6_header(&self) -> &BatchHeaderV6 {
pub fn to_zkvm_batch_header_v6(&self) -> &BatchHeaderV6 {
match self {
BatchHeaderV::V6(h) => h,
_ => unreachable!("A header of {} is considered to be v6", self),
}
}
pub fn must_v7_v8_v9_header(&self) -> &BatchHeaderV7 {
pub fn to_zkvm_batch_header_v7_to_v10(&self) -> &BatchHeaderV7 {
match self {
BatchHeaderV::V7_V8_V9(h) => h,
_ => unreachable!("A header of {} is considered to be in [v7, v8, v9]", self),
BatchHeaderV::V7_to_V10(h) => h,
_ => unreachable!(
"A header of {} is considered to be in [v7, v8, v9, v10]",
self
),
}
}
pub fn must_validium_header(&self) -> &BatchHeaderValidium {
pub fn to_zkvm_batch_header_validium(&self) -> &BatchHeaderValidium {
match self {
BatchHeaderV::Validium(h) => &h.header,
_ => unreachable!("A header of {} is considered to be validium", self),
@@ -147,18 +150,32 @@ impl BatchProvingTask {
match &self.batch_header {
BatchHeaderV::Validium(_) => assert!(
version.is_validium(),
"version {:?} is not match with parsed header, get validium header but version is not validium", version,
"version {:?} is not match with parsed header, get validium header but version is not validium",
version,
),
BatchHeaderV::V6(_) => assert_eq!(version.fork, ForkName::EuclidV1,
BatchHeaderV::V6(_) => assert_eq!(
version.fork,
ForkName::EuclidV1,
"hardfork mismatch for da-codec@v6 header: found={:?}, expected={:?}",
version.fork,
ForkName::EuclidV1,
),
BatchHeaderV::V7_V8_V9(_) => assert!(
matches!(version.fork, ForkName::EuclidV2 | ForkName::Feynman | ForkName::Galileo),
"hardfork mismatch for da-codec@v7/8/9 header: found={}, expected={:?}",
BatchHeaderV::V7_to_V10(_) => assert!(
matches!(
version.fork,
ForkName::EuclidV2
| ForkName::Feynman
| ForkName::Galileo
| ForkName::GalileoV2
),
"hardfork mismatch for da-codec@v7/8/9/10 header: found={}, expected={:?}",
version.fork,
[ForkName::EuclidV2, ForkName::Feynman, ForkName::Galileo],
[
ForkName::EuclidV2,
ForkName::Feynman,
ForkName::Galileo,
ForkName::GalileoV2
],
),
}
@@ -228,23 +245,25 @@ impl BatchProvingTask {
let reference_header = match (version.domain, version.stf_version) {
(Domain::Scroll, STFVersion::V6) => {
ReferenceHeader::V6(*self.batch_header.must_v6_header())
ReferenceHeader::V6(*self.batch_header.to_zkvm_batch_header_v6())
}
// The da-codec for STF versions v7, v8, v9 is identical. In zkvm-prover we do not
// The da-codec for STF versions v7, v8, v9, v10 is identical. In zkvm-prover we do not
// create additional variants to indicate the identical behaviour of codec. Instead we
// add a separate variant for the STF version.
//
// We handle the different STF versions here however build the same batch header since
// that type does not change. The batch header's version byte constructed in the
// coordinator actually defines the STF version (v7, v8 or v9) and we can derive the
// hard-fork (feynman or galileo) and the codec from the version byte.
// coordinator actually defines the STF version (v7, v8 or v9, v10) and we can derive
// the hard-fork (e.g. feynman or galileo) and the codec from the version
// byte.
//
// Refer [`scroll_zkvm_types::public_inputs::Version`].
(Domain::Scroll, STFVersion::V7 | STFVersion::V8 | STFVersion::V9) => {
ReferenceHeader::V7_V8_V9(*self.batch_header.must_v7_v8_v9_header())
}
(
Domain::Scroll,
STFVersion::V7 | STFVersion::V8 | STFVersion::V9 | STFVersion::V10,
) => ReferenceHeader::V7_V8_V9(*self.batch_header.to_zkvm_batch_header_v7_to_v10()),
(Domain::Validium, STFVersion::V1) => {
ReferenceHeader::Validium(*self.batch_header.must_validium_header())
ReferenceHeader::Validium(*self.batch_header.to_zkvm_batch_header_validium())
}
(domain, stf_version) => {
unreachable!("unsupported domain={domain:?},stf-version={stf_version:?}")

View File

@@ -17,7 +17,7 @@ pub mod base64 {
pub mod point_eval {
use c_kzg;
use sbv_primitives::{types::eips::eip4844::BLS_MODULUS, B256 as H256, U256};
use sbv_primitives::{B256 as H256, U256, types::eips::eip4844::BLS_MODULUS};
use scroll_zkvm_types::utils::sha256_rv32;
/// Given the blob-envelope, translate it to a fixed size EIP-4844 blob.

View File

@@ -4,7 +4,7 @@ use scroll_zkvm_types::{
bundle::{BundleInfo, BundleWitness, LegacyBundleWitness},
public_inputs::{MultiVersionPublicInputs, Version},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
utils::{RancorError, to_rkyv_bytes},
};
use crate::proofs::BatchProof;

View File

@@ -1,11 +1,11 @@
use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::{types::consensus::BlockHeader, B256};
use sbv_primitives::{B256, types::consensus::BlockHeader};
use scroll_zkvm_types::{
chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness, ValidiumInputs},
chunk::{ChunkInfo, ChunkWitness, LegacyChunkWitness, ValidiumInputs, execute},
public_inputs::{MultiVersionPublicInputs, Version},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
utils::{RancorError, to_rkyv_bytes},
};
use super::chunk_interpreter::*;
@@ -224,8 +224,8 @@ impl ChunkProvingTask {
attempts += 1;
if attempts >= MAX_FETCH_NODES_ATTEMPTS {
return Err(eyre!(
"failed to fetch nodes after {MAX_FETCH_NODES_ATTEMPTS} attempts: {e}"
));
"failed to fetch nodes after {MAX_FETCH_NODES_ATTEMPTS} attempts: {e}"
));
}
let node_hash =

View File

@@ -1,6 +1,6 @@
use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::{types::consensus::TxL1Message, Bytes, B256};
use sbv_primitives::{B256, Bytes, types::consensus::TxL1Message};
/// An interpreter which is cirtical in translating chunk data
/// since we need to grep block witness and storage node data

View File

@@ -1,12 +1,12 @@
use std::{
panic::{catch_unwind, AssertUnwindSafe},
panic::{AssertUnwindSafe, catch_unwind},
path::Path,
};
use git_version::git_version;
use serde::{
de::{Deserialize, DeserializeOwned},
Serialize,
de::{Deserialize, DeserializeOwned},
};
use eyre::Result;

View File

@@ -11,5 +11,5 @@ crate-type = ["cdylib"]
[dependencies]
libzkp = { path = "../libzkp" }
l2geth = { path = "../l2geth"}
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tracing-subscriber.workspace = true
tracing.workspace = true

View File

@@ -1,6 +1,6 @@
mod utils;
use std::ffi::{c_char, CString};
use std::ffi::{CString, c_char};
use libzkp::TaskType;
use utils::{c_char_to_str, c_char_to_vec};
@@ -20,7 +20,7 @@ fn enable_dump() -> bool {
}
/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn init_tracing() {
use tracing_subscriber::filter::{EnvFilter, LevelFilter};
@@ -47,14 +47,14 @@ pub unsafe extern "C" fn init_tracing() {
}
/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn init_verifier(config: *const c_char) {
let config_str = c_char_to_str(config);
libzkp::verifier_init(config_str).unwrap();
}
/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn init_l2geth(config: *const c_char) {
let config_str = c_char_to_str(config);
l2geth::init(config_str).unwrap();
@@ -92,7 +92,7 @@ fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskT
}
/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn verify_chunk_proof(
proof: *const c_char,
fork_name: *const c_char,
@@ -101,7 +101,7 @@ pub unsafe extern "C" fn verify_chunk_proof(
}
/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
@@ -110,7 +110,7 @@ pub unsafe extern "C" fn verify_batch_proof(
}
/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn verify_bundle_proof(
proof: *const c_char,
fork_name: *const c_char,
@@ -119,7 +119,7 @@ pub unsafe extern "C" fn verify_bundle_proof(
}
/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
let fork_name_str = c_char_to_str(fork_name);
let file_str = c_char_to_str(file);
@@ -145,7 +145,7 @@ fn failed_handling_result() -> HandlingResult {
}
/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn gen_universal_task(
task_type: i32,
task: *const c_char,
@@ -166,10 +166,7 @@ pub unsafe extern "C" fn gen_universal_task(
);
return failed_handling_result();
}
Some(std::slice::from_raw_parts(
decryption_key,
decryption_key_len,
))
Some(unsafe { std::slice::from_raw_parts(decryption_key, decryption_key_len) })
} else {
None
};
@@ -185,7 +182,7 @@ pub unsafe extern "C" fn gen_universal_task(
};
let expected_vk = if expected_vk_len > 0 {
std::slice::from_raw_parts(expected_vk, expected_vk_len)
unsafe { std::slice::from_raw_parts(expected_vk, expected_vk_len) }
} else {
&[]
};
@@ -224,18 +221,18 @@ pub unsafe extern "C" fn gen_universal_task(
}
/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn release_task_result(result: HandlingResult) {
if !result.universal_task.is_null() {
let _ = CString::from_raw(result.universal_task);
let _ = unsafe { CString::from_raw(result.universal_task) };
}
if !result.metadata.is_null() {
let _ = CString::from_raw(result.metadata);
let _ = unsafe { CString::from_raw(result.metadata) };
}
}
/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn gen_wrapped_proof(
proof: *const c_char,
metadata: *const c_char,
@@ -244,7 +241,7 @@ pub unsafe extern "C" fn gen_wrapped_proof(
) -> *mut c_char {
let proof_str = c_char_to_str(proof);
let metadata_str = c_char_to_str(metadata);
let vk_data = std::slice::from_raw_parts(vk as *const u8, vk_len);
let vk_data = unsafe { std::slice::from_raw_parts(vk as *const u8, vk_len) };
match libzkp::gen_wrapped_proof(proof_str, metadata_str, vk_data) {
Ok(result) => CString::new(result).unwrap().into_raw(),
@@ -256,7 +253,7 @@ pub unsafe extern "C" fn gen_wrapped_proof(
}
/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn univ_task_compatibility_fix(task_json: *const c_char) -> *mut c_char {
let task_json_str = c_char_to_str(task_json);
match libzkp::univ_task_compatibility_fix(task_json_str) {
@@ -269,9 +266,9 @@ pub unsafe extern "C" fn univ_task_compatibility_fix(task_json: *const c_char) -
}
/// # Safety
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn release_string(ptr: *mut c_char) {
if !ptr.is_null() {
let _ = CString::from_raw(ptr);
let _ = unsafe { CString::from_raw(ptr) };
}
}

View File

@@ -6,35 +6,28 @@ edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
axiom-sdk.workspace = true
scroll-zkvm-types.workspace = true
scroll-zkvm-prover.workspace = true
libzkp = { path = "../libzkp"}
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "05648db" }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "504e71f" }
serde.workspace = true
serde_json.workspace = true
once_cell.workspace =true
base64.workspace = true
tiny-keccak = { workspace = true, features = ["sha3", "keccak"] }
eyre.workspace = true
tracing.workspace = true
futures = "0.3.30"
futures-util = "0.3"
reqwest = { version = "0.12.4", features = ["gzip", "stream"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
reqwest = { version = "0.12", features = ["gzip", "stream"] }
hex = "0.4.3"
rand = "0.8.5"
tokio = "1.37.0"
jiff.workspace = true
tokio = { workspace = true, features = ["full"] }
async-trait = "0.1"
sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"
url = { version = "2.5.4", features = ["serde",] }
serde_bytes = "0.11.15"
url = { version = "2.5.4", features = ["serde"] }
tempfile = "3.24"
[features]
default = []
cuda = ["scroll-zkvm-prover/cuda"]
cuda = ["scroll-zkvm-prover/cuda"]

View File

@@ -1,21 +1,32 @@
#[macro_use]
extern crate tracing;
mod prover;
mod types;
mod zk_circuits_handler;
use crate::prover::ProverKind;
use clap::{ArgAction, Parser, Subcommand};
use prover::{LocalProver, LocalProverConfig};
use scroll_proving_sdk::{
prover::{types::ProofType, ProverBuilder},
utils::{get_version, init_tracing},
prover::{ProverBuilder, types::ProofType},
utils::{VERSION, init_tracing},
};
use std::{
fs::File,
io::BufReader,
path::{Path, PathBuf},
};
use std::{fs::File, io::BufReader, path::Path};
#[derive(Parser, Debug)]
#[command(disable_version_flag = true)]
struct Args {
/// Prover kind
#[arg(long = "prover.kind", value_enum, default_value_t = ProverKind::Local)]
prover_kind: ProverKind,
/// Path of config file
#[arg(long = "config", default_value = "conf/config.json")]
config_file: String,
config_file: PathBuf,
#[arg(long = "forkname")]
fork_name: Option<String>,
@@ -42,8 +53,11 @@ enum Commands {
#[derive(Debug, serde::Deserialize)]
struct HandleSet {
#[serde(default)]
chunks: Vec<String>,
#[serde(default)]
batches: Vec<String>,
#[serde(default)]
bundles: Vec<String>,
}
@@ -54,13 +68,13 @@ async fn main() -> eyre::Result<()> {
let args = Args::parse();
if args.version {
println!("version is {}", get_version());
println!("version is {VERSION}");
std::process::exit(0);
}
info!(version = %VERSION, "Starting prover");
let cfg = LocalProverConfig::from_file(args.config_file)?;
let sdk_config = cfg.sdk_config.clone();
let local_prover = LocalProver::new(cfg.clone());
let (sdk_config, prover) = args.prover_kind.create_from_file(&args.config_file)?;
info!(prover = ?prover, "Loaded prover");
match args.command {
Some(Commands::Handle { task_path }) => {
@@ -68,37 +82,37 @@ async fn main() -> eyre::Result<()> {
let reader = BufReader::new(file);
let handle_set: HandleSet = serde_json::from_reader(reader)?;
let prover = ProverBuilder::new(sdk_config, local_prover)
let prover = ProverBuilder::new(sdk_config, prover)
.build()
.await
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;
let prover = std::sync::Arc::new(prover);
println!("Handling task set 1: chunks ...");
info!("Handling task set 1: chunks ...");
assert!(
prover
.clone()
.one_shot(&handle_set.chunks, ProofType::Chunk)
.await
);
println!("Done! Handling task set 2: batches ...");
info!("Done! Handling task set 2: batches ...");
assert!(
prover
.clone()
.one_shot(&handle_set.batches, ProofType::Batch)
.await
);
println!("Done! Handling task set 3: bundles ...");
info!("Done! Handling task set 3: bundles ...");
assert!(
prover
.clone()
.one_shot(&handle_set.bundles, ProofType::Bundle)
.await
);
println!("All done!");
info!("All done!");
}
None => {
let prover = ProverBuilder::new(sdk_config, local_prover)
let prover = ProverBuilder::new(sdk_config, prover)
.build()
.await
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;

View File

@@ -1,330 +1,96 @@
use crate::zk_circuits_handler::{universal::UniversalHandler, CircuitsHandler};
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::{
config::Config as SdkConfig,
prover::{
ProvingService,
proving_service::{
GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
QueryTaskResponse, TaskStatus,
QueryTaskResponse,
},
types::ProofType,
ProvingService,
},
};
use scroll_zkvm_types::ProvingTask;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
fs::File,
path::{Path, PathBuf},
sync::{Arc, LazyLock},
time::{SystemTime, UNIX_EPOCH},
};
use tokio::{runtime::Handle, sync::Mutex, task::JoinHandle};
use std::path::Path;
#[derive(Clone, Serialize, Deserialize)]
pub struct AssetsLocationData {
/// the base url to form a general downloading url for an asset, MUST HAVE A TRAILING SLASH
pub base_url: url::Url,
#[serde(default)]
/// a altered url for specififed vk
pub asset_detours: HashMap<String, url::Url>,
mod local;
pub use local::{LocalProver, LocalProverConfig};
mod axiom;
pub use axiom::{AxiomProver, AxiomProverConfig};
#[derive(Debug)]
pub enum Prover {
Local(LocalProver),
Axiom(AxiomProver),
}
impl AssetsLocationData {
pub fn gen_asset_url(&self, vk_as_path: &str, proof_type: ProofType) -> Result<url::Url> {
Ok(self.base_url.join(
match proof_type {
ProofType::Chunk => format!("chunk/{vk_as_path}/"),
ProofType::Batch => format!("batch/{vk_as_path}/"),
ProofType::Bundle => format!("bundle/{vk_as_path}/"),
t => eyre::bail!("unrecognized proof type: {}", t as u8),
}
.as_str(),
)?)
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, clap::ValueEnum)]
pub enum ProverKind {
Local,
Axiom,
}
pub fn validate(&self) -> Result<()> {
if !self.base_url.path().ends_with('/') {
eyre::bail!(
"base_url must have a trailing slash, got: {}",
self.base_url
);
}
Ok(())
}
pub async fn get_asset(
impl ProverKind {
pub fn create_from_file<P: AsRef<Path>>(
&self,
vk: &str,
url_base: &url::Url,
base_path: impl AsRef<Path>,
) -> Result<PathBuf> {
let download_files = ["app.vmexe", "openvm.toml"];
// Step 1: Create a local path for storage
let storage_path = base_path.as_ref().join(vk);
std::fs::create_dir_all(&storage_path)?;
// Step 2 & 3: Download each file if needed
let client = reqwest::Client::new();
for filename in download_files.iter() {
let local_file_path = storage_path.join(filename);
let download_url = url_base.join(filename)?;
// Check if file already exists
if local_file_path.exists() {
// Get file metadata to check size
if let Ok(metadata) = std::fs::metadata(&local_file_path) {
// Make a HEAD request to get remote file size
if let Ok(head_resp) = client.head(download_url.clone()).send().await {
if let Some(content_length) = head_resp.headers().get("content-length") {
if let Ok(remote_size) =
content_length.to_str().unwrap_or("0").parse::<u64>()
{
// If sizes match, skip download
if metadata.len() == remote_size {
println!("File {} already exists with matching size, skipping download", filename);
continue;
}
}
}
}
}
file_name: P,
) -> eyre::Result<(SdkConfig, Prover)> {
match self {
ProverKind::Local => {
let config = LocalProverConfig::from_file(file_name)?;
let sdk_config = config.sdk_config.clone();
let prover = LocalProver::new(config);
Ok((sdk_config, Prover::Local(prover)))
}
println!("Downloading {} from {}", filename, download_url);
let response = client.get(download_url).send().await?;
if !response.status().is_success() {
eyre::bail!(
"Failed to download {}: HTTP status {}",
filename,
response.status()
);
}
// Stream the content directly to file instead of loading into memory
let mut file = std::fs::File::create(&local_file_path)?;
let mut stream = response.bytes_stream();
use futures_util::StreamExt;
while let Some(chunk) = stream.next().await {
std::io::Write::write_all(&mut file, &chunk?)?;
ProverKind::Axiom => {
let config = AxiomProverConfig::from_file(file_name)?;
let sdk_config = config.sdk_config.clone();
let prover = AxiomProver::new(config);
Ok((sdk_config, Prover::Axiom(prover)))
}
}
// Step 4: Return the storage path
Ok(storage_path)
}
}
#[derive(Clone, Serialize, Deserialize)]
pub struct LocalProverConfig {
pub sdk_config: SdkConfig,
pub circuits: HashMap<String, CircuitConfig>,
}
impl LocalProverConfig {
pub fn from_reader<R>(reader: R) -> Result<Self>
where
R: std::io::Read,
{
serde_json::from_reader(reader).map_err(|e| eyre::eyre!(e))
}
pub fn from_file(file_name: String) -> Result<Self> {
let file = File::open(file_name)?;
Self::from_reader(&file)
}
}
#[derive(Clone, Serialize, Deserialize)]
pub struct CircuitConfig {
pub hard_fork_name: String,
/// The path to save assets for a specified hard fork phase
pub workspace_path: String,
#[serde(flatten)]
/// The location data for dynamic loading
pub location_data: AssetsLocationData,
/// cached vk value to save some initial cost, for debugging only
#[serde(default)]
pub vks: HashMap<ProofType, String>,
}
pub struct LocalProver {
config: LocalProverConfig,
next_task_id: u64,
current_task: Option<JoinHandle<Result<String>>>,
handlers: HashMap<String, Arc<dyn CircuitsHandler>>,
}
#[async_trait]
impl ProvingService for LocalProver {
impl ProvingService for Prover {
fn is_local(&self) -> bool {
true
}
async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
// get vk has been deprecated in new prover with dynamic asset loading scheme
GetVkResponse {
vks: vec![],
error: None,
match self {
Prover::Local(p) => p.is_local(),
Prover::Axiom(p) => p.is_local(),
}
}
async fn get_vks(&self, req: GetVkRequest) -> GetVkResponse {
match self {
Prover::Local(p) => p.get_vks(req).await,
Prover::Axiom(p) => p.get_vks(req).await,
}
}
async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
match self.do_prove(req).await {
Ok(resp) => resp,
Err(e) => ProveResponse {
status: TaskStatus::Failed,
error: Some(format!("failed to request proof: {}", e)),
..Default::default()
},
match self {
Prover::Local(p) => p.prove(req).await,
Prover::Axiom(p) => p.prove(req).await,
}
}
async fn query_task(&mut self, req: QueryTaskRequest) -> QueryTaskResponse {
if let Some(handle) = &mut self.current_task {
if handle.is_finished() {
return match handle.await {
Ok(Ok(proof)) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Success,
proof: Some(proof),
..Default::default()
},
Ok(Err(e)) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task failed: {}", e)),
..Default::default()
},
Err(e) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task panicked: {}", e)),
..Default::default()
},
};
} else {
return QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Proving,
..Default::default()
};
}
}
// If no handle is found
QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some("no proving task is running".to_string()),
..Default::default()
match self {
Prover::Local(p) => p.query_task(req).await,
Prover::Axiom(p) => p.query_task(req).await,
}
}
}
static GLOBAL_ASSET_URLS: LazyLock<HashMap<String, HashMap<String, url::Url>>> =
LazyLock::new(|| {
const ASSETS_JSON: &str = include_str!("../assets_url_preset.json");
serde_json::from_str(ASSETS_JSON).expect("Failed to parse assets_url_preset.json")
});
impl LocalProver {
pub fn new(mut config: LocalProverConfig) -> Self {
for (fork_name, circuit_config) in config.circuits.iter_mut() {
// validate each base url
circuit_config.location_data.validate().unwrap();
let mut template_url_mapping = GLOBAL_ASSET_URLS
.get(&fork_name.to_lowercase())
.cloned()
.unwrap_or_default();
// apply default settings in template
for (key, url) in circuit_config.location_data.asset_detours.drain() {
template_url_mapping.insert(key, url);
}
circuit_config.location_data.asset_detours = template_url_mapping;
// validate each detours url
for url in circuit_config.location_data.asset_detours.values() {
assert!(
url.path().ends_with('/'),
"url {} must be end with /",
url.as_str()
);
}
}
Self {
config,
next_task_id: 0,
current_task: None,
handlers: HashMap::new(),
}
}
async fn do_prove(&mut self, req: ProveRequest) -> Result<ProveResponse> {
self.next_task_id += 1;
let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;
let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
let is_openvm_13 = prover_task.use_openvm_13;
let prover_task: ProvingTask = prover_task.into();
let vk = hex::encode(&prover_task.vk);
let handler = if let Some(handler) = self.handlers.get(&vk) {
handler.clone()
} else {
let base_config = self
.config
.circuits
.get(&req.hard_fork_name)
.ok_or_else(|| {
eyre::eyre!(
"coordinator sent unexpected forkname {}",
req.hard_fork_name
)
})?;
let url_base = if let Some(url) = base_config.location_data.asset_detours.get(&vk) {
url.clone()
} else {
base_config
.location_data
.gen_asset_url(&vk, req.proof_type)?
};
let asset_path = base_config
.location_data
.get_asset(&vk, &url_base, &base_config.workspace_path)
.await?;
let circuits_handler = Arc::new(Mutex::new(UniversalHandler::new(
&asset_path,
is_openvm_13,
)?));
self.handlers.insert(vk, circuits_handler.clone());
circuits_handler
};
let handle = Handle::current();
let is_evm = req.proof_type == ProofType::Bundle;
let task_handle = tokio::task::spawn_blocking(move || {
handle.block_on(handler.get_proof_data(&prover_task, is_evm))
});
self.current_task = Some(task_handle);
Ok(ProveResponse {
task_id: self.next_task_id.to_string(),
proof_type: req.proof_type,
circuit_version: req.circuit_version,
hard_fork_name: req.hard_fork_name,
status: TaskStatus::Proving,
created_at,
input: Some(req.input),
..Default::default()
})
impl From<LocalProver> for Prover {
fn from(p: LocalProver) -> Self {
Prover::Local(p)
}
}
impl From<AxiomProver> for Prover {
fn from(p: AxiomProver) -> Self {
Prover::Axiom(p)
}
}

View File

@@ -0,0 +1,329 @@
use crate::zk_circuits_handler::universal::UniversalHandler;
use async_trait::async_trait;
use axiom_sdk::{
AxiomSdk, ProofType as AxiomProofType,
build::BuildSdk,
input::Input as AxiomInput,
prove::{ProveArgs, ProveSdk},
};
use eyre::Context;
use jiff::Timestamp;
use scroll_proving_sdk::{
config::Config as SdkConfig,
prover::{
ProofType, ProvingService,
proving_service::{
GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
QueryTaskResponse, TaskStatus,
},
},
};
use scroll_zkvm_types::{
ProvingTask,
proof::{OpenVmEvmProof, OpenVmVersionedVmStarkProof, ProofEnum},
};
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, fs::File, io::Write, path::Path};
use tempfile::NamedTempFile;
use tracing::Level;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AxiomProverConfig {
pub axiom: AxiomConfig,
pub sdk_config: SdkConfig,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AxiomConfig {
pub api_key: String,
// vk to program mapping
pub programs: HashMap<String, AxiomProgram>,
pub num_gpus: Option<usize>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AxiomProgram {
pub program_id: String,
pub config_id: String,
}
#[derive(Debug)]
pub struct AxiomProver {
config: AxiomProverConfig,
}
impl AxiomProverConfig {
pub fn from_reader<R>(reader: R) -> eyre::Result<Self>
where
R: std::io::Read,
{
serde_json::from_reader(reader).map_err(|e| eyre::eyre!(e))
}
pub fn from_file<P: AsRef<Path>>(file_name: P) -> eyre::Result<Self> {
let file = File::open(file_name)?;
Self::from_reader(&file)
}
}
#[async_trait]
impl ProvingService for AxiomProver {
fn is_local(&self) -> bool {
false
}
async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
// get vk has been deprecated in new prover with dynamic asset loading scheme
GetVkResponse {
vks: vec![],
error: None,
}
}
#[instrument(skip(self), ret, level = Level::DEBUG)]
async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
self.prove_inner(req)
.await
.unwrap_or_else(|e| ProveResponse {
status: TaskStatus::Failed,
error: Some(format!("failed to submit proof task to axiom: {}", e)),
..Default::default()
})
}
#[instrument(skip(self), ret, level = Level::DEBUG)]
async fn query_task(&mut self, req: QueryTaskRequest) -> QueryTaskResponse {
let task_id = req.task_id.clone();
self.query_task_inner(req)
.await
.unwrap_or_else(|e| QueryTaskResponse {
task_id,
status: TaskStatus::Failed,
error: Some(format!("failed to query axiom task: {}", e)),
..Default::default()
})
}
}
impl AxiomProver {
pub fn new(config: AxiomProverConfig) -> Self {
Self { config }
}
async fn make_axiom_request<R: Send + 'static>(
&self,
config_id: Option<String>,
req: impl FnOnce(AxiomSdk) -> eyre::Result<R> + Send + 'static,
) -> eyre::Result<R> {
let api_key = self.config.axiom.api_key.clone();
tokio::task::spawn_blocking(move || {
let config = axiom_sdk::AxiomConfig {
api_key: Some(api_key),
config_id,
..Default::default()
};
let sdk = AxiomSdk::new(config);
req(sdk)
})
.await
.context("failed to join axiom request")
.flatten()
}
#[instrument(skip_all, ret, err, level = Level::DEBUG)]
fn get_program(&self, vk: &[u8]) -> eyre::Result<AxiomProgram> {
let vk = hex::encode(vk);
debug!(vk = %vk);
self.config
.axiom
.programs
.get(vk.as_str())
.cloned()
.ok_or_else(|| eyre::eyre!("no axiom program configured for vk: {vk}"))
}
#[instrument(skip_all, err, level = Level::DEBUG)]
async fn prove_inner(&mut self, req: ProveRequest) -> eyre::Result<ProveResponse> {
let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
if prover_task.use_openvm_13 {
eyre::bail!("axiom prover does not support openvm v1.3 tasks");
}
let prover_task: ProvingTask = prover_task.into();
let program = self.get_program(&prover_task.vk)?;
let num_gpus = self.config.axiom.num_gpus;
let mut input_file = NamedTempFile::new()?;
let input = prover_task.build_openvm_input();
serde_json::to_writer(&mut input_file, &input)?;
input_file.flush()?;
let proof_type = if req.proof_type == ProofType::Bundle {
AxiomProofType::Evm
} else {
AxiomProofType::Stark
};
let mut response = ProveResponse {
proof_type: req.proof_type,
created_at: Timestamp::now().as_duration().as_secs_f64(),
status: TaskStatus::Proving,
..Default::default()
};
response.task_id = self
.make_axiom_request(Some(program.config_id), move |sdk| {
sdk.generate_new_proof(ProveArgs {
program_id: Some(program.program_id.clone()),
input: Some(AxiomInput::FilePath(input_file.path().to_path_buf())),
proof_type: Some(proof_type),
num_gpus,
priority: None,
})
})
.await?;
info!(
proof_type = ?req.proof_type,
identifier = %prover_task.identifier,
task_id = %response.task_id,
"submitted axiom proving task"
);
Ok(response)
}
#[instrument(skip_all, err, level = Level::DEBUG)]
async fn query_task_inner(&mut self, req: QueryTaskRequest) -> eyre::Result<QueryTaskResponse> {
let mut response = QueryTaskResponse {
task_id: req.task_id.clone(),
..Default::default()
};
let task_id = req.task_id.clone();
let (status, proof_type, proof) = self
.make_axiom_request(None, move |sdk| {
let status = sdk.get_proof_status(&task_id)?;
debug!(status = ?status, "fetched axiom task status");
let program_status = sdk.get_build_status(&status.program_uuid)?;
let proof_type = match program_status.name.as_str() {
"chunk" => ProofType::Chunk,
"batch" => ProofType::Batch,
"bundle" => ProofType::Bundle,
_ => {
return Err(eyre::eyre!("unrecognized program in: {program_status:#?}",));
}
};
let axiom_proof_type: AxiomProofType = status.proof_type.parse()?;
let proof = if status.state == "Succeeded" {
let file = NamedTempFile::new()?;
sdk.get_generated_proof(
&status.id,
&axiom_proof_type,
Some(file.path().to_path_buf()),
)?;
Some(file)
} else {
None
};
Ok((status, proof_type, proof))
})
.await?;
// Queued, Executing, Executed, AppProving, AppProvingDone, PostProcessing, Failed,
// Succeeded
response.status = match status.state.as_str() {
"Queued" => TaskStatus::Queued,
"Executing" | "Executed" | "AppProving" | "AppProvingDone" | "PostProcessing" => {
TaskStatus::Proving
}
"Succeeded" => TaskStatus::Success,
"Failed" => TaskStatus::Failed,
other => {
return Err(eyre::eyre!("unrecognized axiom task status: {other}"));
}
};
debug!(status = ?response.status, "mapped axiom task status");
if response.status == TaskStatus::Failed {
response.error = Some(
status
.error_message
.unwrap_or_else(|| "unknown error".to_string()),
);
}
response.proof_type = proof_type;
let created_at: Timestamp = status.created_at.parse()?;
response.created_at = created_at.as_duration().as_secs_f64();
if let Some(launched_at) = status.launched_at
&& !launched_at.is_empty()
{
let started_at: Timestamp = launched_at.parse()?;
let started_at = started_at.as_duration();
response.started_at = Some(started_at.as_secs_f64());
if let Some(terminated_at) = status.terminated_at
&& !terminated_at.is_empty()
{
let finished_at: Timestamp = terminated_at.parse()?;
let finished_at = finished_at.as_duration();
response.finished_at = Some(finished_at.as_secs_f64());
let duration = finished_at.checked_sub(started_at).ok_or_else(|| {
eyre::eyre!(
"invalid timestamps: started_at={:?}, finished_at={:?}",
started_at,
finished_at
)
})?;
response.compute_time_sec = Some(duration.as_secs_f64());
info!(
task_id = %req.task_id,
launched_at = %format_args!("{launched_at:#}"),
terminated_at = %format_args!("{terminated_at:#}"),
duration = %format_args!("{duration:#}"),
priority = %status.priority,
"completed"
);
info!(
task_id = %req.task_id,
cells_used = %status.cells_used,
num_gpus = %status.num_gpus,
"resource usage"
);
if let Some(num_instructions) = status.num_instructions {
let mhz = num_instructions as f64 / (duration.as_secs_f64() * 1_000_000.0);
info!(
task_id = %req.task_id,
cycles = %num_instructions,
MHz = %format_args!("{mhz:.2}"),
"performance"
);
}
}
}
if let Some(proof_file) = proof {
let proof = match proof_type {
ProofType::Bundle => {
let proof: OpenVmEvmProof = serde_json::from_reader(proof_file)?;
ProofEnum::Evm(proof.into())
}
_ => {
let proof: OpenVmVersionedVmStarkProof = serde_json::from_reader(proof_file)?;
ProofEnum::Stark(proof.try_into()?)
}
};
response.proof = Some(serde_json::to_string(&proof)?);
}
Ok(response)
}
}
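
The query path above derives `compute_time_sec` from the reported `launched_at`/`terminated_at` timestamps and then logs an effective clock rate as cycles divided by proving time. A minimal sketch of that arithmetic, shown here in Go with made-up values (the field names and numbers are illustrative, not the Axiom API):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical timestamps as a proving service might report them.
	launchedAt, _ := time.Parse(time.RFC3339, "2025-11-26T13:49:08Z")
	terminatedAt, _ := time.Parse(time.RFC3339, "2025-11-26T13:54:08Z")

	duration := terminatedAt.Sub(launchedAt) // corresponds to compute_time_sec
	numInstructions := 1_200_000_000.0       // reported cycle count (made up)

	// Effective speed in MHz: cycles / (seconds * 1e6).
	mhz := numInstructions / (duration.Seconds() * 1_000_000.0)
	fmt.Printf("compute_time_sec=%.0f MHz=%.2f\n", duration.Seconds(), mhz)
}
```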

View File

@@ -0,0 +1,342 @@
use crate::zk_circuits_handler::{CircuitsHandler, universal::UniversalHandler};
use async_trait::async_trait;
use eyre::Result;
use scroll_proving_sdk::{
config::Config as SdkConfig,
prover::{
ProvingService,
proving_service::{
GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
QueryTaskResponse, TaskStatus,
},
types::ProofType,
},
};
use scroll_zkvm_types::ProvingTask;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
fmt,
fs::File,
path::{Path, PathBuf},
sync::{Arc, LazyLock},
time::{SystemTime, UNIX_EPOCH},
};
use tokio::{runtime::Handle, sync::Mutex, task::JoinHandle};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AssetsLocationData {
/// the base URL used to form download URLs for assets; MUST HAVE A TRAILING SLASH
pub base_url: url::Url,
#[serde(default)]
/// an override URL for a specified vk
pub asset_detours: HashMap<String, url::Url>,
}
impl AssetsLocationData {
pub fn gen_asset_url(&self, vk_as_path: &str, proof_type: ProofType) -> Result<url::Url> {
Ok(self.base_url.join(
match proof_type {
ProofType::Chunk => format!("chunk/{vk_as_path}/"),
ProofType::Batch => format!("batch/{vk_as_path}/"),
ProofType::Bundle => format!("bundle/{vk_as_path}/"),
t => eyre::bail!("unrecognized proof type: {}", t as u8),
}
.as_str(),
)?)
}
pub fn validate(&self) -> Result<()> {
if !self.base_url.path().ends_with('/') {
eyre::bail!(
"base_url must have a trailing slash, got: {}",
self.base_url
);
}
Ok(())
}
pub async fn get_asset(
&self,
vk: &str,
url_base: &url::Url,
base_path: impl AsRef<Path>,
) -> Result<PathBuf> {
let download_files = ["app.vmexe", "openvm.toml"];
// Step 1: Create a local path for storage
let storage_path = base_path.as_ref().join(vk);
std::fs::create_dir_all(&storage_path)?;
// Step 2 & 3: Download each file if needed
let client = reqwest::Client::new();
for filename in download_files.iter() {
let local_file_path = storage_path.join(filename);
let download_url = url_base.join(filename)?;
// Check if file already exists
if local_file_path.exists() {
// Get file metadata to check size
if let Ok(metadata) = std::fs::metadata(&local_file_path) {
// Make a HEAD request to get remote file size
if let Ok(head_resp) = client.head(download_url.clone()).send().await {
if let Some(content_length) = head_resp.headers().get("content-length") {
if let Ok(remote_size) =
content_length.to_str().unwrap_or("0").parse::<u64>()
{
// If sizes match, skip download
if metadata.len() == remote_size {
println!(
"File {} already exists with matching size, skipping download",
filename
);
continue;
}
}
}
}
}
}
println!("Downloading {} from {}", filename, download_url);
let response = client.get(download_url).send().await?;
if !response.status().is_success() {
eyre::bail!(
"Failed to download {}: HTTP status {}",
filename,
response.status()
);
}
// Stream the content directly to file instead of loading into memory
let mut file = std::fs::File::create(&local_file_path)?;
let mut stream = response.bytes_stream();
use futures_util::StreamExt;
while let Some(chunk) = stream.next().await {
std::io::Write::write_all(&mut file, &chunk?)?;
}
}
// Step 4: Return the storage path
Ok(storage_path)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalProverConfig {
pub sdk_config: SdkConfig,
pub circuits: HashMap<String, CircuitConfig>,
}
impl LocalProverConfig {
pub fn from_reader<R>(reader: R) -> Result<Self>
where
R: std::io::Read,
{
serde_json::from_reader(reader).map_err(|e| eyre::eyre!(e))
}
pub fn from_file<P: AsRef<Path>>(file_name: P) -> Result<Self> {
let file = File::open(file_name)?;
Self::from_reader(&file)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CircuitConfig {
/// The path to save assets for a specified hard fork phase
pub workspace_path: String,
#[serde(flatten)]
/// The location data for dynamic loading
pub location_data: AssetsLocationData,
/// cached vk value to save some initial cost, for debugging only
#[serde(default)]
pub vks: HashMap<ProofType, String>,
}
pub struct LocalProver {
config: LocalProverConfig,
next_task_id: u64,
current_task: Option<JoinHandle<Result<String>>>,
handlers: HashMap<String, Arc<dyn CircuitsHandler>>,
}
impl fmt::Debug for LocalProver {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("LocalProver")
.field("config", &self.config)
.field("next_task_id", &self.next_task_id)
.finish()
}
}
#[async_trait]
impl ProvingService for LocalProver {
fn is_local(&self) -> bool {
true
}
async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
// get_vks has been deprecated in the new prover with the dynamic asset loading scheme
GetVkResponse {
vks: vec![],
error: None,
}
}
async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
match self.do_prove(req).await {
Ok(resp) => resp,
Err(e) => ProveResponse {
status: TaskStatus::Failed,
error: Some(format!("failed to request proof: {}", e)),
..Default::default()
},
}
}
async fn query_task(&mut self, req: QueryTaskRequest) -> QueryTaskResponse {
if let Some(handle) = &mut self.current_task {
if handle.is_finished() {
return match handle.await {
Ok(Ok(proof)) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Success,
proof: Some(proof),
..Default::default()
},
Ok(Err(e)) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task failed: {}", e)),
..Default::default()
},
Err(e) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task panicked: {}", e)),
..Default::default()
},
};
} else {
return QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Proving,
..Default::default()
};
}
}
// If no handle is found
QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some("no proving task is running".to_string()),
..Default::default()
}
}
}
static GLOBAL_ASSET_URLS: LazyLock<HashMap<String, HashMap<String, url::Url>>> =
LazyLock::new(|| {
const ASSETS_JSON: &str = include_str!("../../assets_url_preset.json");
serde_json::from_str(ASSETS_JSON).expect("Failed to parse assets_url_preset.json")
});
impl LocalProver {
pub fn new(mut config: LocalProverConfig) -> Self {
for (fork_name, circuit_config) in config.circuits.iter_mut() {
// validate each base url
circuit_config.location_data.validate().unwrap();
let mut template_url_mapping = GLOBAL_ASSET_URLS
.get(&fork_name.to_lowercase())
.cloned()
.unwrap_or_default();
// merge user-provided detours on top of the template defaults
for (key, url) in circuit_config.location_data.asset_detours.drain() {
template_url_mapping.insert(key, url);
}
circuit_config.location_data.asset_detours = template_url_mapping;
// validate each detours url
for url in circuit_config.location_data.asset_detours.values() {
assert!(
url.path().ends_with('/'),
"url {} must be end with /",
url.as_str()
);
}
}
Self {
config,
next_task_id: 0,
current_task: None,
handlers: HashMap::new(),
}
}
async fn do_prove(&mut self, req: ProveRequest) -> Result<ProveResponse> {
self.next_task_id += 1;
let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;
let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
let is_openvm_13 = prover_task.use_openvm_13;
let prover_task: ProvingTask = prover_task.into();
let vk = hex::encode(&prover_task.vk);
let handler = if let Some(handler) = self.handlers.get(&vk) {
handler.clone()
} else {
let base_config = self
.config
.circuits
.get(&req.hard_fork_name)
.ok_or_else(|| {
eyre::eyre!(
"coordinator sent unexpected forkname {}",
req.hard_fork_name
)
})?;
let url_base = if let Some(url) = base_config.location_data.asset_detours.get(&vk) {
url.clone()
} else {
base_config
.location_data
.gen_asset_url(&vk, req.proof_type)?
};
let asset_path = base_config
.location_data
.get_asset(&vk, &url_base, &base_config.workspace_path)
.await?;
let circuits_handler = Arc::new(Mutex::new(UniversalHandler::new(
&asset_path,
is_openvm_13,
)?));
self.handlers.insert(vk, circuits_handler.clone());
circuits_handler
};
let handle = Handle::current();
let is_evm = req.proof_type == ProofType::Bundle;
let task_handle = tokio::task::spawn_blocking(move || {
handle.block_on(handler.get_proof_data(&prover_task, is_evm))
});
self.current_task = Some(task_handle);
Ok(ProveResponse {
task_id: self.next_task_id.to_string(),
proof_type: req.proof_type,
circuit_version: req.circuit_version,
hard_fork_name: req.hard_fork_name,
status: TaskStatus::Proving,
created_at,
input: Some(req.input),
..Default::default()
})
}
}
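
The `get_asset` routine above only re-downloads `app.vmexe`/`openvm.toml` when the local file size differs from the remote `Content-Length` reported by a HEAD request. A rough sketch of that skip-if-same-size idea, written in Go with the standard library (the URL, paths, and helper name are placeholders, not the real asset layout):

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

// needsDownload reports whether localPath should be (re)fetched from url,
// using the remote Content-Length as a cheap freshness check.
func needsDownload(url, localPath string) (bool, error) {
	info, err := os.Stat(localPath)
	if err != nil {
		return true, nil // missing locally: download
	}
	resp, err := http.Head(url)
	if err != nil {
		return true, err // HEAD failed: fall back to downloading
	}
	defer resp.Body.Close()
	if resp.ContentLength < 0 {
		return true, nil // size unknown: be conservative and download
	}
	return info.Size() != resp.ContentLength, nil
}

func main() {
	// Placeholder URL; the prover builds it from base_url/{proof type}/{vk}/.
	need, err := needsDownload("https://example.com/chunk/vk/app.vmexe", "./workspace/app.vmexe")
	fmt.Println(need, err)
}
```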

View File

@@ -8,7 +8,7 @@ require (
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.9
github.com/pressly/goose/v3 v3.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.25.7
)

View File

@@ -121,8 +121,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=

View File

@@ -1413,16 +1413,14 @@ github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148 h1:cyK1ifU2
github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148/go.mod h1:gz5x3CsLy5htNTbv4PWRPBU9nSAujfx1U2XtFcXoFuk=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
github.com/scroll-tech/ecies-go/v2 v2.0.10-beta.1/go.mod h1:A+pHaITd+ogBm4Rk35xebF9OPiyMYlFlgqBOiY5PSjg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250206083728-ea43834c198f/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939/go.mod h1:AgU8JJxC7+nfs7R7ma35AU7dMAGW7wCw3dRZRefIKyQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e h1:7U1/JilCPIqJTYNgKzlfBEkRNORRwJ8+PCmdR8/XK6A=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b h1:pMQKnroJoS/FeL1aOWkz7/u1iBHUP8PWjZstNuzoUGE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=

View File

@@ -66,17 +66,26 @@ func action(ctx *cli.Context) error {
registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
// Init L1 connection
l1RpcClient, err := rpc.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
log.Crit("failed to dial raw RPC client to L1 endpoint", "endpoint", cfg.L1Config.Endpoint, "error", err)
}
l1client := ethclient.NewClient(l1RpcClient)
// sanity check config
if cfg.L1Config.RelayerConfig.GasOracleConfig.L1BaseFeeLimit == 0 || cfg.L1Config.RelayerConfig.GasOracleConfig.L1BlobBaseFeeLimit == 0 {
log.Crit("gas-oracle `l1_base_fee_limit` and `l1_blob_base_fee_limit` configs must be set")
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, db, registry)
// Init watcher and relayer
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1RpcClient, cfg.L1Config.StartHeight, db, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, relayer.ServiceTypeL1GasOracle, registry)
if err != nil {
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
}
// Start l1 watcher process
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
// Fetch the latest block number to decrease the delay when fetching gas prices

View File

@@ -21,7 +21,9 @@
"check_committed_batches_window_minutes": 5,
"l1_base_fee_default": 15000000000,
"l1_blob_base_fee_default": 1,
"l1_blob_base_fee_threshold": 0
"l1_blob_base_fee_threshold": 0,
"l1_base_fee_limit": 20000000000,
"l1_blob_base_fee_limit": 20000000000
},
"gas_oracle_sender_signer_config": {
"signer_type": "PrivateKey",
@@ -56,7 +58,8 @@
"min_batches": 1,
"max_batches": 6,
"timeout": 7200,
"backlog_max": 75
"backlog_max": 75,
"blob_fee_tolerance": 500000000
},
"gas_oracle_config": {
"min_gas_price": 0,

View File

@@ -15,8 +15,8 @@ require (
github.com/holiman/uint256 v1.3.2
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/da-codec v0.9.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/smartystreets/goconvey v1.8.0
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.10.0
@@ -51,7 +51,7 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
github.com/fjl/memsize v0.0.2 // indirect

View File

@@ -88,8 +88,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
@@ -287,10 +287,10 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

View File

@@ -48,6 +48,10 @@ type BatchSubmission struct {
TimeoutSec int64 `json:"timeout"`
// The maximum number of pending batches to keep in the backlog.
BacklogMax int64 `json:"backlog_max"`
// BlobFeeTolerance is the absolute tolerance (in wei) added to the target blob fee.
// If the current fee is below target + tolerance, we proceed with submission.
// This prevents skipping submission when the price difference is negligible.
BlobFeeTolerance uint64 `json:"blob_fee_tolerance"`
}
// ChainMonitor this config is used to get batch status from chain_monitor API.
@@ -109,6 +113,10 @@ type GasOracleConfig struct {
L1BaseFeeDefault uint64 `json:"l1_base_fee_default"`
L1BlobBaseFeeDefault uint64 `json:"l1_blob_base_fee_default"`
// Upper limit values for gas oracle updates
L1BaseFeeLimit uint64 `json:"l1_base_fee_limit"`
L1BlobBaseFeeLimit uint64 `json:"l1_blob_base_fee_limit"`
// L1BlobBaseFeeThreshold the threshold of L1 blob base fee to enter the default gas price mode
L1BlobBaseFeeThreshold uint64 `json:"l1_blob_base_fee_threshold"`
}
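
The two config additions above change when a batch gets submitted and how far a gas-oracle update may move: submission is skipped only while the current blob fee exceeds target + `blob_fee_tolerance`, and oracle updates are capped at `l1_base_fee_limit`/`l1_blob_base_fee_limit`. A small illustrative sketch of both checks (values are made up, not taken from the real relayer):

```go
package main

import (
	"fmt"
	"math/big"
)

// shouldSkipSubmission mirrors the tolerance check: skip only while the
// current blob fee is above target + tolerance and the window has not expired.
func shouldSkipSubmission(current, target *big.Int, toleranceWei uint64, windowExpired bool) bool {
	threshold := new(big.Int).Add(target, new(big.Int).SetUint64(toleranceWei))
	return current.Cmp(threshold) > 0 && !windowExpired
}

// capFee clamps a gas-oracle fee update to its configured upper limit.
func capFee(fee, limit uint64) uint64 {
	if fee > limit {
		return limit
	}
	return fee
}

func main() {
	target := big.NewInt(10_000_000_000)  // 10 gwei target blob fee (made up)
	current := big.NewInt(10_300_000_000) // 10.3 gwei current blob fee (made up)

	// Within the 0.5 gwei tolerance, so the batch is submitted (no skip).
	fmt.Println(shouldSkipSubmission(current, target, 500_000_000, false)) // false

	// An update above the configured limit is clamped.
	fmt.Println(capFee(25_000_000_000, 20_000_000_000)) // 20000000000
}
```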

View File

@@ -167,7 +167,7 @@ func (b *BlobUploader) constructBlobCodec(dbBatch *orm.Batch) (*kzg4844.Blob, er
Chunks: chunks,
}
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9:
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
encodingBatch = &encoding.Batch{
Index: dbBatch.Index,
ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash),

View File

@@ -173,6 +173,18 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
} else if err != nil {
return
}
// Cap base fee update at the configured upper limit
if limit := r.cfg.GasOracleConfig.L1BaseFeeLimit; baseFee > limit {
log.Error("L1 base fee exceed max limit, set to max limit", "baseFee", baseFee, "maxLimit", limit)
r.metrics.rollupL1RelayerGasPriceOracleFeeOverLimitTotal.Inc()
baseFee = limit
}
// Cap blob base fee update at the configured upper limit
if limit := r.cfg.GasOracleConfig.L1BlobBaseFeeLimit; blobBaseFee > limit {
log.Error("L1 blob base fee exceed max limit, set to max limit", "blobBaseFee", blobBaseFee, "maxLimit", limit)
r.metrics.rollupL1RelayerGasPriceOracleFeeOverLimitTotal.Inc()
blobBaseFee = limit
}
data, err := r.l1GasOracleABI.Pack("setL1BaseFeeAndBlobBaseFee", new(big.Int).SetUint64(baseFee), new(big.Int).SetUint64(blobBaseFee))
if err != nil {
log.Error("Failed to pack setL1BaseFeeAndBlobBaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "err", err)

View File

@@ -8,11 +8,12 @@ import (
)
type l1RelayerMetrics struct {
rollupL1RelayerGasPriceOraclerRunTotal prometheus.Counter
rollupL1RelayerLatestBaseFee prometheus.Gauge
rollupL1RelayerLatestBlobBaseFee prometheus.Gauge
rollupL1UpdateGasOracleConfirmedTotal prometheus.Counter
rollupL1UpdateGasOracleConfirmedFailedTotal prometheus.Counter
rollupL1RelayerGasPriceOraclerRunTotal prometheus.Counter
rollupL1RelayerLatestBaseFee prometheus.Gauge
rollupL1RelayerLatestBlobBaseFee prometheus.Gauge
rollupL1UpdateGasOracleConfirmedTotal prometheus.Counter
rollupL1UpdateGasOracleConfirmedFailedTotal prometheus.Counter
rollupL1RelayerGasPriceOracleFeeOverLimitTotal prometheus.Counter
}
var (
@@ -43,6 +44,10 @@ func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
Name: "rollup_layer1_update_gas_oracle_confirmed_failed_total",
Help: "The total number of updating layer1 gas oracle confirmed failed",
}),
rollupL1RelayerGasPriceOracleFeeOverLimitTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_gas_price_oracle_fee_over_limit_total",
Help: "The total number of times when a gas price oracle fee update went over the configured limit",
}),
}
})
return l1RelayerMetric

View File

@@ -452,6 +452,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
// The next call of ProcessPendingBatches will then start with the batch with the different codec version.
batchesToSubmitLen := len(batchesToSubmit)
if batchesToSubmitLen > 0 && batchesToSubmit[batchesToSubmitLen-1].Batch.CodecVersion != dbBatch.CodecVersion {
forceSubmit = true
break
}
@@ -488,7 +489,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
codecVersion := encoding.CodecVersion(firstBatch.CodecVersion)
switch codecVersion {
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9:
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
if r.cfg.ValidiumMode {
if len(batchesToSubmit) != 1 {
log.Error("validium mode only supports committing one batch at a time", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "batches count", len(batchesToSubmit))
@@ -747,7 +748,7 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
var calldata []byte
switch encoding.CodecVersion(bundle.CodecVersion) {
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9:
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
if r.cfg.ValidiumMode {
calldata, err = r.constructFinalizeBundlePayloadValidium(dbBatch, endChunk, aggProof)
if err != nil {
@@ -1050,7 +1051,7 @@ func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithCh
commitment := common.HexToHash(lastChunk.EndBlockHash)
var version uint8
if encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV8 || encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV9 {
if encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV8 || encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV9 || encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV10 {
// Validium version line starts with v1,
// but rollup-relayer behavior follows v8.
version = 1
@@ -1254,16 +1255,20 @@ func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time, metrics *l2RelayerMetr
target := calculateTargetPrice(windowSec, r.batchStrategy, oldest, hist)
current := hist[len(hist)-1]
// apply absolute tolerance offset to target
tolerance := new(big.Int).SetUint64(r.cfg.BatchSubmission.BlobFeeTolerance)
threshold := new(big.Int).Add(target, tolerance)
currentFloat, _ := current.Float64()
targetFloat, _ := target.Float64()
metrics.rollupL2RelayerCurrentBlobPrice.Set(currentFloat)
metrics.rollupL2RelayerTargetBlobPrice.Set(targetFloat)
// if current fee > target and still inside the timeout window, skip
if current.Cmp(target) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
// if current fee > threshold (target + tolerance) and still inside the timeout window, skip
if current.Cmp(threshold) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
return true, fmt.Errorf(
"blob-fee above target & window not yet passed; current=%s target=%s age=%s",
current.String(), target.String(), time.Since(oldest),
"blob-fee above threshold & window not yet passed; current=%s target=%s threshold=%s tolerance=%s age=%s",
current.String(), target.String(), threshold.String(), tolerance.String(), time.Since(oldest),
)
}

View File

@@ -2,6 +2,7 @@ package sender
import (
"errors"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum"
@@ -118,7 +119,7 @@ func (s *Sender) estimateGasLimit(to *common.Address, data []byte, sidecar *type
gasLimitWithoutAccessList, err := s.client.EstimateGas(s.ctx, msg)
if err != nil {
log.Error("estimateGasLimit EstimateGas failure without access list", "error", err)
log.Error("estimateGasLimit EstimateGas failure without access list", "error", err, "msg", fmt.Sprintf("%+v", msg))
return 0, nil, err
}

View File

@@ -13,7 +13,7 @@ import (
"github.com/holiman/uint256"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/consensus/misc"
"github.com/scroll-tech/go-ethereum/common/hexutil"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/ethclient"
@@ -67,7 +67,8 @@ type FeeData struct {
// Sender Transaction sender to send transaction to l1/l2
type Sender struct {
config *config.SenderConfig
gethClient *gethclient.Client
rpcClient *rpc.Client // Raw RPC client
gethClient *gethclient.Client // Client to use for CreateAccessList
client *ethclient.Client // The client to retrieve on chain data (read-only)
writeClients []*ethclient.Client // The clients to send transactions to (write operations)
transactionSigner *TransactionSigner
@@ -141,6 +142,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, signerConfig *c
sender := &Sender{
ctx: ctx,
config: config,
rpcClient: rpcClient,
gethClient: gethclient.New(rpcClient),
client: client,
writeClients: writeClients,
@@ -841,8 +843,19 @@ func (s *Sender) getBlockNumberAndTimestampAndBaseFeeAndBlobFee(ctx context.Cont
var blobBaseFee uint64
if excess := header.ExcessBlobGas; excess != nil {
blobBaseFee = misc.CalcBlobFee(*excess).Uint64()
// Leave it up to the L1 node to compute the correct blob base fee.
// Previously we would compute it locally using `CalcBlobFee`, but
// that approach requires syncing any future L1 configuration changes.
// Note: The fetched blob base fee might not correspond to the block
// that we fetched in the previous step, but this is acceptable.
var blobBaseFeeHex hexutil.Big
if err := s.rpcClient.CallContext(ctx, &blobBaseFeeHex, "eth_blobBaseFee"); err != nil {
return 0, 0, 0, 0, fmt.Errorf("failed to call eth_blobBaseFee, err: %w", err)
}
// A correct L1 node should not return a value that overflows uint64
blobBaseFee = blobBaseFeeHex.ToInt().Uint64()
}
// header.Number.Uint64() returns the pendingBlockNumber, so we subtract 1 to get the latestBlockNumber.
return header.Number.Uint64() - 1, header.Time, baseFee, blobBaseFee, nil
}
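
The sender now asks the L1 node for the blob base fee via the `eth_blobBaseFee` RPC instead of deriving it locally from the excess blob gas, and the L1 watcher below follows the same pattern. A minimal sketch of that call using the `rpc` and `hexutil` packages, as the surrounding code does (the endpoint is a placeholder; the relayer takes it from its L1 config):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/rpc"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint; the relayer uses cfg.L1Config.Endpoint.
	client, err := rpc.DialContext(ctx, "https://sepolia.example-rpc.org")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// eth_blobBaseFee returns the current blob base fee as a hex quantity.
	var blobBaseFeeHex hexutil.Big
	if err := client.CallContext(ctx, &blobBaseFeeHex, "eth_blobBaseFee"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("blob base fee (wei):", blobBaseFeeHex.ToInt().Uint64())
}
```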

View File

@@ -21,6 +21,7 @@ import (
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
@@ -94,8 +95,9 @@ func setupEnv(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
l1Client, err := testApps.GetPoSL1Client()
l1RawClient, err := testApps.GetPoSL1Client()
assert.NoError(t, err)
l1Client := ethclient.NewClient(l1RawClient)
chainID, err := l1Client.ChainID(context.Background())
assert.NoError(t, err)

View File

@@ -245,11 +245,13 @@ func (p *ChunkProposer) ProposeChunk() error {
// Ensure all blocks in the same chunk use the same hardfork name
// If a different hardfork name is found, truncate the blocks slice at that point
hardforkName := encoding.GetHardforkName(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time)
hardforkBoundary := false
for i := 1; i < len(blocks); i++ {
currentHardfork := encoding.GetHardforkName(p.chainCfg, blocks[i].Header.Number.Uint64(), blocks[i].Header.Time)
if currentHardfork != hardforkName {
blocks = blocks[:i]
// Truncate blocks at hardfork boundary
blocks = blocks[:i]
hardforkBoundary = true
break
}
}
@@ -321,6 +323,19 @@ func (p *ChunkProposer) ProposeChunk() error {
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
}
// No breaking condition met, but hardfork boundary reached
if hardforkBoundary {
log.Info("hardfork boundary reached, proposing chunk",
"block count", len(chunk.Blocks),
"codec version", codecVersion,
"start block number", chunk.Blocks[0].Header.Number,
"end block number", chunk.Blocks[len(chunk.Blocks)-1].Header.Number)
p.recordAllChunkMetrics(metrics)
return p.updateDBChunkInfo(&chunk, codecVersion, metrics)
}
// No breaking condition met, check for timeout
currentTimeSec := uint64(time.Now().Unix())
if metrics.FirstBlockTimestamp+p.cfg.ChunkTimeoutSec < currentTimeSec {
log.Info("first block timeout reached",

View File

@@ -19,6 +19,8 @@ import (
"scroll-tech/rollup/internal/utils"
)
func newUint64(val uint64) *uint64 { return &val }
func testChunkProposerLimitsCodecV7(t *testing.T) {
tests := []struct {
name string
@@ -26,6 +28,7 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
chunkTimeoutSec uint64
expectedChunksLen int
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
GalileoTime *uint64
}{
{
name: "NoLimitReached",
@@ -62,6 +65,14 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "SingleBlockByForkBoundary",
maxL2Gas: 20_000_000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
GalileoTime: newUint64(1669364525), // timestamp of `block2`
},
}
for _, tt := range tests {
@@ -78,11 +89,26 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
_, err = chunkOrm.InsertChunk(context.Background(), &encoding.Chunk{Blocks: []*encoding.Block{{Header: &gethTypes.Header{Number: big.NewInt(0)}}}}, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
// Initialize the chunk proposer.
chainConfig := &params.ChainConfig{
LondonBlock: big.NewInt(0),
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
FeynmanTime: new(uint64),
GalileoTime: tt.GalileoTime,
}
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxL2GasPerChunk: tt.maxL2Gas,
ChunkTimeoutSec: tt.chunkTimeoutSec,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}, db, nil)
}, encoding.CodecV7, chainConfig, db, nil)
// Run one round of chunk proposing.
cp.TryProposeChunk()
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 1, 0)

View File

@@ -3,13 +3,15 @@ package watcher
import (
"context"
"errors"
"fmt"
"math/big"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/consensus/misc"
"github.com/scroll-tech/go-ethereum/common/hexutil"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/types"
@@ -20,7 +22,8 @@ import (
// L1WatcherClient will listen for smart contract events from Eth L1.
type L1WatcherClient struct {
ctx context.Context
client *ethclient.Client
rpcClient *rpc.Client // Raw RPC client
client *ethclient.Client // Go SDK RPC client
l1BlockOrm *orm.L1Block
// The height of the block that the watcher has retrieved header rlp
@@ -30,7 +33,7 @@ type L1WatcherClient struct {
}
// NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
func NewL1WatcherClient(ctx context.Context, rpcClient *rpc.Client, startHeight uint64, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
l1BlockOrm := orm.NewL1Block(db)
savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(ctx)
if err != nil {
@@ -43,7 +46,8 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
return &L1WatcherClient{
ctx: ctx,
client: client,
rpcClient: rpcClient,
client: ethclient.NewClient(rpcClient),
l1BlockOrm: l1BlockOrm,
processedBlockHeight: savedL1BlockHeight,
@@ -80,7 +84,17 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
var blobBaseFee uint64
if excess := block.ExcessBlobGas; excess != nil {
blobBaseFee = misc.CalcBlobFee(*excess).Uint64()
// Leave it up to the L1 node to compute the correct blob base fee.
// Previously we would compute it locally using `CalcBlobFee`, but
// that approach requires syncing any future L1 configuration changes.
// Note: The fetched blob base fee might not correspond to the block
// that we fetched in the previous step, but this is acceptable.
var blobBaseFeeHex hexutil.Big
if err := w.rpcClient.CallContext(w.ctx, &blobBaseFeeHex, "eth_blobBaseFee"); err != nil {
return fmt.Errorf("failed to call eth_blobBaseFee, err: %w", err)
}
// A correct L1 node should not return a value that overflows uint64
blobBaseFee = blobBaseFeeHex.ToInt().Uint64()
}
l1Block := orm.L1Block{

View File

@@ -21,10 +21,10 @@ import (
func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
db := setupDB(t)
client, err := testApps.GetPoSL1Client()
l1RawClient, err := testApps.GetPoSL1Client()
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, db, nil)
watcher := NewL1WatcherClient(context.Background(), l1RawClient, l1Cfg.StartHeight, db, nil)
return watcher, db
}

View File

@@ -186,7 +186,7 @@ func encodeBatchHeaderValidium(b *encoding.Batch, codecVersion encoding.CodecVer
)
var version uint8
if codecVersion == encoding.CodecV8 || codecVersion == encoding.CodecV9 {
if codecVersion == encoding.CodecV8 || codecVersion == encoding.CodecV9 || codecVersion == encoding.CodecV10 {
// Validium version line starts with v1,
// but rollup-relayer behavior follows v8.
version = 1

View File

@@ -19,6 +19,7 @@ import (
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
@@ -37,8 +38,9 @@ var (
rollupApp *bcmd.MockApp
// clients
l1Client *ethclient.Client
l2Client *ethclient.Client
l1RawClient *rpc.Client
l1Client *ethclient.Client
l2Client *ethclient.Client
l1Auth *bind.TransactOpts
l2Auth *bind.TransactOpts
@@ -91,8 +93,9 @@ func setupEnv(t *testing.T) {
assert.NoError(t, testApps.StartPoSL1Container())
rollupApp = bcmd.NewRollupApp(testApps, "../conf/config.json")
l1Client, err = testApps.GetPoSL1Client()
l1RawClient, err = testApps.GetPoSL1Client()
assert.NoError(t, err)
l1Client = ethclient.NewClient(l1RawClient)
l2Client, err = testApps.GetL2GethClient()
assert.NoError(t, err)
l1GethChainID, err = l1Client.ChainID(context.Background())

View File

@@ -36,7 +36,7 @@ func testImportL1GasPrice(t *testing.T) {
// Create L1Watcher
startHeight, err := l1Client.BlockNumber(context.Background())
assert.NoError(t, err)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, db, nil)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1RawClient, startHeight-1, db, nil)
// fetch new blocks
number, err := l1Client.BlockNumber(context.Background())
@@ -110,7 +110,7 @@ func testImportDefaultL1GasPriceDueToL1GasPriceSpike(t *testing.T) {
// Create L1Watcher
startHeight, err := l1Client.BlockNumber(context.Background())
assert.NoError(t, err)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-2, db, nil)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1RawClient, startHeight-2, db, nil)
// fetch new blocks
number, err := l1Client.BlockNumber(context.Background())

View File

@@ -0,0 +1,84 @@
package main
import (
"context"
"fmt"
"math/big"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
)
func fetchAndStoreBlocks(ctx context.Context, from, to uint64) ([]*encoding.Block, error) {
validiumMode := cfg.ValidiumMode
cfg := cfg.FetchConfig
client, err := rpc.Dial(cfg.Endpoint)
if err != nil {
return nil, fmt.Errorf("failed to connect l2 geth, endpoint %s, err %v", cfg.Endpoint, err)
}
defer client.Close()
ethCli := ethclient.NewClient(client)
var blocks []*encoding.Block
for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number)
block, err := ethCli.BlockByNumber(ctx, new(big.Int).SetUint64(number))
if err != nil {
return nil, fmt.Errorf("failed to BlockByNumber: %v. number: %v", err, number)
}
blockTxs := block.Transactions()
var count int
for _, tx := range blockTxs {
if tx.IsL1MessageTx() {
count++
}
}
log.Info("retrieved block", "height", block.Header().Number, "hash", block.Header().Hash().String(), "L1 message count", count)
// use original (encrypted) L1 message txs in validium mode
if validiumMode {
var txs []*types.Transaction
if count > 0 {
log.Info("Fetching encrypted messages in validium mode")
err = client.CallContext(ctx, &txs, "scroll_getL1MessagesInBlock", block.Hash(), "synced")
if err != nil {
return nil, fmt.Errorf("failed to get L1 messages: %v, block hash: %v", err, block.Hash().Hex())
}
}
// sanity check
if len(txs) != count {
return nil, fmt.Errorf("L1 message count mismatch: expected %d, got %d", count, len(txs))
}
for ii := 0; ii < count; ii++ {
// sanity check
if blockTxs[ii].AsL1MessageTx().QueueIndex != txs[ii].AsL1MessageTx().QueueIndex {
return nil, fmt.Errorf("L1 message queue index mismatch at index %d: expected %d, got %d", ii, blockTxs[ii].AsL1MessageTx().QueueIndex, txs[ii].AsL1MessageTx().QueueIndex)
}
log.Info("Replacing L1 message tx in validium mode", "index", ii, "queueIndex", txs[ii].AsL1MessageTx().QueueIndex, "decryptedTxHash", blockTxs[ii].Hash().Hex(), "originalTxHash", txs[ii].Hash().Hex())
blockTxs[ii] = txs[ii]
}
}
withdrawRoot, err3 := ethCli.StorageAt(ctx, cfg.L2MessageQueueAddress, cfg.WithdrawTrieRootSlot, big.NewInt(int64(number)))
if err3 != nil {
return nil, fmt.Errorf("failed to get withdrawRoot: %v. number: %v", err3, number)
}
blocks = append(blocks, &encoding.Block{
Header: block.Header(),
Transactions: encoding.TxsToTxsData(blockTxs),
WithdrawRoot: common.BytesToHash(withdrawRoot),
})
}
return blocks, nil
}

View File

@@ -42,13 +42,21 @@ func randomPickKfromN(n, k int, rng *rand.Rand) []int {
return ret
}
func importData(ctx context.Context, beginBlk, endBlk uint64, chkNum, batchNum, bundleNum int, seed int64) (*importRecord, error) {
func importData(ctx context.Context, beginBlk, endBlk uint64, blocks []*encoding.Block, chkNum, batchNum, bundleNum int, seed int64) (*importRecord, error) {
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
return nil, err
}
if len(blocks) > 0 {
log.Info("import block")
blockOrm := orm.NewL2Block(db)
if err := blockOrm.InsertL2Blocks(ctx, blocks); err != nil {
return nil, err
}
}
ret := &importRecord{}
// Create a new random source with the provided seed
source := rand.NewSource(seed)

View File

@@ -10,6 +10,7 @@ import (
"strings"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -40,12 +41,6 @@ var seedFlag = cli.Int64Flag{
Value: 0,
}
var codecFlag = cli.IntFlag{
Name: "codec",
Usage: "codec version, valid from 6, default(auto) is 0",
Value: 0,
}
func parseThreeIntegers(value string) (int, int, int, error) {
// Split the input string by comma
parts := strings.Split(value, ",")
@@ -84,10 +79,21 @@ func parseThreeIntegers(value string) (int, int, int, error) {
return values[0], values[1], values[2], nil
}
type fetchConfig struct {
// node url.
Endpoint string `json:"endpoint"`
// The L2MessageQueue contract address deployed on layer 2 chain.
L2MessageQueueAddress common.Address `json:"l2_message_queue_address"`
// The WithdrawTrieRootSlot in L2MessageQueue contract.
WithdrawTrieRootSlot common.Hash `json:"withdraw_trie_root_slot,omitempty"`
}
// load a compatible type of config for rollup
type config struct {
DBConfig *database.Config `json:"db_config"`
FetchConfig *fetchConfig `json:"fetch_config,omitempty"`
ValidiumMode bool `json:"validium_mode"`
CodecVersion int `json:"codec_version"`
}
func init() {
@@ -97,7 +103,7 @@ func init() {
app.Name = "integration-test-tool"
app.Usage = "The Scroll L2 Integration Test Tool"
app.Version = version.Version
app.Flags = append(app.Flags, &codecFlag, &seedFlag, &outputNumFlag, &outputPathFlag)
app.Flags = append(app.Flags, &seedFlag, &outputNumFlag, &outputPathFlag)
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Before = func(ctx *cli.Context) error {
if err := utils.LogSetup(ctx); err != nil {
@@ -120,13 +126,13 @@ func newConfig(file string) (*config, error) {
return nil, err
}
cfg := &config{}
err = json.Unmarshal(buf, cfg)
loadCfg := &config{}
err = json.Unmarshal(buf, loadCfg)
if err != nil {
return nil, err
}
return cfg, nil
return loadCfg, nil
}
func action(ctx *cli.Context) error {
@@ -135,9 +141,8 @@ func action(ctx *cli.Context) error {
return fmt.Errorf("specify begin and end block number")
}
codecFl := ctx.Int(codecFlag.Name)
if codecFl != 0 {
switch codecFl {
if cfg.CodecVersion != 0 {
switch cfg.CodecVersion {
case 6:
codecCfg = encoding.CodecV6
case 7:
@@ -146,8 +151,10 @@ func action(ctx *cli.Context) error {
codecCfg = encoding.CodecV8
case 9:
codecCfg = encoding.CodecV9
case 10:
codecCfg = encoding.CodecV10
default:
return fmt.Errorf("invalid codec version %d", codecFl)
return fmt.Errorf("invalid codec version %d", cfg.CodecVersion)
}
log.Info("set codec", "version", codecCfg)
}
@@ -161,6 +168,14 @@ func action(ctx *cli.Context) error {
return fmt.Errorf("invalid begin block number: %w", err)
}
var import_blocks []*encoding.Block
if cfg.FetchConfig != nil {
import_blocks, err = fetchAndStoreBlocks(ctx.Context, beginBlk, endBlk)
if err != nil {
return err
}
}
chkNum, batchNum, bundleNum, err := parseThreeIntegers(ctx.String(outputNumFlag.Name))
if err != nil {
return err
@@ -174,7 +189,7 @@ func action(ctx *cli.Context) error {
outputPath := ctx.String(outputPathFlag.Name)
log.Info("output", "Seed", seed, "file", outputPath)
ret, err := importData(ctx.Context, beginBlk, endBlk, chkNum, batchNum, bundleNum, seed)
ret, err := importData(ctx.Context, beginBlk, endBlk, import_blocks, chkNum, batchNum, bundleNum, seed)
if err != nil {
return err
}

View File

@@ -5,8 +5,8 @@ go 1.22
toolchain go1.22.2
require (
github.com/scroll-tech/da-codec v0.9.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)

View File

@@ -93,10 +93,10 @@ github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeC
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

View File

@@ -11,6 +11,9 @@ ifndef END_BLOCK
$(error END_BLOCK is not set. Define it in .make.env or pass END_BLOCK=<end_block>)
endif
BLOCK_PRE_MIGRATIONS := $(wildcard conf/*.sql)
.OPTIONAL: $(BLOCK_PRE_MIGRATIONS)
all: setup_db test_tool import_data
clean:
@@ -27,6 +30,11 @@ check_vars: | conf
exit 1; \
fi
migration_blocks: $(BLOCK_PRE_MIGRATIONS)
ifneq ($(strip $(BLOCK_PRE_MIGRATIONS)),)
GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} up-to 100
endif
setup_db: clean
docker compose up --detach
@echo "Waiting for PostgreSQL to be ready..."
@@ -44,13 +52,10 @@ setup_db: clean
fi; \
done
${GOOSE_CMD} up
GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} up-to 100
reset_db:
GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} down
${GOOSE_CMD} down-to 0
${GOOSE_CMD} up
GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} up-to 100
test_tool:
go build -o $(PWD)/build/bin/e2e_tool ../../rollup/tests/integration_tool
@@ -58,17 +63,8 @@ test_tool:
build/bin/e2e_tool: test_tool
import_data_euclid: build/bin/e2e_tool check_vars
build/bin/e2e_tool --config conf/config.json --codec 7 ${BEGIN_BLOCK} ${END_BLOCK}
import_data_feynman: build/bin/e2e_tool check_vars
build/bin/e2e_tool --config conf/config.json --codec 8 ${BEGIN_BLOCK} ${END_BLOCK}
import_data_galileo: build/bin/e2e_tool check_vars
build/bin/e2e_tool --config conf/config.json --codec 9 ${BEGIN_BLOCK} ${END_BLOCK}
import_data: build/bin/e2e_tool check_vars
build/bin/e2e_tool --config conf/config.json --codec ${CODEC_VERSION} ${BEGIN_BLOCK} ${END_BLOCK}
import_data: build/bin/e2e_tool check_vars migration_blocks
build/bin/e2e_tool --config conf/config.json ${BEGIN_BLOCK} ${END_BLOCK}
reimport_data: reset_db import_data

View File

@@ -2,15 +2,34 @@
It contains data from some blocks in a specified testnet and helps generate a series of chunks/batches/bundles from these blocks, filling the DB for the coordinator so an e2e test (from chunk to bundle) can be run completely locally
### Pre
1. install [goose](https://github.com/pressly/goose)
```bash
go install github.com/pressly/goose/v3/cmd/goose@latest
```
Prepare:
link the data dir as "conf" from one of the dirs with a data set; currently we have the following data sets:
+ sepolia: with blocks from scroll sepolia
+ cloak-xen: with blocks from xen sepolia, which is a cloak network
- sepolia: with blocks from the scroll sepolia fork, e.g. `ln -s sepolia-galileo conf`
- galileo: with blocks from the scroll galileo fork
- cloak-xen: with blocks from xen sepolia, which is a cloak network
Steps:
1. run `make all` under `tests/prover-e2e`; it launches a PostgreSQL DB in a local docker container, ready to be used by the coordinator (including some chunks/batches/bundles waiting to be proven)
2. setup assets by run `make coordinator_setup`
2. set up assets by running `make coordinator_setup`; `SCROLL_ZKVM_VERSION` must be specified, and if the e2e test targets a fork other than `Galileo`, `SCROLL_FORK_NAME` is also required, for example:
```bash
SCROLL_FORK_NAME=feynman SCROLL_ZKVM_VERSION=v0.7.0 make coordinator_setup
```
3. in `coordinator/build/bin/conf`, update the necessary items in `config.template.json` and rename it to `config.json`
4. build and launch `coordinator_api` service locally
5. setup the `config.json` for zkvm prover to connect with the locally launched coordinator api
6. in `zkvm-prover`, launch `make test_e2e_run`, which would specific prover run locally, connect to the local coordinator api service according to the `config.json`, and prove all tasks being injected to db in step 1.
5. setup the `config.json` for zkvm prover to connect with the locally launched coordinator api:
- set the `sdk_config.coordinator.base_url` field to "http://localhost:8390"
6. in `zkvm-prover`, run `make test_e2e_run`, which runs the prover locally, connects to the local coordinator API service according to `config.json`, and proves all tasks injected into the DB in step 1.

View File

@@ -1,4 +1,3 @@
BEGIN_BLOCK?=35
END_BLOCK?=49
CODEC_VERSION?=8
SCROLL_FORK_NAME=feynman

View File

@@ -5,5 +5,6 @@
"maxOpenNum": 5,
"maxIdleNum": 1
},
"validium_mode": true
"validium_mode": true,
"codec_version": 8
}

File diff suppressed because one or more lines are too long

View File

@@ -25,7 +25,7 @@ SELECT 'INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
quote_literal(transactions) ||
');'
FROM l2_block
WHERE number >= 20278000 and number <= 20278050
WHERE number >= 15206780 and number <= 15206809
ORDER BY number ASC;
-- Write footer

View File

@@ -1,4 +1,3 @@
BEGIN_BLOCK?=10973711
END_BLOCK?=10973721
CODEC_VERSION?=8
SCROLL_FORK_NAME=feynman

View File

@@ -5,5 +5,6 @@
"maxOpenNum": 5,
"maxIdleNum": 1
},
"validium_mode": false
"validium_mode": false,
"codec_version": 8
}

View File

@@ -1,4 +1,3 @@
BEGIN_BLOCK?=20278022
END_BLOCK?=20278025
CODEC_VERSION?=9
SCROLL_FORK_NAME=galileo
BEGIN_BLOCK?=15206785
END_BLOCK?=15206794
SCROLL_FORK_NAME=galileo

File diff suppressed because one or more lines are too long

View File

@@ -5,5 +5,6 @@
"maxOpenNum": 5,
"maxIdleNum": 1
},
"validium_mode": false
"validium_mode": false,
"codec_version": 9
}

View File

@@ -0,0 +1,3 @@
BEGIN_BLOCK?=20239245
END_BLOCK?=20239250
SCROLL_FORK_NAME=galileoV2

View File

@@ -0,0 +1,15 @@
{
  "db_config": {
    "driver_name": "postgres",
    "dsn": "postgres://dev:dev@localhost:5432/scroll?sslmode=disable",
    "maxOpenNum": 5,
    "maxIdleNum": 1
  },
  "fetch_config": {
    "endpoint": "http://l2-sequencer-galileo-6.devnet.scroll.tech:8545",
    "l2_message_queue_address": "0x5300000000000000000000000000000000000000"
  },
  "validium_mode": false,
  "codec_version": 10
}

View File

@@ -0,0 +1,40 @@
{
  "prover_manager": {
    "provers_per_session": 1,
    "session_attempts": 5,
    "external_prover_threshold": 32,
    "bundle_collection_time_sec": 180,
    "batch_collection_time_sec": 180,
    "chunk_collection_time_sec": 180,
    "verifier": {
      "min_prover_version": "v4.4.33",
      "verifiers": [
        {
          "assets_path": "assets",
          "fork_name": "galileoV2"
        }
      ]
    }
  },
  "db": {
    "driver_name": "postgres",
    "dsn": "postgres://dev:dev@localhost/scroll?sslmode=disable",
    "maxOpenNum": 200,
    "maxIdleNum": 20
  },
  "l2": {
    "validium_mode": false,
    "chain_id": 534351,
    "l2geth": {
      "endpoint": "<search for a public rpc endpoint like alchemy>"
    }
  },
  "auth": {
    "secret": "prover secret key",
    "challenge_expire_duration_sec": 3600,
    "login_expire_duration_sec": 3600
  },
  "sequencer": {
    "decryption_key": "not needed"
  }
}
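A quick, hedged sanity check (assuming `psql` is installed and `make all` from `tests/prover-e2e` has already started the dev database): the `db.dsn` above should point at the same Postgres instance that was populated during the e2e setup.
```bash
# should print the number of imported blocks instead of a connection error
psql "postgres://dev:dev@localhost:5432/scroll?sslmode=disable" \
  -c 'SELECT count(*) FROM l2_block;'
```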

File diff suppressed because one or more lines are too long

16
zkvm-prover/Dockerfile Normal file
View File

@@ -0,0 +1,16 @@
FROM scrolltech/cuda-go-rust-builder:cuda-12.9.1-go-1.22.12-rust-nightly-2025-08-18 AS builder
WORKDIR /app
COPY . .
RUN cd /app/zkvm-prover && make cpu_prover
FROM debian:trixie-slim AS runtime
WORKDIR app
RUN apt-get update \
&& apt-get install -y --no-install-recommends ca-certificates \
&& update-ca-certificates \
&& rm -rf /var/lib/apt/lists/*
ENV RUST_LOG='off,scroll_proving_sdk=info,prover=info'
COPY --from=builder /app/target/release/prover ./prover
ENTRYPOINT ["./prover"]
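One possible way to build and run this image from the repository root; the image tag and the bind-mounted config path are assumptions for illustration, only the `--config` flag itself comes from the prover's Makefile targets.
```bash
# build the CPU prover image defined above
docker build -f zkvm-prover/Dockerfile -t scroll-zkvm-prover .

# run it with a locally prepared config.json mounted into the container's workdir
docker run --rm \
  -v "$(pwd)/zkvm-prover/config.json:/app/config.json" \
  scroll-zkvm-prover --config ./config.json
```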

View File

@@ -35,6 +35,9 @@ ZK_VERSION=${ZKVM_COMMIT}-${PLONKY3_VERSION}
E2E_HANDLE_SET ?= ../tests/prover-e2e/testset.json
DUMP_DIR ?= .work
cpu_prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release -p prover
prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --locked --release --features cuda -p prover
@@ -60,6 +63,11 @@ test_run:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json
test_e2e_run: ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --prover.kind local --config ./config.json handle ${E2E_HANDLE_SET}
test_e2e_run_gpu: ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release --features cuda -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}
test_axiom_e2e_run: ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --prover.kind axiom --config ./config.json handle ${E2E_HANDLE_SET}
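A hedged summary of how these targets are meant to be invoked, all from `zkvm-prover/` with a prepared `config.json`; the note on the axiom target's config requirements is an inference from the template below.
```bash
make cpu_prover          # build the prover binary without CUDA
make test_e2e_run        # e2e run with the local proving backend (--prover.kind local)
make test_e2e_run_gpu    # e2e run with the cuda feature enabled
make test_axiom_e2e_run  # e2e run delegating proving to Axiom (--prover.kind axiom);
                         # presumably needs axiom_api_key / axiom_programs in config.json
```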

View File

@@ -6,7 +6,8 @@
"base_url": "<the url of coordinator>",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 1800
"connection_timeout_sec": 1800,
"suppress_empty_task_error": false
},
"prover": {
"supported_proof_types": [
@@ -14,21 +15,33 @@
2,
3
],
"circuit_version": "v0.13.1"
"circuit_version": "v0.13.1",
"n_workers": 1,
"poll_interval_sec": 20,
"randomized_delay_sec": 0
},
"health_listener_addr": "127.0.0.1:10080",
"db_path": ".work/db"
},
"circuits": {
"feynman": {
"hard_fork_name": "feynman",
"base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/feynman/",
"workspace_path": ".work/feynman"
},
"workspace_path": ".work/feynman",
"base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/feynman/"
},
"galileo": {
"hard_fork_name": "galileo",
"base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/galileo/",
"workspace_path": ".work/galileo",
"base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/galileo/"
},
"galileoV2": {
"base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/galileov2/",
"workspace_path": ".work/galileo"
}
}
},
"axiom_api_key": "<axiom api key>",
"axiom_programs": {
"<vk hex string>": {
"program_id": "prg_<axiom program id>",
"config_id": "cfg_<axiom config id>"
}
}
}
}