Compare commits

..

67 Commits

Author SHA1 Message Date
Ho
14e2633ba3 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-12-22 17:10:13 +09:00
Ho
7de388ef1a [Fix] Accept proof submission even it has been timeout (#1764) 2025-12-12 12:18:34 +09:00
Ho
21326c25e6 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-12-04 19:06:09 +09:00
Morty
27dd62eac3 feat(rollup-relayer): add blob fee tolerance (#1773) 2025-12-03 21:49:17 +08:00
Ho
22479a7952 [Feat] Galileo v2 (#1771)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-12-02 11:04:57 +01:00
Péter Garamvölgyi
690bc01c41 feat: force commit batches at hardfork boundary (#1768) 2025-11-30 20:36:53 +01:00
Ho
9c2bc02f64 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-11-24 18:41:23 +09:00
Ho
9e5579c4cb cover client reset in test 2025-11-21 12:34:11 +09:00
Ho
ac4a72003c refactoring client 2025-11-21 12:25:54 +09:00
Ho
19447984bd fix issues 2025-11-21 10:13:39 +09:00
Ho
d66d705456 fix after merging 2025-11-21 08:37:30 +09:00
Ho
c938d6c25e Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-11-21 08:33:55 +09:00
Ho
cf9e3680c0 Fix login version issue 2025-11-11 19:09:06 +09:00
Ho
e9470ff7a5 update config template 2025-11-11 15:24:16 +09:00
Ho
51b1e79b31 add docker action 2025-11-11 14:28:25 +09:00
Ho
c22d9ecad1 fix goimport issue 2025-11-06 16:11:59 +09:00
Ho
e7551650b2 fix concurrent issue 2025-11-06 16:08:39 +09:00
Ho
20fde41be8 complete persistent layer and unit test 2025-11-05 22:02:14 +09:00
Ho
4df1dd8acd Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-10-27 15:09:17 +09:00
Ho
6696aac16a WIP 2025-10-23 15:23:59 +09:00
Ho
4b79e63c9b WIP: some refactors 2025-10-22 10:27:38 +09:00
Ho
ac0396db3c add persistent for running status 2025-10-22 08:31:55 +09:00
Ho
17e6c5b7ac robust prover manager 2025-10-20 22:24:04 +09:00
Ho
b6e33456fa fix issue 2025-10-20 22:02:48 +09:00
Ho
7572bf8923 fix 2025-10-20 15:21:13 +09:00
Ho
5d41788b07 + fix get task behavior
+ improve the robust of tests
2025-10-20 14:42:05 +09:00
Ho
8f8a537fba Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-10-20 14:09:45 +09:00
Ho
b1c3a4ecc0 more log for init 2025-10-17 22:27:51 +09:00
Ho
d9a29cddce fix config issue 2025-10-17 22:26:29 +09:00
Ho
c992157eb4 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-10-17 16:14:52 +09:00
Ho
404c664e10 fix unittest 2025-10-10 15:33:55 +09:00
Ho
8a15836d20 add compatibile mode and more logs 2025-10-09 14:30:43 +09:00
Ho
4365aafa9a refactor libzkp to be completely mocked out 2025-10-08 11:32:13 +09:00
Ho
6ee026fa16 depress link for libzkp 2025-10-07 11:04:04 +09:00
Ho
c79ad57fb7 finish binary 2025-10-07 10:54:41 +09:00
Ho
fa5b113248 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-10-07 09:53:54 +09:00
Zhang Zhuo
884b050866 Merge branch 'develop' into coordinator_proxy 2025-09-19 09:39:24 +08:00
Ho
1d9fa41535 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-09-10 20:48:38 +09:00
Ho
b7f23c6734 basic tests 2025-09-10 20:48:21 +09:00
Ho
057e22072c fix issues 2025-09-10 20:38:21 +09:00
Ho
c7b83a0784 fix issue in test 2025-09-10 13:55:45 +09:00
Ho
92ca7a6b76 improve get_task proxy 2025-09-10 13:55:38 +09:00
Ho
256c90af6f Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-09-09 22:21:46 +09:00
Ho
50f3e1a97c fix issues from test 2025-09-09 22:21:24 +09:00
Ho
2721503657 refining 2025-09-09 20:10:18 +09:00
Ho
a04b64df03 routes 2025-09-08 22:30:51 +09:00
Ho
78dbe6cde1 controller WIP 2025-09-07 22:39:32 +09:00
Ho
9df6429d98 wip 2025-09-06 21:50:55 +09:00
Ho
e6be62f633 WIP 2025-09-05 22:31:45 +09:00
Ho
c72ee5d679 Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-09-03 22:13:03 +09:00
Ho
4725d8a73c Merge remote-tracking branch 'origin/develop' into coordinator_proxy 2025-09-02 17:24:58 +09:00
Ho
322766f54f WIP 2025-09-02 17:22:35 +09:00
Ho
5614ec3b86 WIP 2025-09-01 10:12:16 +09:00
Ho
5a07a1652b WIP 2025-08-27 09:43:30 +09:00
Ho
64ef0f4ec0 WIP 2025-08-25 11:52:03 +09:00
Ho
321dd43af8 unit test for client 2025-08-25 11:43:50 +09:00
Ho
624a7a29b8 WIP: AI step 2025-08-25 09:35:10 +09:00
Ho
4f878d9231 AI step 2025-08-24 23:05:56 +09:00
Ho
7b3a65b35b framework for auto login 2025-08-24 22:41:17 +09:00
Ho
0d238d77a6 WIP: the structure of client manager 2025-08-24 22:32:38 +09:00
Ho
76ecdf064a add proxy config sample 2025-08-24 22:14:32 +09:00
Ho
5c6c225f76 WIP: config and client controller 2025-08-24 22:14:22 +09:00
Ho
3adb2e0a1b WIP: controller 2025-08-24 21:18:13 +09:00
Ho
412ad56a64 extend loginlogic 2025-08-24 20:43:40 +09:00
Ho
9796d16f6c WIP: update login logic and coordinator client 2025-08-24 20:32:11 +09:00
Ho
1f2b857671 add proxy_login route 2025-08-24 15:35:51 +09:00
Ho
5dbb5c5fb7 extend api for proxy 2025-08-24 14:54:54 +09:00
85 changed files with 3264 additions and 668 deletions

View File

@@ -360,6 +360,49 @@ jobs:
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
coordinator-proxy:
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: coordinator-proxy
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: coordinator-proxy
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/coordinator-proxy.Dockerfile
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
coordinator-cron: coordinator-cron:
runs-on: runs-on:
group: scroll-reth-runner-group group: scroll-reth-runner-group

163
Cargo.lock generated
View File

@@ -1347,7 +1347,7 @@ dependencies = [
"bitflags 2.10.0", "bitflags 2.10.0",
"cexpr", "cexpr",
"clang-sys", "clang-sys",
"itertools 0.12.1", "itertools 0.11.0",
"lazy_static", "lazy_static",
"lazycell", "lazycell",
"proc-macro2", "proc-macro2",
@@ -1814,7 +1814,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c"
dependencies = [ dependencies = [
"lazy_static", "lazy_static",
"windows-sys 0.59.0", "windows-sys 0.48.0",
] ]
[[package]] [[package]]
@@ -2427,7 +2427,7 @@ dependencies = [
[[package]] [[package]]
name = "encoder-standard" name = "encoder-standard"
version = "0.1.0" version = "0.1.0"
source = "git+https://github.com/scroll-tech/da-codec#7a92e859b55094ba5b5c7d556c49c4dbd3f47ddb" source = "git+https://github.com/scroll-tech/da-codec#afa161a4487fe3ba600bfdb792daeb3dcc21fa25"
dependencies = [ dependencies = [
"zstd", "zstd",
] ]
@@ -3851,15 +3851,6 @@ dependencies = [
"either", "either",
] ]
[[package]]
name = "itertools"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
dependencies = [
"either",
]
[[package]] [[package]]
name = "itertools" name = "itertools"
version = "0.13.0" version = "0.13.0"
@@ -4019,7 +4010,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"windows-targets 0.52.6", "windows-targets 0.48.5",
] ]
[[package]] [[package]]
@@ -4588,7 +4579,7 @@ version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56"
dependencies = [ dependencies = [
"proc-macro-crate 3.3.0", "proc-macro-crate 1.3.1",
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn 2.0.101", "syn 2.0.101",
@@ -7116,7 +7107,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-chainspec" name = "reth-chainspec"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-chains", "alloy-chains",
"alloy-consensus", "alloy-consensus",
@@ -7136,7 +7127,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-codecs" name = "reth-codecs"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -7154,7 +7145,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-codecs-derive" name = "reth-codecs-derive"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@@ -7164,7 +7155,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-consensus" name = "reth-consensus"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-primitives", "alloy-primitives",
@@ -7177,7 +7168,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-consensus-common" name = "reth-consensus-common"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -7189,7 +7180,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-db-models" name = "reth-db-models"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
"alloy-primitives", "alloy-primitives",
@@ -7199,7 +7190,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-errors" name = "reth-errors"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"reth-consensus", "reth-consensus",
"reth-execution-errors", "reth-execution-errors",
@@ -7210,7 +7201,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-ethereum-consensus" name = "reth-ethereum-consensus"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -7226,7 +7217,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-ethereum-forks" name = "reth-ethereum-forks"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-eip2124", "alloy-eip2124",
"alloy-hardforks", "alloy-hardforks",
@@ -7238,7 +7229,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-ethereum-primitives" name = "reth-ethereum-primitives"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -7255,7 +7246,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-evm" name = "reth-evm"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -7277,7 +7268,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-evm-ethereum" name = "reth-evm-ethereum"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -7297,7 +7288,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-execution-errors" name = "reth-execution-errors"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-evm", "alloy-evm",
"alloy-primitives", "alloy-primitives",
@@ -7310,7 +7301,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-execution-types" name = "reth-execution-types"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -7326,7 +7317,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-network-peers" name = "reth-network-peers"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-rlp", "alloy-rlp",
@@ -7338,7 +7329,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-primitives" name = "reth-primitives"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"once_cell", "once_cell",
@@ -7351,7 +7342,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-primitives-traits" name = "reth-primitives-traits"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -7379,7 +7370,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-prune-types" name = "reth-prune-types"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"derive_more 2.0.1", "derive_more 2.0.1",
@@ -7389,7 +7380,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-revm" name = "reth-revm"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"reth-primitives-traits", "reth-primitives-traits",
@@ -7401,7 +7392,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-scroll-chainspec" name = "reth-scroll-chainspec"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-chains", "alloy-chains",
"alloy-consensus", "alloy-consensus",
@@ -7426,7 +7417,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-scroll-evm" name = "reth-scroll-evm"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -7456,7 +7447,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-scroll-forks" name = "reth-scroll-forks"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-chains", "alloy-chains",
"alloy-primitives", "alloy-primitives",
@@ -7470,7 +7461,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-scroll-primitives" name = "reth-scroll-primitives"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -7487,7 +7478,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-stages-types" name = "reth-stages-types"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"reth-trie-common", "reth-trie-common",
@@ -7496,7 +7487,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-stateless" name = "reth-stateless"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-primitives", "alloy-primitives",
@@ -7522,7 +7513,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-static-file-types" name = "reth-static-file-types"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"derive_more 2.0.1", "derive_more 2.0.1",
@@ -7533,7 +7524,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-storage-api" name = "reth-storage-api"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -7555,7 +7546,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-storage-errors" name = "reth-storage-errors"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
"alloy-primitives", "alloy-primitives",
@@ -7571,7 +7562,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-trie" name = "reth-trie"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -7593,7 +7584,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-trie-common" name = "reth-trie-common"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-primitives", "alloy-primitives",
@@ -7609,7 +7600,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-trie-sparse" name = "reth-trie-sparse"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-rlp", "alloy-rlp",
@@ -7625,7 +7616,7 @@ dependencies = [
[[package]] [[package]]
name = "reth-zstd-compressors" name = "reth-zstd-compressors"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"zstd", "zstd",
] ]
@@ -8231,7 +8222,7 @@ dependencies = [
[[package]] [[package]]
name = "risc0-ethereum-trie" name = "risc0-ethereum-trie"
version = "0.1.0" version = "0.1.0"
source = "git+https://github.com/risc0/risc0-ethereum#c1ddb41a44dc0730da883bbfa9fbe75ad335df1b" source = "git+https://github.com/risc0/risc0-ethereum#e475fe6c8dcff92fb5e67d6556cb11ba3ab4e494"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-rlp", "alloy-rlp",
@@ -8519,7 +8510,7 @@ checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
[[package]] [[package]]
name = "sbv-core" name = "sbv-core"
version = "2.0.0" version = "2.0.0"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91#dd7f8dad597344e472d239dc135ee952efd185f5" source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91.2#3a32848c9438432125751eae8837757f6b87562e"
dependencies = [ dependencies = [
"auto_impl", "auto_impl",
"itertools 0.14.0", "itertools 0.14.0",
@@ -8536,7 +8527,7 @@ dependencies = [
[[package]] [[package]]
name = "sbv-helpers" name = "sbv-helpers"
version = "2.0.0" version = "2.0.0"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91#dd7f8dad597344e472d239dc135ee952efd185f5" source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91.2#3a32848c9438432125751eae8837757f6b87562e"
dependencies = [ dependencies = [
"tracing", "tracing",
] ]
@@ -8544,7 +8535,7 @@ dependencies = [
[[package]] [[package]]
name = "sbv-primitives" name = "sbv-primitives"
version = "2.0.0" version = "2.0.0"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91#dd7f8dad597344e472d239dc135ee952efd185f5" source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91.2#3a32848c9438432125751eae8837757f6b87562e"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -8554,8 +8545,6 @@ dependencies = [
"alloy-rpc-types-debug", "alloy-rpc-types-debug",
"alloy-rpc-types-eth", "alloy-rpc-types-eth",
"alloy-serde 1.0.41", "alloy-serde 1.0.41",
"auto_impl",
"itertools 0.14.0",
"reth-chainspec", "reth-chainspec",
"reth-ethereum-forks", "reth-ethereum-forks",
"reth-evm", "reth-evm",
@@ -8581,7 +8570,7 @@ dependencies = [
[[package]] [[package]]
name = "sbv-trie" name = "sbv-trie"
version = "2.0.0" version = "2.0.0"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91#dd7f8dad597344e472d239dc135ee952efd185f5" source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91.2#3a32848c9438432125751eae8837757f6b87562e"
dependencies = [ dependencies = [
"alloy-rlp", "alloy-rlp",
"alloy-trie 0.9.1", "alloy-trie 0.9.1",
@@ -8594,10 +8583,9 @@ dependencies = [
[[package]] [[package]]
name = "sbv-utils" name = "sbv-utils"
version = "2.0.0" version = "2.0.0"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91#dd7f8dad597344e472d239dc135ee952efd185f5" source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91.2#3a32848c9438432125751eae8837757f6b87562e"
dependencies = [ dependencies = [
"alloy-provider", "alloy-provider",
"alloy-rpc-client",
"alloy-transport", "alloy-transport",
"async-trait", "async-trait",
"futures", "futures",
@@ -8673,7 +8661,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]] [[package]]
name = "scroll-alloy-consensus" name = "scroll-alloy-consensus"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -8689,7 +8677,7 @@ dependencies = [
[[package]] [[package]]
name = "scroll-alloy-evm" name = "scroll-alloy-evm"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -8707,7 +8695,7 @@ dependencies = [
[[package]] [[package]]
name = "scroll-alloy-hardforks" name = "scroll-alloy-hardforks"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-hardforks", "alloy-hardforks",
"auto_impl", "auto_impl",
@@ -8717,7 +8705,7 @@ dependencies = [
[[package]] [[package]]
name = "scroll-alloy-network" name = "scroll-alloy-network"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-network", "alloy-network",
@@ -8732,7 +8720,7 @@ dependencies = [
[[package]] [[package]]
name = "scroll-alloy-rpc-types" name = "scroll-alloy-rpc-types"
version = "1.8.2" version = "1.8.2"
source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7" source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips 1.0.41", "alloy-eips 1.0.41",
@@ -8778,8 +8766,8 @@ dependencies = [
[[package]] [[package]]
name = "scroll-zkvm-prover" name = "scroll-zkvm-prover"
version = "0.7.0" version = "0.7.1"
source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf" source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
dependencies = [ dependencies = [
"base64 0.22.1", "base64 0.22.1",
"bincode 1.3.3", "bincode 1.3.3",
@@ -8805,8 +8793,8 @@ dependencies = [
[[package]] [[package]]
name = "scroll-zkvm-types" name = "scroll-zkvm-types"
version = "0.7.0" version = "0.7.1"
source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf" source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"base64 0.22.1", "base64 0.22.1",
@@ -8829,8 +8817,8 @@ dependencies = [
[[package]] [[package]]
name = "scroll-zkvm-types-base" name = "scroll-zkvm-types-base"
version = "0.7.0" version = "0.7.1"
source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf" source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-serde 1.0.41", "alloy-serde 1.0.41",
@@ -8842,8 +8830,8 @@ dependencies = [
[[package]] [[package]]
name = "scroll-zkvm-types-batch" name = "scroll-zkvm-types-batch"
version = "0.7.0" version = "0.7.1"
source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf" source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"c-kzg", "c-kzg",
@@ -8864,8 +8852,8 @@ dependencies = [
[[package]] [[package]]
name = "scroll-zkvm-types-bundle" name = "scroll-zkvm-types-bundle"
version = "0.7.0" version = "0.7.1"
source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf" source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
dependencies = [ dependencies = [
"rkyv", "rkyv",
"scroll-zkvm-types-base", "scroll-zkvm-types-base",
@@ -8874,8 +8862,8 @@ dependencies = [
[[package]] [[package]]
name = "scroll-zkvm-types-chunk" name = "scroll-zkvm-types-chunk"
version = "0.7.0" version = "0.7.1"
source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf" source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-primitives", "alloy-primitives",
@@ -8899,8 +8887,8 @@ dependencies = [
[[package]] [[package]]
name = "scroll-zkvm-verifier" name = "scroll-zkvm-verifier"
version = "0.7.0" version = "0.7.1"
source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf" source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
dependencies = [ dependencies = [
"bincode 1.3.3", "bincode 1.3.3",
"eyre", "eyre",
@@ -9103,15 +9091,16 @@ dependencies = [
[[package]] [[package]]
name = "serde_json" name = "serde_json"
version = "1.0.140" version = "1.0.145"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
dependencies = [ dependencies = [
"indexmap 2.9.0", "indexmap 2.9.0",
"itoa", "itoa",
"memchr", "memchr",
"ryu", "ryu",
"serde", "serde",
"serde_core",
] ]
[[package]] [[package]]
@@ -9126,20 +9115,21 @@ dependencies = [
[[package]] [[package]]
name = "serde_spanned" name = "serde_spanned"
version = "0.6.8" version = "0.6.9"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3"
dependencies = [ dependencies = [
"serde", "serde",
] ]
[[package]] [[package]]
name = "serde_stacker" name = "serde_stacker"
version = "0.1.12" version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69c8defe6c780725cce4ec6ad3bd91e321baf6fa4e255df1f31e345d507ef01a" checksum = "d4936375d50c4be7eff22293a9344f8e46f323ed2b3c243e52f89138d9bb0f4a"
dependencies = [ dependencies = [
"serde", "serde",
"serde_core",
"stacker", "stacker",
] ]
@@ -9157,9 +9147,9 @@ dependencies = [
[[package]] [[package]]
name = "serde_with" name = "serde_with"
version = "3.14.0" version = "3.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" checksum = "10574371d41b0d9b2cff89418eda27da52bcaff2cc8741db26382a77c29131f1"
dependencies = [ dependencies = [
"base64 0.22.1", "base64 0.22.1",
"chrono", "chrono",
@@ -9168,8 +9158,7 @@ dependencies = [
"indexmap 2.9.0", "indexmap 2.9.0",
"schemars 0.9.0", "schemars 0.9.0",
"schemars 1.0.4", "schemars 1.0.4",
"serde", "serde_core",
"serde_derive",
"serde_json", "serde_json",
"serde_with_macros", "serde_with_macros",
"time", "time",
@@ -9177,11 +9166,11 @@ dependencies = [
[[package]] [[package]]
name = "serde_with_macros" name = "serde_with_macros"
version = "3.14.0" version = "3.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" checksum = "08a72d8216842fdd57820dc78d840bef99248e35fb2554ff923319e60f2d686b"
dependencies = [ dependencies = [
"darling 0.20.11", "darling 0.21.3",
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn 2.0.101", "syn 2.0.101",

View File

@@ -17,13 +17,13 @@ repository = "https://github.com/scroll-tech/scroll"
version = "4.7.1" version = "4.7.1"
[workspace.dependencies] [workspace.dependencies]
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" } scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" } scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" } scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91", features = ["scroll", "rkyv"] } sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll", "rkyv"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91" } sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2" }
sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91", features = ["scroll"] } sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll"] }
metrics = "0.23.0" metrics = "0.23.0"
metrics-util = "0.17" metrics-util = "0.17"

View File

@@ -1,6 +1,6 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update .PHONY: fmt dev_docker build_test_docker run_test_docker clean update
L2GETH_TAG=scroll-v5.9.7 L2GETH_TAG=scroll-v5.9.17
help: ## Display this help message help: ## Display this help message
@grep -h \ @grep -h \

View File

@@ -10,8 +10,8 @@ require (
github.com/go-redis/redis/v8 v8.11.5 github.com/go-redis/redis/v8 v8.11.5
github.com/pressly/goose/v3 v3.16.0 github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0 github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.9.0 github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251113125950-906b730d541d github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.25.7 github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.11.0 golang.org/x/sync v0.11.0
@@ -21,7 +21,7 @@ require (
// Hotfix for header hash incompatibility issue. // Hotfix for header hash incompatibility issue.
// PR: https://github.com/scroll-tech/go-ethereum/pull/1133/ // PR: https://github.com/scroll-tech/go-ethereum/pull/1133/
// CAUTION: Requires careful handling. When upgrading go-ethereum, ensure this fix remains up-to-date in this branch. // CAUTION: Requires careful handling. When upgrading go-ethereum, ensure this fix remains up-to-date in this branch.
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b
require ( require (
dario.cat/mergo v1.0.0 // indirect dario.cat/mergo v1.0.0 // indirect

View File

@@ -309,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70= github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ= github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e h1:7U1/JilCPIqJTYNgKzlfBEkRNORRwJ8+PCmdR8/XK6A= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b h1:pMQKnroJoS/FeL1aOWkz7/u1iBHUP8PWjZstNuzoUGE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=

View File

@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build coordinator proxy
FROM base as builder
COPY . .
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_proxy && mv ./build/bin/coordinator_proxy /bin/coordinator_proxy
# Pull coordinator proxy into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install vim netcat-openbsd net-tools curl jq -y
COPY --from=builder /bin/coordinator_proxy /bin/
RUN /bin/coordinator_proxy --version
WORKDIR /app
ENTRYPOINT ["/bin/coordinator_proxy"]

View File

@@ -0,0 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -12,10 +12,11 @@ require (
github.com/gin-gonic/gin v1.9.1 github.com/gin-gonic/gin v1.9.1
github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-isatty v0.0.20
github.com/mitchellh/mapstructure v1.5.0
github.com/modern-go/reflect2 v1.0.2 github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0 github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.19.0 github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.10.0
github.com/testcontainers/testcontainers-go v0.30.0 github.com/testcontainers/testcontainers-go v0.30.0
github.com/testcontainers/testcontainers-go/modules/compose v0.30.0 github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
@@ -147,7 +148,6 @@ require (
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect
@@ -184,7 +184,7 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/da-codec v0.9.0 // indirect github.com/scroll-tech/da-codec v0.10.0 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect

View File

@@ -636,10 +636,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70= github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ= github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=

View File

@@ -4,6 +4,7 @@ import (
"net/http" "net/http"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/mitchellh/mapstructure"
) )
// Response the response schema // Response the response schema
@@ -13,6 +14,19 @@ type Response struct {
Data interface{} `json:"data"` Data interface{} `json:"data"`
} }
func (resp *Response) DecodeData(out interface{}) error {
// Decode generically unmarshaled JSON (map[string]any, []any) into a typed struct
// honoring `json` tags and allowing weak type conversions.
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
TagName: "json",
Result: out,
})
if err != nil {
return err
}
return dec.Decode(resp.Data)
}
// RenderJSON renders response with json // RenderJSON renders response with json
func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) { func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
var errMsg string var errMsg string

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug" "runtime/debug"
) )
var tag = "v4.7.7" var tag = "v4.7.10"
var commit = func() string { var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok { if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -34,6 +34,10 @@ coordinator_cron:
coordinator_tool: coordinator_tool:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool
coordinator_proxy:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_proxy ./cmd/proxy
localsetup: coordinator_api ## Local setup: build coordinator_api, copy config, and setup releases localsetup: coordinator_api ## Local setup: build coordinator_api, copy config, and setup releases
mkdir -p build/bin/conf mkdir -p build/bin/conf
@echo "Copying configuration files..." @echo "Copying configuration files..."

View File

@@ -7,7 +7,7 @@ if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
fi fi
# default fork name from env or "galileo" # default fork name from env or "galileo"
SCROLL_FORK_NAME="${SCROLL_FORK_NAME:-galileo}" SCROLL_FORK_NAME="${SCROLL_FORK_NAME:-galileov2}"
# set ASSET_DIR by reading from config.json # set ASSET_DIR by reading from config.json
CONFIG_FILE="bin/conf/config.template.json" CONFIG_FILE="bin/conf/config.template.json"

View File

@@ -0,0 +1,122 @@
package app
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"os/signal"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/proxy"
"scroll-tech/coordinator/internal/route"
)
var app *cli.App
func init() {
// Set up coordinator app info.
app = cli.NewApp()
app.Action = action
app.Name = "coordinator proxy"
app.Usage = "Proxy for multiple Scroll L2 Coordinators"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `coordinator-test` app for integration-test.
utils.RegisterSimulation(app, utils.CoordinatorAPIApp)
}
func action(ctx *cli.Context) error {
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewProxyConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
var db *gorm.DB
if dbCfg := cfg.ProxyManager.DB; dbCfg != nil {
log.Info("Apply persistent storage", "via", cfg.ProxyManager.DB.DSN)
db, err = database.InitDB(cfg.ProxyManager.DB)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
if err = database.CloseDB(db); err != nil {
log.Error("can not close db connection", "error", err)
}
}()
observability.Server(ctx, db)
}
registry := prometheus.DefaultRegisterer
apiSrv := server(ctx, cfg, db, registry)
log.Info(
"Start coordinator api successfully.",
"version", version.Version,
)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
log.Info("start shutdown coordinator proxy server ...")
closeCtx, cancelExit := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelExit()
if err = apiSrv.Shutdown(closeCtx); err != nil {
log.Warn("shutdown coordinator proxy server failure", "error", err)
return nil
}
<-closeCtx.Done()
log.Info("coordinator proxy server exiting success")
return nil
}
func server(ctx *cli.Context, cfg *config.ProxyConfig, db *gorm.DB, reg prometheus.Registerer) *http.Server {
router := gin.New()
proxy.InitController(cfg, db, reg)
route.ProxyRoute(router, cfg, reg)
port := ctx.String(httpPortFlag.Name)
srv := &http.Server{
Addr: fmt.Sprintf(":%s", port),
Handler: router,
ReadHeaderTimeout: time.Minute,
}
go func() {
if runServerErr := srv.ListenAndServe(); runServerErr != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
log.Crit("run coordinator proxy http server failure", "error", runServerErr)
}
}()
return srv
}
// Run coordinator.
func Run() {
// RunApp the coordinator.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -0,0 +1,30 @@
package app
import "github.com/urfave/cli/v2"
var (
apiFlags = []cli.Flag{
// http flags
&httpEnabledFlag,
&httpListenAddrFlag,
&httpPortFlag,
}
// httpEnabledFlag enable rpc server.
httpEnabledFlag = cli.BoolFlag{
Name: "http",
Usage: "Enable the HTTP-RPC server",
Value: false,
}
// httpListenAddrFlag set the http address.
httpListenAddrFlag = cli.StringFlag{
Name: "http.addr",
Usage: "HTTP-RPC server listening interface",
Value: "localhost",
}
// httpPortFlag set http.port.
httpPortFlag = cli.IntFlag{
Name: "http.port",
Usage: "HTTP-RPC server listening port",
Value: 8590,
}
)

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/coordinator/cmd/proxy/app"
func main() {
app.Run()
}

View File

@@ -17,6 +17,10 @@
{ {
"assets_path": "assets", "assets_path": "assets",
"fork_name": "galileo" "fork_name": "galileo"
},
{
"assets_path": "assets_v2",
"fork_name": "galileoV2"
} }
] ]
} }

View File

@@ -0,0 +1,31 @@
{
"proxy_manager": {
"proxy_cli": {
"proxy_name": "proxy_name",
"secret": "client private key"
},
"auth": {
"secret": "proxy secret key",
"challenge_expire_duration_sec": 3600,
"login_expire_duration_sec": 3600
},
"verifier": {
"min_prover_version": "v4.4.45",
"verifiers": []
},
"db": {
"driver_name": "postgres",
"dsn": "postgres://localhost/scroll?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
}
},
"coordinators": {
"sepolia": {
"base_url": "http://localhost:8555",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 30
}
}
}

View File

@@ -9,8 +9,8 @@ require (
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.19.0 github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.9.0 github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/shopspring/decimal v1.3.1 github.com/shopspring/decimal v1.3.1
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.25.7 github.com/urfave/cli/v2 v2.25.7

View File

@@ -253,10 +253,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70= github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ= github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

View File

@@ -0,0 +1,74 @@
package config
import (
"encoding/json"
"os"
"path/filepath"
"scroll-tech/common/database"
"scroll-tech/common/utils"
)
// Proxy loads proxy configuration items.
type ProxyManager struct {
// Zk verifier config help to confine the connected prover.
Verifier *VerifierConfig `json:"verifier"`
Client *ProxyClient `json:"proxy_cli"`
Auth *Auth `json:"auth"`
DB *database.Config `json:"db,omitempty"`
}
func (m *ProxyManager) Normalize() {
if m.Client.Secret == "" {
m.Client.Secret = m.Auth.Secret
}
if m.Client.ProxyVersion == "" {
m.Client.ProxyVersion = m.Verifier.MinProverVersion
}
}
// Proxy client configuration for connect to upstream as a client
type ProxyClient struct {
ProxyName string `json:"proxy_name"`
ProxyVersion string `json:"proxy_version,omitempty"`
Secret string `json:"secret,omitempty"`
}
// Coordinator configuration
type UpStream struct {
BaseUrl string `json:"base_url"`
RetryCount uint `json:"retry_count"`
RetryWaitTime uint `json:"retry_wait_time_sec"`
ConnectionTimeoutSec uint `json:"connection_timeout_sec"`
CompatibileMode bool `json:"compatible_mode,omitempty"`
}
// Config load configuration items.
type ProxyConfig struct {
ProxyManager *ProxyManager `json:"proxy_manager"`
ProxyName string `json:"proxy_name"`
Coordinators map[string]*UpStream `json:"coordinators"`
}
// NewConfig returns a new instance of Config.
func NewProxyConfig(file string) (*ProxyConfig, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &ProxyConfig{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}
// Override config with environment variables
err = utils.OverrideConfigWithEnv(cfg, "SCROLL_COORDINATOR_PROXY")
if err != nil {
return nil, err
}
return cfg, nil
}

View File

@@ -19,28 +19,56 @@ type AuthController struct {
loginLogic *auth.LoginLogic loginLogic *auth.LoginLogic
} }
// NewAuthController returns an LoginController instance func NewAuthControllerWithLogic(loginLogic *auth.LoginLogic) *AuthController {
func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController {
return &AuthController{ return &AuthController{
loginLogic: auth.NewLoginLogic(db, cfg, vf), loginLogic: loginLogic,
} }
} }
// Login the api controller for login // NewAuthController returns an LoginController instance
func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController {
return &AuthController{
loginLogic: auth.NewLoginLogic(db, cfg.ProverManager.Verifier, vf),
}
}
// Login the api controller for login, used as the Authenticator in JWT
// It can work in two mode: full process for normal login, or if login request
// is posted from proxy, run a simpler process to login a client
func (a *AuthController) Login(c *gin.Context) (interface{}, error) { func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
// check if the login is post by proxy
var viaProxy bool
if proverType, proverTypeExist := c.Get(types.ProverProviderTypeKey); proverTypeExist {
proverType := uint8(proverType.(float64))
viaProxy = proverType == types.ProverProviderTypeProxy
}
var login types.LoginParameter var login types.LoginParameter
if err := c.ShouldBind(&login); err != nil { if err := c.ShouldBind(&login); err != nil {
return "", fmt.Errorf("missing the public_key, err:%w", err) return "", fmt.Errorf("missing the public_key, err:%w", err)
} }
// check login parameter's token is equal to bearer token, the Authorization must be existed // if not, process with normal login
// if not exist, the jwt token will intercept it if !viaProxy {
brearToken := c.GetHeader("Authorization") // check login parameter's token is equal to bearer token, the Authorization must be existed
if brearToken != "Bearer "+login.Message.Challenge { // if not exist, the jwt token will intercept it
return "", errors.New("check challenge failure for the not equal challenge string") brearToken := c.GetHeader("Authorization")
if brearToken != "Bearer "+login.Message.Challenge {
return "", errors.New("check challenge failure for the not equal challenge string")
}
if err := auth.VerifyMsg(&login); err != nil {
return "", err
}
// check the challenge is used, if used, return failure
if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
return "", fmt.Errorf("login insert challenge string failure:%w", err)
}
} }
if err := a.loginLogic.Check(&login); err != nil { if err := a.loginLogic.CompatiblityCheck(&login); err != nil {
return "", fmt.Errorf("check the login parameter failure: %w", err) return "", fmt.Errorf("check the login parameter failure: %w", err)
} }
@@ -49,11 +77,6 @@ func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
return "", fmt.Errorf("prover hard fork name failure:%w", err) return "", fmt.Errorf("prover hard fork name failure:%w", err)
} }
// check the challenge is used, if used, return failure
if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
return "", fmt.Errorf("login insert challenge string failure:%w", err)
}
returnData := types.LoginParameterWithHardForkName{ returnData := types.LoginParameterWithHardForkName{
HardForkName: hardForkNames, HardForkName: hardForkNames,
LoginParameter: login, LoginParameter: login,
@@ -85,10 +108,6 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
c.Set(types.ProverName, proverName) c.Set(types.ProverName, proverName)
} }
if publicKey, ok := claims[types.PublicKey]; ok {
c.Set(types.PublicKey, publicKey)
}
if proverVersion, ok := claims[types.ProverVersion]; ok { if proverVersion, ok := claims[types.ProverVersion]; ok {
c.Set(types.ProverVersion, proverVersion) c.Set(types.ProverVersion, proverVersion)
} }
@@ -101,5 +120,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
c.Set(types.ProverProviderTypeKey, providerType) c.Set(types.ProverProviderTypeKey, providerType)
} }
if publicKey, ok := claims[types.PublicKey]; ok {
return publicKey
}
return nil return nil
} }

View File

@@ -0,0 +1,150 @@
package proxy
import (
"context"
"fmt"
"sync"
"time"
jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/logic/auth"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/types"
)
// AuthController is login API
type AuthController struct {
apiLogin *api.AuthController
clients Clients
proverMgr *ProverManager
}
const upstreamConnTimeout = time.Second * 5
const LoginParamCache = "login_param"
const ProverTypesKey = "prover_types"
const SignatureKey = "prover_signature"
// NewAuthController returns an LoginController instance
func NewAuthController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager) *AuthController {
	// Login verification never consults the verifier's contents, so a
	// placeholder instance with an empty VK map is sufficient here.
	placeholderVf := &verifier.Verifier{
		OpenVMVkMap: map[string]struct{}{},
	}
	logic := auth.NewLoginLogicWithSimpleDeduplicator(cfg.ProxyManager.Verifier, placeholderVf)
	return &AuthController{
		apiLogin:  api.NewAuthControllerWithLogic(logic),
		clients:   clients,
		proverMgr: proverMgr,
	}
}
// Login extended the Login hander in api controller
// It delegates to the embedded api.AuthController to verify the prover's
// login parameter, rejects recursive proxy-to-proxy logins, then fans out
// an asynchronous ProxyLogin to every configured upstream to warm the
// prover's upstream token cache. Upstream failures are logged, not
// returned: the prover's own login still succeeds.
func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
	loginRes, err := a.apiLogin.Login(c)
	if err != nil {
		return nil, err
	}
	loginParam := loginRes.(types.LoginParameterWithHardForkName)
	if loginParam.LoginParameter.Message.ProverProviderType == types.ProverProviderTypeProxy {
		return nil, fmt.Errorf("proxy do not support recursive login")
	}
	session := a.proverMgr.GetOrCreate(loginParam.PublicKey)
	log.Debug("start handling login", "cli", loginParam.Message.ProverName)
	// Detached context: the fan-out must not be cut short by the HTTP
	// request finishing; it is bounded by upstreamConnTimeout instead.
	loginCtx, cf := context.WithTimeout(context.Background(), upstreamConnTimeout)
	var wg sync.WaitGroup
	for _, cli := range a.clients {
		wg.Add(1)
		go func(cli Client) {
			defer wg.Done()
			if err := session.ProxyLogin(loginCtx, cli, &loginParam.LoginParameter); err != nil {
				log.Error("proxy login failed during token cache update",
					"userKey", loginParam.PublicKey,
					"upstream", cli.Name(),
					"error", err)
			}
		}(cli)
	}
	// Waiter goroutine releases the timeout context once all upstream
	// attempts have completed, avoiding a context leak.
	go func(cliName string) {
		wg.Wait()
		cf()
		log.Debug("first login attempt has completed", "cli", cliName)
	}(loginParam.Message.ProverName)
	return loginParam.LoginParameter, nil
}
// PayloadFunc returns jwt.MapClaims with {public key, prover name}.
func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
	param, ok := data.(types.LoginParameter)
	if !ok {
		log.Error("PayloadFunc received unexpected type", "type", fmt.Sprintf("%T", data))
		return jwt.MapClaims{}
	}
	claims := make(jwt.MapClaims, 6)
	claims[types.PublicKey] = param.PublicKey
	claims[types.ProverName] = param.Message.ProverName
	claims[types.ProverVersion] = param.Message.ProverVersion
	claims[types.ProverProviderTypeKey] = param.Message.ProverProviderType
	claims[SignatureKey] = param.Signature
	claims[ProverTypesKey] = param.Message.ProverTypes
	return claims
}
// IdentityHandler replies to client for /login
// It rebuilds a types.LoginParameter from the JWT claims, caches it in the
// gin context under LoginParamCache (so later handlers can re-login to an
// upstream when its token expires), and returns the public key as the JWT
// identity, or nil when no public key claim is present.
func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
	claims := jwt.ExtractClaims(c)
	loginParam := &types.LoginParameter{}
	if proverName, ok := claims[types.ProverName]; ok {
		loginParam.Message.ProverName, _ = proverName.(string)
	}
	if proverVersion, ok := claims[types.ProverVersion]; ok {
		loginParam.Message.ProverVersion, _ = proverVersion.(string)
	}
	if providerType, ok := claims[types.ProverProviderTypeKey]; ok {
		// JSON numbers round-trip through float64; convert back to the enum.
		num, _ := providerType.(float64)
		loginParam.Message.ProverProviderType = types.ProverProviderType(num)
	}
	if signature, ok := claims[SignatureKey]; ok {
		loginParam.Signature, _ = signature.(string)
	}
	if proverTypes, ok := claims[ProverTypesKey]; ok {
		// Same float64 round-trip for each element of the array claim.
		arr, _ := proverTypes.([]any)
		for _, elm := range arr {
			num, _ := elm.(float64)
			loginParam.Message.ProverTypes = append(loginParam.Message.ProverTypes, types.ProverType(num))
		}
	}
	if publicKey, ok := claims[types.PublicKey]; ok {
		loginParam.PublicKey, _ = publicKey.(string)
	}
	if loginParam.PublicKey != "" {
		c.Set(LoginParamCache, loginParam)
		c.Set(types.ProverName, loginParam.Message.ProverName)
		// publickey will also be set since we have specified public_key as identical key
		return loginParam.PublicKey
	}
	return nil
}

View File

@@ -0,0 +1,246 @@
//nolint:errcheck,bodyclose // body is closed in the following handleHttpResp call
package proxy
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
ctypes "scroll-tech/common/types"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/types"
)
type ProxyCli interface {
Login(ctx context.Context, genLogin func(string) (*types.LoginParameter, error)) (*ctypes.Response, error)
ProxyLogin(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error)
Token() string
Reset()
}
type ProverCli interface {
GetTask(ctx context.Context, param *types.GetTaskParameter) (*ctypes.Response, error)
SubmitProof(ctx context.Context, param *types.SubmitProofParameter) (*ctypes.Response, error)
}
// Client wraps an http client with a preset host for coordinator API calls
type upClient struct {
httpClient *http.Client
baseURL string
loginToken string
compatibileMode bool
resetFromMgr func()
}
// NewClient creates a new Client with the specified host
func newUpClient(cfg *config.UpStream) *upClient {
	timeout := time.Duration(cfg.ConnectionTimeoutSec) * time.Second
	cli := &upClient{
		baseURL:         cfg.BaseUrl,
		compatibileMode: cfg.CompatibileMode,
	}
	cli.httpClient = &http.Client{Timeout: timeout}
	return cli
}
// Reset invokes the manager-installed callback (if any) so the owning
// ClientManager can drop this client from its cache.
func (c *upClient) Reset() {
	if cb := c.resetFromMgr; cb != nil {
		cb()
	}
}
// Token returns the token obtained by the last successful login ("" if
// no login has completed yet).
func (c *upClient) Token() string {
	return c.loginToken
}
// need a parsable schema definition
type loginSchema struct {
Time string `json:"time"`
Token string `json:"token"`
}
// Login performs the complete login process: get challenge then login.
// genLogin receives the upstream's challenge token and must return the
// signed LoginParameter to submit. Returns the parsed upstream response.
func (c *upClient) Login(ctx context.Context, genLogin func(string) (*types.LoginParameter, error)) (*ctypes.Response, error) {
	// Step 1: Get challenge
	url := fmt.Sprintf("%s/coordinator/v1/challenge", c.baseURL)
	// Bind the request to ctx: the original used http.NewRequest, so the
	// caller's cancellation/timeout never reached these HTTP calls.
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create challenge request: %w", err)
	}
	challengeResp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to get challenge: %w", err)
	}
	parsedResp, err := handleHttpResp(challengeResp)
	if err != nil {
		return nil, err
	} else if parsedResp.ErrCode != 0 {
		return nil, fmt.Errorf("challenge failed: %d (%s)", parsedResp.ErrCode, parsedResp.ErrMsg)
	}
	// Step 2: Parse challenge response
	var challengeSchema loginSchema
	if err := parsedResp.DecodeData(&challengeSchema); err != nil {
		return nil, fmt.Errorf("failed to parse challenge response: %w", err)
	}
	// Step 3: Use the token from challenge as Bearer token for login
	url = fmt.Sprintf("%s/coordinator/v1/login", c.baseURL)
	param, err := genLogin(challengeSchema.Token)
	if err != nil {
		return nil, fmt.Errorf("failed to setup login parameter: %w", err)
	}
	jsonData, err := json.Marshal(param)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal login parameter: %w", err)
	}
	req, err = http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		return nil, fmt.Errorf("failed to create login request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+challengeSchema.Token)
	loginResp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to perform login request: %w", err)
	}
	return handleHttpResp(loginResp)
}
// handleHttpResp decodes an upstream coordinator HTTP response into a
// ctypes.Response. Only 200 and 401 are treated as decodable (401 carries
// a structured JWT-expiry payload); any other status is an error.
// The response body is always closed, on every path.
func handleHttpResp(resp *http.Response) (*ctypes.Response, error) {
	// Close unconditionally: the original only closed on 200/401, leaking
	// the connection for every other status code.
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusUnauthorized {
		var respWithData ctypes.Response
		// Note: Body is consumed after decoding, caller should not read it again
		if err := json.NewDecoder(resp.Body).Decode(&respWithData); err != nil {
			return nil, fmt.Errorf("login parsing expected response failed: %v", err)
		}
		return &respWithData, nil
	}
	return nil, fmt.Errorf("login request failed with status: %d", resp.StatusCode)
}
// proxyLoginCompatibleMode emulates a plain prover login for upstreams
// that lack the /proxy_login endpoint: it derives a deterministic "mime"
// key pair from the prover's public key, re-signs the login message with
// that key, and walks the ordinary challenge/login flow.
func (c *upClient) proxyLoginCompatibleMode(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error) {
	mimePrivK, err := buildPrivateKey([]byte(param.PublicKey))
	if err != nil {
		return nil, err
	}
	mimePkHex := common.Bytes2Hex(crypto.CompressPubkey(&mimePrivK.PublicKey))
	genLoginParam := func(challenge string) (*types.LoginParameter, error) {
		// Create login parameter with proxy settings; the message is a
		// copy of the prover's original message with a fresh challenge.
		loginParam := &types.LoginParameter{
			Message:   param.Message,
			PublicKey: mimePkHex,
		}
		loginParam.Message.Challenge = challenge
		// Sign the message with the private key
		if err := loginParam.SignWithKey(mimePrivK); err != nil {
			return nil, fmt.Errorf("failed to sign login parameter: %w", err)
		}
		return loginParam, nil
	}
	return c.Login(ctx, genLoginParam)
}
// ProxyLogin makes a POST request to /v1/proxy_login with LoginParameter
// It authenticates with the proxy's own bearer token; in compatible mode
// it falls back to a regular login flow signed with a derived key.
func (c *upClient) ProxyLogin(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error) {
	if c.compatibileMode {
		return c.proxyLoginCompatibleMode(ctx, param)
	}
	url := fmt.Sprintf("%s/coordinator/v1/proxy_login", c.baseURL)
	jsonData, err := json.Marshal(param)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal proxy login parameter: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		return nil, fmt.Errorf("failed to create proxy login request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+c.loginToken)
	proxyLoginResp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to perform proxy login request: %w", err)
	}
	return handleHttpResp(proxyLoginResp)
}
// GetTask makes a POST request to /v1/get_task with GetTaskParameter
func (c *upClient) GetTask(ctx context.Context, param *types.GetTaskParameter) (*ctypes.Response, error) {
	body, err := json.Marshal(param)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal get task parameter: %w", err)
	}
	endpoint := fmt.Sprintf("%s/coordinator/v1/get_task", c.baseURL)
	req, err := http.NewRequestWithContext(ctx, "POST", endpoint, bytes.NewBuffer(body))
	if err != nil {
		return nil, fmt.Errorf("failed to create get task request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Attach the bearer token only when a login has been performed.
	if token := c.loginToken; token != "" {
		req.Header.Set("Authorization", "Bearer "+token)
	}
	httpResp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	return handleHttpResp(httpResp)
}
// SubmitProof makes a POST request to /v1/submit_proof with SubmitProofParameter
func (c *upClient) SubmitProof(ctx context.Context, param *types.SubmitProofParameter) (*ctypes.Response, error) {
	body, err := json.Marshal(param)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal submit proof parameter: %w", err)
	}
	endpoint := fmt.Sprintf("%s/coordinator/v1/submit_proof", c.baseURL)
	req, err := http.NewRequestWithContext(ctx, "POST", endpoint, bytes.NewBuffer(body))
	if err != nil {
		return nil, fmt.Errorf("failed to create submit proof request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Attach the bearer token only when a login has been performed.
	if token := c.loginToken; token != "" {
		req.Header.Set("Authorization", "Bearer "+token)
	}
	httpResp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	return handleHttpResp(httpResp)
}

View File

@@ -0,0 +1,220 @@
package proxy
import (
"context"
"crypto/ecdsa"
"fmt"
"sync"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/types"
)
type Client interface {
// a client to access upstream coordinator with specified identity
// so prover can contact with coordinator as itself
Client(string) ProverCli
// the client to access upstream as proxy itself
ClientAsProxy(context.Context) ProxyCli
Name() string
}
type ClientManager struct {
name string
cliCfg *config.ProxyClient
cfg *config.UpStream
privKey *ecdsa.PrivateKey
cachedCli struct {
sync.RWMutex
cli *upClient
completionCtx context.Context
}
}
// buildPrivateKey deterministically derives an ECDSA private key from
// arbitrary input bytes: it hashes the input plus a one-byte suffix,
// retrying with the next suffix (0x00..0x20) until the digest is a valid
// secp256k1 scalar.
func buildPrivateKey(inputBytes []byte) (*ecdsa.PrivateKey, error) {
	for suffix := byte(0); suffix <= 0x20; suffix++ {
		digest := crypto.Keccak256(append(inputBytes, suffix))
		key, err := crypto.ToECDSA(digest)
		if err == nil {
			return key, nil
		}
	}
	return nil, fmt.Errorf("failed to generate valid private key from input bytes")
}
// NewClientManager builds a manager for one upstream coordinator,
// deriving the proxy's signing key from the configured secret.
func NewClientManager(name string, cliCfg *config.ProxyClient, cfg *config.UpStream) (*ClientManager, error) {
	log.Info("init client", "name", name, "upcfg", cfg.BaseUrl, "compatible mode", cfg.CompatibileMode)
	key, err := buildPrivateKey([]byte(cliCfg.Secret))
	if err != nil {
		return nil, err
	}
	mgr := &ClientManager{
		name:    name,
		privKey: key,
		cfg:     cfg,
		cliCfg:  cliCfg,
	}
	return mgr, nil
}
type ctxKeyType string
const loginCliKey ctxKeyType = "cli"
// doLogin logs the proxy itself into the upstream coordinator, retrying
// forever (waiting at least 2s, otherwise cfg.RetryWaitTime seconds per
// round) until success or ctx cancellation. On success the token is
// stored on loginCli. In compatible mode the login is skipped and a
// dummy token installed.
func (cliMgr *ClientManager) doLogin(ctx context.Context, loginCli *upClient) {
	if cliMgr.cfg.CompatibileMode {
		loginCli.loginToken = "dummy"
		log.Info("Skip login process for compatible mode")
		return
	}
	// Calculate wait time between 2 seconds and cfg.RetryWaitTime
	minWait := 2 * time.Second
	waitDuration := time.Duration(cliMgr.cfg.RetryWaitTime) * time.Second
	if waitDuration < minWait {
		waitDuration = minWait
	}
	for {
		log.Info("proxy attempting login to upstream coordinator", "name", cliMgr.name)
		loginResp, err := loginCli.Login(ctx, cliMgr.genLoginParam)
		if err == nil && loginResp.ErrCode == 0 {
			var loginResult loginSchema
			err = loginResp.DecodeData(&loginResult)
			if err != nil {
				log.Error("login parsing data fail", "error", err)
			} else {
				loginCli.loginToken = loginResult.Token
				log.Info("login to upstream coordinator successful", "name", cliMgr.name, "time", loginResult.Time)
				// TODO: we need to parse time if we start making use of it
				return
			}
		} else if err != nil {
			log.Error("login process fail", "error", err)
		} else {
			log.Error("login get fail resp", "code", loginResp.ErrCode, "msg", loginResp.ErrMsg)
		}
		log.Info("login to upstream coordinator failed, retrying", "name", cliMgr.name, "error", err, "waitDuration", waitDuration)
		// Timer-based wait so cancellation is honored mid-backoff.
		timer := time.NewTimer(waitDuration)
		select {
		case <-ctx.Done():
			timer.Stop()
			return
		case <-timer.C:
			// Continue to next retry
		}
	}
}
// Name returns the configured identifier of this upstream coordinator.
func (cliMgr *ClientManager) Name() string {
	return cliMgr.name
}
// Client returns a ProverCli bound to the given prover token, so the
// proxy can call the upstream on behalf of that prover.
func (cliMgr *ClientManager) Client(token string) ProverCli {
	cli := newUpClient(cliMgr.cfg)
	cli.loginToken = token
	return cli
}
// ClientAsProxy returns the shared upstream client representing the proxy
// itself, creating it (and performing the upstream login) on first use.
// Only one background goroutine builds the client; concurrent callers
// wait on completionCtx. Returns nil when the caller's ctx is cancelled
// before the login completes.
func (cliMgr *ClientManager) ClientAsProxy(ctx context.Context) ProxyCli {
	// Fast path: the client is already cached.
	cliMgr.cachedCli.RLock()
	if cliMgr.cachedCli.cli != nil {
		defer cliMgr.cachedCli.RUnlock()
		return cliMgr.cachedCli.cli
	}
	cliMgr.cachedCli.RUnlock()
	cliMgr.cachedCli.Lock()
	// Double-check after upgrading to the write lock.
	if cliMgr.cachedCli.cli != nil {
		defer cliMgr.cachedCli.Unlock()
		return cliMgr.cachedCli.cli
	}
	var completionCtx context.Context
	// Check if completion context is set
	if cliMgr.cachedCli.completionCtx != nil {
		completionCtx = cliMgr.cachedCli.completionCtx
	} else {
		// Set new completion context and launch login goroutine
		// NOTE: this ctx shadows the parameter only inside this branch;
		// the select at the bottom still observes the caller's ctx.
		ctx, completionDone := context.WithCancel(context.TODO())
		loginCli := newUpClient(cliMgr.cfg)
		loginCli.resetFromMgr = func() {
			// Drop this client from the cache (only if it is still the
			// cached one) so the next ClientAsProxy re-logs in.
			cliMgr.cachedCli.Lock()
			if cliMgr.cachedCli.cli == loginCli {
				log.Info("cached client cleared", "name", cliMgr.name)
				cliMgr.cachedCli.cli = nil
			}
			cliMgr.cachedCli.Unlock()
		}
		// The new client rides inside the context value so waiters can
		// retrieve it once completionCtx is done.
		completionCtx = context.WithValue(ctx, loginCliKey, loginCli)
		cliMgr.cachedCli.completionCtx = completionCtx
		// Launch keep-login goroutine
		go func() {
			defer completionDone()
			cliMgr.doLogin(context.Background(), loginCli)
			cliMgr.cachedCli.Lock()
			cliMgr.cachedCli.cli = loginCli
			cliMgr.cachedCli.completionCtx = nil
			cliMgr.cachedCli.Unlock()
		}()
	}
	cliMgr.cachedCli.Unlock()
	// Wait for completion or request cancellation
	select {
	case <-ctx.Done():
		return nil
	case <-completionCtx.Done():
		cli := completionCtx.Value(loginCliKey).(*upClient)
		return cli
	}
}
// genLoginParam builds and signs the proxy's own LoginParameter for the
// given challenge, identifying this instance as a proxy-type prover.
func (cliMgr *ClientManager) genLoginParam(challenge string) (*types.LoginParameter, error) {
	// Generate public key string
	publicKeyHex := common.Bytes2Hex(crypto.CompressPubkey(&cliMgr.privKey.PublicKey))
	// Create login parameter with proxy settings
	loginParam := &types.LoginParameter{
		Message: types.Message{
			Challenge:          challenge,
			ProverName:         cliMgr.cliCfg.ProxyName,
			ProverVersion:      version.Version,
			ProverProviderType: types.ProverProviderTypeProxy,
			ProverTypes:        []types.ProverType{}, // Default empty
			VKs:                []string{},           // Default empty
		},
		PublicKey: publicKeyHex,
	}
	// Sign the message with the private key
	if err := loginParam.SignWithKey(cliMgr.privKey); err != nil {
		return nil, fmt.Errorf("failed to sign login parameter: %w", err)
	}
	return loginParam, nil
}

View File

@@ -0,0 +1,44 @@
package proxy
import (
"github.com/prometheus/client_golang/prometheus"
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
)
var (
// GetTask the prover task controller
GetTask *GetTaskController
// SubmitProof the submit proof controller
SubmitProof *SubmitProofController
// Auth the auth controller
Auth *AuthController
)
// Clients manager a series of thread-safe clients for requesting upstream
// coordinators
type Clients map[string]Client
// InitController inits Controller with database
// It builds one upstream client per configured coordinator, the shared
// prover/priority managers, and wires the package-level controllers.
// Panics (fail fast at startup) when an upstream client cannot be built.
func InitController(cfg *config.ProxyConfig, db *gorm.DB, reg prometheus.Registerer) {
	// normalize cfg
	cfg.ProxyManager.Normalize()
	clients := make(map[string]Client)
	for nm, upCfg := range cfg.Coordinators {
		cli, err := NewClientManager(nm, cfg.ProxyManager.Client, upCfg)
		if err != nil {
			// Include the upstream name and cause: the original bare
			// message made startup failures undiagnosable.
			panic("create new client " + nm + " fail: " + err.Error())
		}
		clients[cli.Name()] = cli
	}
	proverManager := NewProverManagerWithPersistent(100, db)
	priorityManager := NewPriorityUpstreamManagerPersistent(db)
	Auth = NewAuthController(cfg, clients, proverManager)
	GetTask = NewGetTaskController(cfg, clients, proverManager, priorityManager, reg)
	SubmitProof = NewSubmitProofController(cfg, clients, proverManager, priorityManager, reg)
}

View File

@@ -0,0 +1,229 @@
package proxy
import (
"fmt"
"math/rand"
"sync"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/coordinator/internal/config"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// getSessionData extracts the prover's public key and name from the gin
// context. On a missing or non-string public key it renders a failure
// response and returns empty strings; a missing name falls back to
// "unknown" so the request can still be forwarded.
func getSessionData(ctx *gin.Context) (string, string) {
	keyVal, keyExist := ctx.Get(coordinatorType.PublicKey)
	key, keyOk := keyVal.(string)
	if !keyExist || !keyOk {
		nerr := fmt.Errorf("no public key binding: %v", keyVal)
		log.Warn("get_task parameter fail", "error", nerr)
		types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
		return "", ""
	}
	nameVal, nameExist := ctx.Get(coordinatorType.ProverName)
	name, nameOk := nameVal.(string)
	if !nameExist || !nameOk {
		log.Error("no public name binding for unknown reason, but we still forward with name = 'unknown'", "data", nameVal)
		name = "unknown"
	}
	return key, name
}
// PriorityUpstreamManager manages priority upstream mappings with thread safety
type PriorityUpstreamManager struct {
sync.RWMutex
*proverPriorityPersist
data map[string]string
}
// NewPriorityUpstreamManager creates a new in-memory-only
// PriorityUpstreamManager (no persistent layer).
func NewPriorityUpstreamManager() *PriorityUpstreamManager {
	return &PriorityUpstreamManager{
		data: make(map[string]string),
	}
}
// NewPriorityUpstreamManagerPersistent creates a PriorityUpstreamManager
// backed by a DB persistent layer.
func NewPriorityUpstreamManagerPersistent(db *gorm.DB) *PriorityUpstreamManager {
	return &PriorityUpstreamManager{
		data:                  make(map[string]string),
		proverPriorityPersist: NewProverPriorityPersist(db),
	}
}
// Get retrieves the priority upstream for a given key
// On an in-memory miss it falls back to the persistent layer.
// NOTE(review): a value restored from the persistent layer is returned
// but NOT written back into p.data, so every miss re-queries the DB —
// confirm this read-through-without-caching is intended.
func (p *PriorityUpstreamManager) Get(key string) (string, bool) {
	p.RLock()
	value, exists := p.data[key]
	p.RUnlock()
	if !exists {
		if v, err := p.proverPriorityPersist.Get(key); err != nil {
			log.Error("persistent priority record read failure", "error", err, "key", key)
		} else if v != "" {
			log.Debug("restore record from persistent layer", "key", key, "value", v)
			return v, true
		}
	}
	return value, exists
}
// Set sets the priority upstream for a given key, mirroring the change
// into the persistent layer after the in-memory update.
func (p *PriorityUpstreamManager) Set(key, value string) {
	defer func() {
		if err := p.proverPriorityPersist.Update(key, value); err != nil {
			log.Error("update priority record failure", "error", err, "key", key, "value", value)
		}
	}()
	p.Lock()
	p.data[key] = value
	p.Unlock()
}
// Delete removes the priority upstream for a given key, mirroring the
// removal into the persistent layer after the in-memory update.
func (p *PriorityUpstreamManager) Delete(key string) {
	defer func() {
		if err := p.proverPriorityPersist.Del(key); err != nil {
			log.Error("delete priority record failure", "error", err, "key", key)
		}
	}()
	p.Lock()
	delete(p.data, key)
	p.Unlock()
}
// GetTaskController the get prover task api controller
type GetTaskController struct {
proverMgr *ProverManager
clients Clients
priorityUpstream *PriorityUpstreamManager
//workingRnd *rand.Rand
//getTaskAccessCounter *prometheus.CounterVec
}
// NewGetTaskController create a get prover task controller
// cfg and reg are currently unused (metrics initialization is still TODO).
func NewGetTaskController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager, priorityMgr *PriorityUpstreamManager, reg prometheus.Registerer) *GetTaskController {
	// TODO: implement proxy get task controller initialization
	return &GetTaskController{
		priorityUpstream: priorityMgr,
		proverMgr:        proverMgr,
		clients:          clients,
	}
}
// func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
// // TODO: implement proxy get task access counter
// return nil
// }
// GetTasks get assigned chunk/batch task
// Flow: bind the request, resolve the prover session, then try the
// prover's priority upstream first (if any); when that yields no task,
// fall back to all upstreams in random order. The first upstream that
// returns a task renders success and is recorded as the new priority
// upstream so the matching proof is later routed back to it.
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
	var getTaskParameter coordinatorType.GetTaskParameter
	if err := ctx.ShouldBind(&getTaskParameter); err != nil {
		nerr := fmt.Errorf("prover task parameter invalid, err:%w", err)
		types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
		return
	}
	publicKey, proverName := getSessionData(ctx)
	if publicKey == "" {
		// getSessionData has already rendered the failure response.
		return
	}
	session := ptc.proverMgr.Get(publicKey)
	if session == nil {
		nerr := fmt.Errorf("can not get session for prover %s", proverName)
		types.RenderFailure(ctx, types.InternalServerError, nerr)
		return
	}
	// getTask queries one upstream. Contract: (nil, 0) after a task has
	// been rendered; (nil, ErrCoordinatorEmptyProofData) when the
	// upstream had no task; (err, code) on any failure.
	getTask := func(cli Client) (error, int) {
		log.Debug("Start get task", "up", cli.Name(), "cli", proverName)
		upStream := cli.Name()
		resp, err := session.GetTask(ctx, &getTaskParameter, cli)
		if err != nil {
			log.Error("Upstream error for get task", "error", err, "up", upStream, "cli", proverName)
			return err, types.ErrCoordinatorGetTaskFailure
		} else if resp.ErrCode != types.ErrCoordinatorEmptyProofData {
			if resp.ErrCode != 0 {
				// simply dispatch the error from upstream to prover
				log.Error("Upstream has error resp for get task", "code", resp.ErrCode, "msg", resp.ErrMsg, "up", upStream, "cli", proverName)
				return fmt.Errorf("upstream failure %s:", resp.ErrMsg), resp.ErrCode
			}
			var task coordinatorType.GetTaskSchema
			if err = resp.DecodeData(&task); err == nil {
				// Prefix the task ID with the upstream name so proof
				// submission can be routed back to the same upstream.
				task.TaskID = formUpstreamWithTaskName(upStream, task.TaskID)
				ptc.priorityUpstream.Set(publicKey, upStream)
				log.Debug("Upstream get task", "up", upStream, "cli", proverName, "taskID", task.TaskID, "taskType", task.TaskType)
				types.RenderSuccess(ctx, &task)
				return nil, 0
			} else {
				log.Error("Upstream has wrong data for get task", "error", err, "up", upStream, "cli", proverName)
				return fmt.Errorf("decode task fail: %v", err), types.InternalServerError
			}
		}
		return nil, resp.ErrCode
	}
	// if the priority upstream is set, we try this upstream first until get the task resp or no task resp
	priorityUpstream, exist := ptc.priorityUpstream.Get(publicKey)
	if exist {
		cli := ptc.clients[priorityUpstream]
		log.Debug("Try get task from priority stream", "up", priorityUpstream, "cli", proverName)
		if cli != nil {
			err, code := getTask(cli)
			if err != nil {
				types.RenderFailure(ctx, code, err)
				return
			} else if code == 0 {
				// get task done and rendered, return
				return
			}
			// only continue if get empty task (the task has been removed in upstream)
			log.Debug("can not get priority task from upstream", "up", priorityUpstream, "cli", proverName)
		} else {
			log.Warn("A upstream is removed or lost for some reason while running", "up", priorityUpstream, "cli", proverName)
		}
	}
	// The priority upstream had nothing (or vanished): clear it before
	// probing all upstreams.
	ptc.priorityUpstream.Delete(publicKey)
	// Create a slice to hold the keys
	keys := make([]string, 0, len(ptc.clients))
	for k := range ptc.clients {
		keys = append(keys, k)
	}
	// Shuffle the keys using a local RNG (avoid deprecated rand.Seed)
	rand.Shuffle(len(keys), func(i, j int) {
		keys[i], keys[j] = keys[j], keys[i]
	})
	// Iterate over the shuffled keys
	for _, n := range keys {
		if err, code := getTask(ptc.clients[n]); err == nil && code == 0 {
			// get task done
			return
		}
	}
	log.Debug("get no task from upstream", "cli", proverName)
	// if all get task failed, throw empty proof resp
	types.RenderFailure(ctx, types.ErrCoordinatorEmptyProofData, fmt.Errorf("get empty prover task"))
}

View File

@@ -0,0 +1,125 @@
package proxy
import (
	"errors"
	"time"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"

	"scroll-tech/coordinator/internal/types"
)
type proverDataPersist struct {
db *gorm.DB
}
// NewProverDataPersist creates a persistence instance backed by a gorm DB.
// A nil db yields a disabled (no-op) persistence layer.
func NewProverDataPersist(db *gorm.DB) *proverDataPersist {
	return &proverDataPersist{db: db}
}
// gorm model mapping to table `prover_sessions`
type proverSessionRecord struct {
PublicKey string `gorm:"column:public_key;not null"`
Upstream string `gorm:"column:upstream;not null"`
UpToken string `gorm:"column:up_token;not null"`
Expired time.Time `gorm:"column:expired;not null"`
}
func (proverSessionRecord) TableName() string { return "prover_sessions" }
// priority_upstream model
type priorityUpstreamRecord struct {
PublicKey string `gorm:"column:public_key;not null"`
Upstream string `gorm:"column:upstream;not null"`
}
func (priorityUpstreamRecord) TableName() string { return "priority_upstream" }
// get retrieves ProverSession for a given user key, returns empty if still not exists
// A nil receiver or nil DB disables persistence and yields (nil, nil).
func (p *proverDataPersist) Get(userKey string) (*proverSession, error) {
	if p == nil || p.db == nil {
		return nil, nil
	}
	var rows []proverSessionRecord
	if err := p.db.Where("public_key = ?", userKey).Find(&rows).Error; err != nil || len(rows) == 0 {
		return nil, err
	}
	// Rebuild the in-memory session: one row per upstream token.
	ret := &proverSession{
		proverToken: make(map[string]loginToken),
	}
	for _, r := range rows {
		ls := &types.LoginSchema{
			Token: r.UpToken,
			Time:  r.Expired,
		}
		ret.proverToken[r.Upstream] = loginToken{LoginSchema: ls}
	}
	return ret, nil
}
// Update upserts the (userKey, upstream) token row; a no-op when
// persistence is disabled or login is nil.
func (p *proverDataPersist) Update(userKey, up string, login *types.LoginSchema) error {
	if p == nil || p.db == nil || login == nil {
		return nil
	}
	row := proverSessionRecord{
		PublicKey: userKey,
		Upstream:  up,
		UpToken:   login.Token,
		Expired:   login.Time,
	}
	onConflict := clause.OnConflict{
		Columns:   []clause.Column{{Name: "public_key"}, {Name: "upstream"}},
		DoUpdates: clause.AssignmentColumns([]string{"up_token", "expired"}),
	}
	return p.db.Clauses(onConflict).Create(&row).Error
}
type proverPriorityPersist struct {
db *gorm.DB
}
// NewProverPriorityPersist creates a priority-upstream persistence
// instance backed by a gorm DB; a nil db disables it.
func NewProverPriorityPersist(db *gorm.DB) *proverPriorityPersist {
	return &proverPriorityPersist{db: db}
}
// Get returns the persisted priority upstream for userKey, or "" when no
// record exists (or persistence is disabled). A missing row is not an
// error; only unexpected DB errors are propagated.
func (p *proverPriorityPersist) Get(userKey string) (string, error) {
	if p == nil || p.db == nil {
		return "", nil
	}
	var rec priorityUpstreamRecord
	if err := p.db.Where("public_key = ?", userKey).First(&rec).Error; err != nil {
		// errors.Is instead of !=: gorm may wrap ErrRecordNotFound, which
		// a plain comparison would misclassify as a real failure.
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return "", nil
		}
		return "", err
	}
	return rec.Upstream, nil
}
// Update upserts the priority upstream for userKey; a no-op when
// persistence is disabled.
func (p *proverPriorityPersist) Update(userKey, up string) error {
	if p == nil || p.db == nil {
		return nil
	}
	row := priorityUpstreamRecord{PublicKey: userKey, Upstream: up}
	onConflict := clause.OnConflict{
		Columns:   []clause.Column{{Name: "public_key"}},
		DoUpdates: clause.Assignments(map[string]interface{}{"upstream": up}),
	}
	return p.db.Clauses(onConflict).Create(&row).Error
}
// Del removes the persisted priority upstream for userKey; a no-op when
// persistence is disabled.
func (p *proverPriorityPersist) Del(userKey string) error {
	if p == nil || p.db == nil {
		return nil
	}
	res := p.db.Where("public_key = ?", userKey).Delete(&priorityUpstreamRecord{})
	return res.Error
}

View File

@@ -0,0 +1,285 @@
package proxy
import (
"context"
"fmt"
"math"
"sync"
"gorm.io/gorm"
"github.com/scroll-tech/go-ethereum/log"
ctypes "scroll-tech/common/types"
"scroll-tech/coordinator/internal/types"
)
type ProverManager struct {
sync.RWMutex
data map[string]*proverSession
willDeprecatedData map[string]*proverSession
sizeLimit int
persistent *proverDataPersist
}
// NewProverManager creates an in-memory-only ProverManager that keeps at
// most `size` live sessions before rotating them into the deprecated
// generation (no persistent layer).
func NewProverManager(size int) *ProverManager {
	return &ProverManager{
		data:               make(map[string]*proverSession),
		willDeprecatedData: make(map[string]*proverSession),
		sizeLimit:          size,
	}
}
// NewProverManagerWithPersistent creates a ProverManager like
// NewProverManager, additionally backed by a DB persistent layer so
// sessions survive restarts.
func NewProverManagerWithPersistent(size int, db *gorm.DB) *ProverManager {
	return &ProverManager{
		data:               make(map[string]*proverSession),
		willDeprecatedData: make(map[string]*proverSession),
		sizeLimit:          size,
		persistent:         NewProverDataPersist(db),
	}
}
// get retrieves ProverSession for a given user key, returns empty if still not exists
// Lookup order: live map, then the deprecated generation, then the
// persistent layer. Any hit is (re-)inserted into the live map by the
// deferred block so subsequent lookups are fast.
func (m *ProverManager) Get(userKey string) (ret *proverSession) {
	// Defers run LIFO: RUnlock (registered below) fires first, so this
	// closure may safely take the write lock.
	defer func() {
		if ret == nil {
			var err error
			ret, err = m.persistent.Get(userKey)
			if err != nil {
				log.Error("Get persistent layer for prover tokens fail", "error", err)
			} else if ret != nil {
				log.Debug("restore record from persistent", "key", userKey, "token", ret.proverToken)
				ret.persistent = m.persistent
			}
		}
		if ret != nil {
			m.Lock()
			m.data[userKey] = ret
			m.Unlock()
		}
	}()
	m.RLock()
	defer m.RUnlock()
	if r, existed := m.data[userKey]; existed {
		return r
	} else {
		return m.willDeprecatedData[userKey]
	}
}
// GetOrCreate returns the existing session for userKey or creates a new
// empty one. When the live map reaches sizeLimit it is rotated into the
// deprecated generation before inserting.
func (m *ProverManager) GetOrCreate(userKey string) *proverSession {
	if ret := m.Get(userKey); ret != nil {
		return ret
	}
	m.Lock()
	defer m.Unlock()
	// Re-check under the write lock: another goroutine may have created
	// the session after our Get returned nil; overwriting it here would
	// silently discard its cached upstream tokens.
	if existing, ok := m.data[userKey]; ok {
		return existing
	}
	ret := &proverSession{
		proverToken: make(map[string]loginToken),
		persistent:  m.persistent,
	}
	if len(m.data) >= m.sizeLimit {
		m.willDeprecatedData = m.data
		m.data = make(map[string]*proverSession)
	}
	m.data[userKey] = ret
	return ret
}
type loginToken struct {
*types.LoginSchema
phase uint
}
// Client wraps an http client with a preset host for coordinator API calls
type proverSession struct {
persistent *proverDataPersist
sync.RWMutex
proverToken map[string]loginToken
completionCtx context.Context
}
// maintainLogin performs (or joins) a single-flight proxy login toward
// upstream `up`. Only one goroutine runs the login at a time; others wait
// on completionCtx and then retry to observe the produced token. The
// `phase` counter versions the stored token: a caller whose phase is
// older than the stored one gets the current token back without logging
// in again.
func (c *proverSession) maintainLogin(ctx context.Context, cliMgr Client, up string, param *types.LoginParameter, phase uint) (result loginToken, nerr error) {
	c.Lock()
	curPhase := c.proverToken[up].phase
	if c.completionCtx != nil {
		// Another goroutine is logging in: wait for it, then re-enter so
		// we see the token it produced (or take over if it failed).
		waitctx := c.completionCtx
		c.Unlock()
		select {
		case <-waitctx.Done():
			return c.maintainLogin(ctx, cliMgr, up, param, phase)
		case <-ctx.Done():
			nerr = fmt.Errorf("ctx fail")
			return
		}
	}
	if phase < curPhase {
		// outdate login phase, give up
		log.Debug("drop outdated proxy login attempt", "upstream", up, "cli", param.Message.ProverName, "phase", phase, "now", curPhase)
		defer c.Unlock()
		return c.proverToken[up], nil
	}
	// occupy the update slot
	completeCtx, cf := context.WithCancel(ctx)
	defer cf()
	c.completionCtx = completeCtx
	// Publish the result (if any) and release the single-flight slot once
	// this function returns, on every path.
	defer func() {
		c.Lock()
		c.completionCtx = nil
		if result.LoginSchema != nil {
			c.proverToken[up] = result
			log.Info("maintain login status", "upstream", up, "cli", param.Message.ProverName, "phase", curPhase+1)
		}
		c.Unlock()
		if nerr != nil {
			log.Error("maintain login fail", "error", nerr, "upstream", up, "cli", param.Message.ProverName, "phase", curPhase)
		}
	}()
	c.Unlock()
	log.Debug("start proxy login process", "upstream", up, "cli", param.Message.ProverName)
	cli := cliMgr.ClientAsProxy(ctx)
	if cli == nil {
		nerr = fmt.Errorf("get upstream cli fail")
		return
	}
	resp, err := cli.ProxyLogin(ctx, param)
	if err != nil {
		nerr = fmt.Errorf("proxylogin fail: %v", err)
		return
	}
	if resp.ErrCode == ctypes.ErrJWTTokenExpired {
		// The proxy's own upstream token expired: reset the cached client
		// (forcing a fresh proxy login) and retry once.
		log.Info("up stream has expired, renew upstream connection", "up", up)
		cli.Reset()
		cli = cliMgr.ClientAsProxy(ctx)
		if cli == nil {
			nerr = fmt.Errorf("get upstream cli fail (secondary try)")
			return
		}
		// like SDK, we would try one more time if the upstream token is expired
		resp, err = cli.ProxyLogin(ctx, param)
		if err != nil {
			nerr = fmt.Errorf("proxylogin fail: %v", err)
			return
		}
	}
	if resp.ErrCode != 0 {
		nerr = fmt.Errorf("upstream fail: %d (%s)", resp.ErrCode, resp.ErrMsg)
		return
	}
	var loginResult loginSchema
	if err := resp.DecodeData(&loginResult); err != nil {
		nerr = err
		return
	}
	log.Debug("Proxy login done", "upstream", up, "cli", param.Message.ProverName)
	result = loginToken{
		LoginSchema: &types.LoginSchema{
			Token: loginResult.Token,
		},
		phase: curPhase + 1,
	}
	return
}
// const expireTolerant = 10 * time.Minute
// ProxyLogin makes a POST request to /v1/proxy_login with LoginParameter.
// It forces a login round against the given upstream (phase math.MaxUint means
// "always attempt") and, when a newer token was obtained, writes it through to
// the persistent layer so the proxy can survive restarts.
func (c *proverSession) ProxyLogin(ctx context.Context, cli Client, param *types.LoginParameter) error {
	upstream := cli.Name()

	c.RLock()
	prevToken := c.proverToken[upstream]
	c.RUnlock()

	refreshed, loginErr := c.maintainLogin(ctx, cli, upstream, param, math.MaxUint)

	// Persist only when the login actually advanced the phase, i.e. a token
	// newer than the one we held before this call was obtained.
	if refreshed.phase > prevToken.phase {
		if persistErr := c.persistent.Update(param.PublicKey, upstream, refreshed.LoginSchema); persistErr != nil {
			log.Error("Update persistent layer for prover tokens fail", "error", persistErr)
		}
	}
	return loginErr
}
// GetTask makes a POST request to /v1/get_task with GetTaskParameter.
//
// Fast path: reuse the cached upstream token. If the upstream reports an
// expired JWT (or we hold no token), re-login once via maintainLogin — the
// login parameter is expected under the LoginParamCache context key — and
// retry the request with the fresh token.
func (c *proverSession) GetTask(ctx context.Context, param *types.GetTaskParameter, cliMgr Client) (*ctypes.Response, error) {
	up := cliMgr.Name()

	c.RLock()
	log.Debug("call get task", "up", up, "tokens", c.proverToken)
	token := c.proverToken[up]
	c.RUnlock()

	if token.LoginSchema != nil {
		resp, err := cliMgr.Client(token.Token).GetTask(ctx, param)
		if err != nil {
			return nil, err
		}
		// Any result other than an expired token is final.
		if resp.ErrCode != ctypes.ErrJWTTokenExpired {
			return resp, nil
		}
	}
	// like SDK, we would try one more time if the upstream token is expired
	// get param from ctx
	loginParam, ok := ctx.Value(LoginParamCache).(*types.LoginParameter)
	if !ok {
		return nil, fmt.Errorf("Unexpected error, no loginparam ctx value")
	}
	newToken, err := c.maintainLogin(ctx, cliMgr, up, loginParam, token.phase)
	if err != nil {
		return nil, fmt.Errorf("update prover token fail: %v", err)
	}
	// maintainLogin may return the cached entry for an outdated login phase,
	// and that entry can hold a nil LoginSchema; dereferencing newToken.Token
	// through the nil embedded pointer would panic, so fail explicitly instead.
	if newToken.LoginSchema == nil {
		return nil, fmt.Errorf("no valid login token for upstream %s", up)
	}
	return cliMgr.Client(newToken.Token).GetTask(ctx, param)
}
// SubmitProof makes a POST request to /v1/submit_proof with SubmitProofParameter.
//
// Mirrors GetTask: try the cached upstream token first; on an expired-JWT
// response (or when no token is cached), re-login once using the login
// parameter stored under the LoginParamCache context key and retry.
func (c *proverSession) SubmitProof(ctx context.Context, param *types.SubmitProofParameter, cliMgr Client) (*ctypes.Response, error) {
	up := cliMgr.Name()

	c.RLock()
	token := c.proverToken[up]
	c.RUnlock()

	if token.LoginSchema != nil {
		resp, err := cliMgr.Client(token.Token).SubmitProof(ctx, param)
		if err != nil {
			return nil, err
		}
		// Any result other than an expired token is final.
		if resp.ErrCode != ctypes.ErrJWTTokenExpired {
			return resp, nil
		}
	}
	// like SDK, we would try one more time if the upstream token is expired
	// get param from ctx
	loginParam, ok := ctx.Value(LoginParamCache).(*types.LoginParameter)
	if !ok {
		return nil, fmt.Errorf("Unexpected error, no loginparam ctx value")
	}
	newToken, err := c.maintainLogin(ctx, cliMgr, up, loginParam, token.phase)
	if err != nil {
		return nil, fmt.Errorf("update prover token fail: %v", err)
	}
	// maintainLogin may return the cached entry for an outdated login phase,
	// and that entry can hold a nil LoginSchema; dereferencing newToken.Token
	// through the nil embedded pointer would panic, so fail explicitly instead.
	if newToken.LoginSchema == nil {
		return nil, fmt.Errorf("no valid login token for upstream %s", up)
	}
	return cliMgr.Client(newToken.Token).SubmitProof(ctx, param)
}

View File

@@ -0,0 +1,107 @@
package proxy
import (
"testing"
)
// TestProverManagerGetAndCreate validates basic creation and retrieval semantics.
func TestProverManagerGetAndCreate(t *testing.T) {
	mgr := NewProverManager(2)

	// A key that was never created must not resolve to a session.
	if unexpected := mgr.Get("user1"); unexpected != nil {
		t.Fatalf("expected nil for non-existent key, got: %+v", unexpected)
	}

	created := mgr.GetOrCreate("user1")
	if created == nil {
		t.Fatalf("expected non-nil session from GetOrCreate")
	}

	// Should be stable on subsequent Get
	fetched := mgr.Get("user1")
	if fetched != created {
		t.Fatalf("expected same session pointer on Get, got different instance: %p vs %p", fetched, created)
	}
}
// TestProverManagerRolloverAndPromotion verifies rollover when sizeLimit is reached
// and that old entries are accessible and promoted back to active data map.
//
// The manager keeps two generations: an active `data` map and a
// `willDeprecatedData` map that receives the previous generation on rollover.
// Lookups in the deprecated map promote the entry back into `data`.
func TestProverManagerRolloverAndPromotion(t *testing.T) {
	pm := NewProverManager(2)
	s1 := pm.GetOrCreate("u1")
	s2 := pm.GetOrCreate("u2")
	if s1 == nil || s2 == nil {
		t.Fatalf("expected sessions to be created for u1/u2")
	}
	// Precondition: data should contain 2 entries, no deprecated yet.
	// The manager's maps are inspected directly under its RWMutex; each
	// failure path unlocks before Fatalf since Fatalf ends the test goroutine.
	pm.RLock()
	if len(pm.data) != 2 {
		pm.RUnlock()
		t.Fatalf("expected data len=2 before rollover, got %d", len(pm.data))
	}
	if len(pm.willDeprecatedData) != 0 {
		pm.RUnlock()
		t.Fatalf("expected willDeprecatedData len=0 before rollover, got %d", len(pm.willDeprecatedData))
	}
	pm.RUnlock()
	// Trigger rollover by creating a third key.
	s3 := pm.GetOrCreate("u3")
	if s3 == nil {
		t.Fatalf("expected session for u3 after rollover")
	}
	// After rollover: current data should only have u3, deprecated should hold u1 and u2.
	pm.RLock()
	if len(pm.data) != 1 {
		pm.RUnlock()
		t.Fatalf("expected data len=1 after rollover (only u3), got %d", len(pm.data))
	}
	if _, ok := pm.data["u3"]; !ok {
		pm.RUnlock()
		t.Fatalf("expected 'u3' to be in active data after rollover")
	}
	if len(pm.willDeprecatedData) != 2 {
		pm.RUnlock()
		t.Fatalf("expected willDeprecatedData len=2 after rollover, got %d", len(pm.willDeprecatedData))
	}
	pm.RUnlock()
	// Accessing an old key should return the same pointer and promote it to active data map.
	got1 := pm.Get("u1")
	if got1 != s1 {
		t.Fatalf("expected same pointer for u1 after promotion, got %p want %p", got1, s1)
	}
	// The promotion should add it to active data (without enforcing size limit on promotion).
	pm.RLock()
	if _, ok := pm.data["u1"]; !ok {
		pm.RUnlock()
		t.Fatalf("expected 'u1' to be present in active data after promotion")
	}
	if len(pm.data) != 2 {
		// Now should contain u3 and u1
		pm.RUnlock()
		t.Fatalf("expected data len=2 after promotion of u1, got %d", len(pm.data))
	}
	pm.RUnlock()
	// Access the other deprecated key and ensure behavior is consistent.
	got2 := pm.Get("u2")
	if got2 != s2 {
		t.Fatalf("expected same pointer for u2 after promotion, got %p want %p", got2, s2)
	}
	pm.RLock()
	if _, ok := pm.data["u2"]; !ok {
		pm.RUnlock()
		t.Fatalf("expected 'u2' to be present in active data after promotion")
	}
	// Note: promotion does not enforce sizeLimit, so data can grow beyond sizeLimit after promotions.
	if len(pm.data) != 3 {
		pm.RUnlock()
		t.Fatalf("expected data len=3 after promoting both u1 and u2, got %d", len(pm.data))
	}
	pm.RUnlock()
}

View File

@@ -0,0 +1,94 @@
package proxy
import (
"fmt"
"strings"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types"
"scroll-tech/coordinator/internal/config"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// SubmitProofController the submit proof api controller
type SubmitProofController struct {
	// proverMgr resolves the per-prover session for the authenticated public key.
	proverMgr *ProverManager
	// clients maps an upstream coordinator name to its client.
	clients Clients
	// priorityUpstream tracks a prover's upstream affinity; the entry for the
	// prover's public key is deleted after a successful submission.
	priorityUpstream *PriorityUpstreamManager
}
// NewSubmitProofController create the submit proof api controller instance
//
// NOTE(review): cfg and reg are accepted but currently unused here — confirm
// whether proxy-config options or metrics registration are still intended for
// this controller, or drop the parameters at the call sites.
func NewSubmitProofController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager, priorityMgr *PriorityUpstreamManager, reg prometheus.Registerer) *SubmitProofController {
	return &SubmitProofController{
		proverMgr:        proverMgr,
		clients:          clients,
		priorityUpstream: priorityMgr,
	}
}
// upstreamFromTaskName splits a proxy task ID of the form "<upstream>:<taskID>"
// at the first colon into the upstream name and the original task ID. When no
// separator is present, the upstream part is empty and the whole input is
// returned as the task ID.
func upstreamFromTaskName(taskID string) (string, string) {
	if idx := strings.Index(taskID, ":"); idx >= 0 {
		return taskID[:idx], taskID[idx+1:]
	}
	return "", taskID
}
// formUpstreamWithTaskName prefixes taskID with the upstream name, producing
// the "<upstream>:<taskID>" form that upstreamFromTaskName reverses.
func formUpstreamWithTaskName(upstream string, taskID string) string {
	return upstream + ":" + taskID
}
// SubmitProof prover submit the proof to coordinator
//
// The proxy resolves the prover's session from the JWT-bound public key,
// splits the upstream prefix off the task ID, forwards the submission to the
// matching upstream coordinator, and relays the upstream's response (or
// error) back to the prover.
func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
	var submitParameter coordinatorType.SubmitProofParameter
	if err := ctx.ShouldBind(&submitParameter); err != nil {
		nerr := fmt.Errorf("prover submitProof parameter invalid, err:%w", err)
		types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
		return
	}
	// An empty public key means the identity could not be established;
	// presumably getSessionData has already rendered a failure — TODO confirm.
	publicKey, proverName := getSessionData(ctx)
	if publicKey == "" {
		return
	}
	session := spc.proverMgr.Get(publicKey)
	if session == nil {
		nerr := fmt.Errorf("can not get session for prover %s", proverName)
		types.RenderFailure(ctx, types.InternalServerError, nerr)
		return
	}
	// Task IDs handed to provers carry an "<upstream>:" prefix; recover both
	// the upstream name and the upstream's own task ID.
	upstream, realTaskID := upstreamFromTaskName(submitParameter.TaskID)
	cli, existed := spc.clients[upstream]
	if !existed {
		log.Warn("A upstream for submitting is removed or lost for some reason while running", "up", upstream)
		nerr := fmt.Errorf("Invalid upstream name (%s) from taskID %s", upstream, submitParameter.TaskID)
		types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
		return
	}
	log.Debug("Start submitting", "up", upstream, "cli", proverName, "id", realTaskID, "status", submitParameter.Status)
	// Forward the bare upstream task ID, not the prefixed proxy form.
	submitParameter.TaskID = realTaskID
	resp, err := session.SubmitProof(ctx, &submitParameter, cli)
	if err != nil {
		log.Error("Upstream has error resp for submit", "error", err, "up", upstream, "cli", proverName, "taskID", realTaskID)
		types.RenderFailure(ctx, types.ErrCoordinatorGetTaskFailure, err)
		return
	} else if resp.ErrCode != 0 {
		log.Error("Upstream has error resp for get task", "code", resp.ErrCode, "msg", resp.ErrMsg, "up", upstream, "cli", proverName, "taskID", realTaskID)
		// simply dispatch the error from upstream to prover
		types.RenderFailure(ctx, resp.ErrCode, fmt.Errorf("%s", resp.ErrMsg))
		return
	} else {
		log.Debug("Submit proof to upstream", "up", upstream, "cli", proverName, "taskID", realTaskID)
		// A completed submission ends this prover's affinity to the upstream.
		spc.priorityUpstream.Delete(publicKey)
		types.RenderSuccess(ctx, resp.Data)
		return
	}
}

View File

@@ -1,6 +1,7 @@
package auth package auth
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"strings" "strings"
@@ -19,45 +20,72 @@ import (
// LoginLogic the auth logic // LoginLogic the auth logic
type LoginLogic struct { type LoginLogic struct {
cfg *config.Config cfg *config.VerifierConfig
challengeOrm *orm.Challenge deduplicator ChallengeDeduplicator
openVmVks map[string]struct{} openVmVks map[string]struct{}
proverVersionHardForkMap map[string]string proverVersionHardForkMap map[string]string
} }
type ChallengeDeduplicator interface {
InsertChallenge(ctx context.Context, challengeString string) error
}
type SimpleDeduplicator struct {
}
func (s *SimpleDeduplicator) InsertChallenge(ctx context.Context, challengeString string) error {
return nil
}
// NewLoginLogicWithSimpleDeduplicator creates a LoginLogic that does not use the db to deduplicate challenges
func NewLoginLogicWithSimpleDeduplicator(vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
return newLoginLogic(&SimpleDeduplicator{}, vcfg, vf)
}
// NewLoginLogic new a LoginLogic // NewLoginLogic new a LoginLogic
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic { func NewLoginLogic(db *gorm.DB, vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
return newLoginLogic(orm.NewChallenge(db), vcfg, vf)
}
func newLoginLogic(deduplicator ChallengeDeduplicator, vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
proverVersionHardForkMap := make(map[string]string) proverVersionHardForkMap := make(map[string]string)
for _, cfg := range cfg.ProverManager.Verifier.Verifiers { for _, cfg := range vcfg.Verifiers {
proverVersionHardForkMap[cfg.ForkName] = cfg.MinProverVersion proverVersionHardForkMap[cfg.ForkName] = cfg.MinProverVersion
} }
return &LoginLogic{ return &LoginLogic{
cfg: cfg, cfg: vcfg,
openVmVks: vf.OpenVMVkMap, openVmVks: vf.OpenVMVkMap,
challengeOrm: orm.NewChallenge(db), deduplicator: deduplicator,
proverVersionHardForkMap: proverVersionHardForkMap, proverVersionHardForkMap: proverVersionHardForkMap,
} }
} }
// InsertChallengeString insert and check the challenge string is existed // Verify the completeness of login message
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error { func VerifyMsg(login *types.LoginParameter) error {
return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
}
func (l *LoginLogic) Check(login *types.LoginParameter) error {
verify, err := login.Verify() verify, err := login.Verify()
if err != nil || !verify { if err != nil || !verify {
log.Error("auth message verify failure", "prover_name", login.Message.ProverName, log.Error("auth message verify failure", "prover_name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message) "prover_version", login.Message.ProverVersion, "message", login.Message)
return errors.New("auth message verify failure") return errors.New("auth message verify failure")
} }
return nil
}
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.MinProverVersion) { // InsertChallengeString insert and check the challenge string is existed
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.MinProverVersion, login.Message.ProverVersion) func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
return l.deduplicator.InsertChallenge(ctx.Copy(), challenge)
}
// Check if the login client is compatible with the setting in coordinator
func (l *LoginLogic) CompatiblityCheck(login *types.LoginParameter) error {
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.MinProverVersion, login.Message.ProverVersion)
} }
vks := make(map[string]struct{}) vks := make(map[string]struct{})
@@ -65,27 +93,32 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
vks[vk] = struct{}{} vks[vk] = struct{}{}
} }
for _, vk := range login.Message.VKs { // new coordinator / proxy do not check vks while login, code only for backward compatibility
if _, ok := vks[vk]; !ok { if len(vks) != 0 {
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName, for _, vk := range login.Message.VKs {
"prover_version", login.Message.ProverVersion, "message", login.Message) if _, ok := vks[vk]; !ok {
if !version.CheckScrollProverVersion(login.Message.ProverVersion) { log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", "prover_version", login.Message.ProverVersion, "message", login.Message)
version.Version, login.Message.ProverVersion) if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
}
// if the prover reports a same prover version
return errors.New("incompatible vk. please check your params files or config files")
} }
// if the prover reports a same prover version
return errors.New("incompatible vk. please check your params files or config files")
} }
} }
if login.Message.ProverProviderType != types.ProverProviderTypeInternal && login.Message.ProverProviderType != types.ProverProviderTypeExternal { switch login.Message.ProverProviderType {
case types.ProverProviderTypeInternal:
case types.ProverProviderTypeExternal:
case types.ProverProviderTypeProxy:
case types.ProverProviderTypeUndefined:
// for backward compatibility, set ProverProviderType as internal // for backward compatibility, set ProverProviderType as internal
if login.Message.ProverProviderType == types.ProverProviderTypeUndefined { login.Message.ProverProviderType = types.ProverProviderTypeInternal
login.Message.ProverProviderType = types.ProverProviderTypeInternal default:
} else { log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion) return errors.New("invalid prover provider type.")
return errors.New("invalid prover provider type.")
}
} }
return nil return nil

View File

@@ -1,3 +1,5 @@
//go:build !mock_verifier
package libzkp package libzkp
/* /*
@@ -13,8 +15,6 @@ import (
"os" "os"
"strings" "strings"
"unsafe" "unsafe"
"scroll-tech/common/types/message"
) )
func init() { func init() {
@@ -72,31 +72,6 @@ func VerifyBundleProof(proofData, forkName string) bool {
return result != 0 return result != 0
} }
// TaskType enum values matching the Rust enum
const (
TaskTypeChunk = 0
TaskTypeBatch = 1
TaskTypeBundle = 2
)
func fromMessageTaskType(taskType int) int {
switch message.ProofType(taskType) {
case message.ProofTypeChunk:
return TaskTypeChunk
case message.ProofTypeBatch:
return TaskTypeBatch
case message.ProofTypeBundle:
return TaskTypeBundle
default:
panic(fmt.Sprintf("unsupported proof type: %d", taskType))
}
}
// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}
// Generate wrapped proof // Generate wrapped proof
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string { func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
cProofJSON := goToCString(proofJSON) cProofJSON := goToCString(proofJSON)

View File

@@ -0,0 +1,57 @@
//go:build mock_verifier
package libzkp
import (
"encoding/json"
)
// // InitVerifier is a no-op in the mock.
// func InitVerifier(configJSON string) {}
// // VerifyChunkProof returns a fixed success in the mock.
// func VerifyChunkProof(proofData, forkName string) bool {
// return true
// }
// // VerifyBatchProof returns a fixed success in the mock.
// func VerifyBatchProof(proofData, forkName string) bool {
// return true
// }
// // VerifyBundleProof returns a fixed success in the mock.
// func VerifyBundleProof(proofData, forkName string) bool {
// return true
// }
// UniversalTaskCompatibilityFix is not expected to be reached in the
// mock_verifier build; it panics to surface any accidental call path.
func UniversalTaskCompatibilityFix(taskJSON string) (string, error) {
	panic("should not run here")
}
// GenerateWrappedProof returns a fixed dummy proof string in the mock.
// proofJSON and metadata must be valid JSON fragments: they are embedded
// verbatim (as json.RawMessage) into the wrapper object together with a
// constant git version marker. vkData is ignored in the mock.
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
	wrapper := struct {
		Metadata   json.RawMessage `json:"metadata"`
		Proof      json.RawMessage `json:"proof"`
		GitVersion string          `json:"git_version"`
	}{
		Metadata:   json.RawMessage(metadata),
		Proof:      json.RawMessage(proofJSON),
		GitVersion: "mock-git-version",
	}
	encoded, marshalErr := json.Marshal(wrapper)
	if marshalErr != nil {
		panic(marshalErr)
	}
	return string(encoded)
}
// DumpVk is a no-op and returns nil in the mock.
// forkName and filePath are ignored; presumably the non-mock build writes the
// verifying key for the given fork to the given path — see the cgo variant.
func DumpVk(forkName, filePath string) error {
	return nil
}
// SetDynamicFeature is a no-op in the mock.
// feats is ignored; the parameter exists only to match the non-mock API surface.
func SetDynamicFeature(feats string) {}

View File

@@ -0,0 +1,27 @@
package libzkp
import (
"fmt"
"scroll-tech/common/types/message"
)
// TaskType enum values matching the Rust enum
// NOTE(review): these numeric values must stay in sync with the Rust side —
// confirm against the libzkp Rust definition before changing.
const (
	// TaskTypeChunk identifies a chunk proving task.
	TaskTypeChunk = 0
	// TaskTypeBatch identifies a batch proving task.
	TaskTypeBatch = 1
	// TaskTypeBundle identifies a bundle proving task.
	TaskTypeBundle = 2
)
// fromMessageTaskType maps a message.ProofType value onto the Rust-side
// TaskType enum constant; it panics on any unsupported proof type.
func fromMessageTaskType(taskType int) int {
	conversion := map[message.ProofType]int{
		message.ProofTypeChunk:  TaskTypeChunk,
		message.ProofTypeBatch:  TaskTypeBatch,
		message.ProofTypeBundle: TaskTypeBundle,
	}
	if mapped, known := conversion[message.ProofType(taskType)]; known {
		return mapped
	}
	panic(fmt.Sprintf("unsupported proof type: %d", taskType))
}

View File

@@ -5,6 +5,7 @@ package libzkp
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"strings"
"scroll-tech/common/types/message" "scroll-tech/common/types/message"
@@ -14,6 +15,10 @@ import (
func InitL2geth(configJSON string) { func InitL2geth(configJSON string) {
} }
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) { func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON) fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON)

View File

@@ -7,7 +7,10 @@ package libzkp
#include "libzkp.h" #include "libzkp.h"
*/ */
import "C" //nolint:typecheck import "C" //nolint:typecheck
import "unsafe" import (
"strings"
"unsafe"
)
// Initialize the handler for universal task // Initialize the handler for universal task
func InitL2geth(configJSON string) { func InitL2geth(configJSON string) {
@@ -17,6 +20,11 @@ func InitL2geth(configJSON string) {
C.init_l2geth(cConfig) C.init_l2geth(cConfig)
} }
// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) { func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
cTask := goToCString(taskJSON) cTask := goToCString(taskJSON)
cForkName := goToCString(forkName) cForkName := goToCString(forkName)

View File

@@ -314,7 +314,7 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkProofs []
case 0: case 0:
log.Warn("the codec version is 0, if it is not under integration test we have encountered an error here") log.Warn("the codec version is 0, if it is not under integration test we have encountered an error here")
return taskDetail, nil return taskDetail, nil
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8, encoding.CodecV9: case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
default: default:
return nil, fmt.Errorf("Unsupported codec version <%d>", dbBatchCodecVersion) return nil, fmt.Errorf("Unsupported codec version <%d>", dbBatchCodecVersion)
} }

View File

@@ -155,7 +155,7 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.Cha
// HandleZkProof handle a ZkProof submitted from a prover. // HandleZkProof handle a ZkProof submitted from a prover.
// For now only proving/verifying error will lead to setting status as skipped. // For now only proving/verifying error will lead to setting status as skipped.
// db/unmarshal errors will not because they are errors on the business logic side. // db/unmarshal errors will not because they are errors on the business logic side.
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) error { func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) (rerr error) {
m.proofReceivedTotal.Inc() m.proofReceivedTotal.Inc()
pk := ctx.GetString(coordinatorType.PublicKey) pk := ctx.GetString(coordinatorType.PublicKey)
if len(pk) == 0 { if len(pk) == 0 {
@@ -172,6 +172,18 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
return ErrValidatorFailureProverTaskEmpty return ErrValidatorFailureProverTaskEmpty
} }
defer func() {
if rerr != nil && types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverAssigned {
// trigger a last-chance closing of current task if some routine had missed it
log.Warn("last chance proof recover triggerred",
"proofID", proofParameter.TaskID,
"err", rerr,
)
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeUndefined, proofParameter)
}
}()
proofTime := time.Since(proverTask.CreatedAt) proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds()) proofTimeSec := uint64(proofTime.Seconds())
@@ -311,6 +323,20 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
} }
}() }()
// Internally we overide the timeout failure:
// if prover task FailureType is SessionInfoFailureTimeout, the submit proof is timeout, but we still accept it
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofInvalid &&
types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc()
proverTask.ProvingStatus = int16(types.ProverAssigned)
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
log.Warn("proof submit proof have timeout", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
}
// Ensure this prover is eligible to participate in the prover task. // Ensure this prover is eligible to participate in the prover task.
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid || if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid ||
types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofInvalid { types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofInvalid {
@@ -328,9 +354,6 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
return ErrValidatorFailureProverTaskCannotSubmitTwice return ErrValidatorFailureProverTaskCannotSubmitTwice
} }
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
if proofParameter.Status != int(coordinatorType.StatusOk) { if proofParameter.Status != int(coordinatorType.StatusOk) {
// Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs. // Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs.
failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1) failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1)
@@ -346,14 +369,6 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
return ErrValidatorFailureProofMsgStatusNotOk return ErrValidatorFailureProofMsgStatusNotOk
} }
// if prover task FailureType is SessionInfoFailureTimeout, the submit proof is timeout, need skip it
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc()
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
return ErrValidatorFailureProofTimeout
}
// store the proof to prover task // store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofParameter); updateTaskProofErr != nil { if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofParameter); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofParameter.TaskID, "proverPublicKey", pk, log.Warn("update prover task proof failure", "hash", proofParameter.TaskID, "proverPublicKey", pk,
@@ -368,6 +383,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk) "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
return ErrValidatorFailureTaskHaveVerifiedSuccess return ErrValidatorFailureTaskHaveVerifiedSuccess
} }
return nil return nil
} }
@@ -384,7 +400,7 @@ func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm
log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
"taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskVerified.String()) "taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskVerified.String())
if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil { if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureType(proverTask.FailureType), proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err) log.Error("failed to updated proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err)
return err return err
} }
@@ -445,6 +461,9 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *
if err != nil { if err != nil {
return err return err
} }
// sync status and failture type into proverTask
proverTask.ProvingStatus = int16(status)
proverTask.FailureType = int16(failureType)
if status == types.ProverProofValid && message.ProofType(proofParameter.TaskType) == message.ProofTypeChunk { if status == types.ProverProofValid && message.ProofType(proofParameter.TaskType) == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil { if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil {

View File

@@ -14,7 +14,7 @@ import (
) )
// ChallengeMiddleware jwt challenge middleware // ChallengeMiddleware jwt challenge middleware
func ChallengeMiddleware(conf *config.Config) *jwt.GinJWTMiddleware { func ChallengeMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{ jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
Authenticator: func(c *gin.Context) (interface{}, error) { Authenticator: func(c *gin.Context) (interface{}, error) {
return nil, nil return nil, nil
@@ -30,8 +30,8 @@ func ChallengeMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
} }
}, },
Unauthorized: unauthorized, Unauthorized: unauthorized,
Key: []byte(conf.Auth.Secret), Key: []byte(auth.Secret),
Timeout: time.Second * time.Duration(conf.Auth.ChallengeExpireDurationSec), Timeout: time.Second * time.Duration(auth.ChallengeExpireDurationSec),
TokenLookup: "header: Authorization, query: token, cookie: jwt", TokenLookup: "header: Authorization, query: token, cookie: jwt",
TokenHeadName: "Bearer", TokenHeadName: "Bearer",
TimeFunc: time.Now, TimeFunc: time.Now,

View File

@@ -4,22 +4,57 @@ import (
"time" "time"
jwt "github.com/appleboy/gin-jwt/v2" jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"scroll-tech/coordinator/internal/config" "scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api" "scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/proxy"
"scroll-tech/coordinator/internal/types" "scroll-tech/coordinator/internal/types"
) )
func nonIdendityAuthorizator(data interface{}, _ *gin.Context) bool {
return data != nil
}
// LoginMiddleware jwt auth middleware // LoginMiddleware jwt auth middleware
func LoginMiddleware(conf *config.Config) *jwt.GinJWTMiddleware { func LoginMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{ jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
PayloadFunc: api.Auth.PayloadFunc, PayloadFunc: api.Auth.PayloadFunc,
IdentityHandler: api.Auth.IdentityHandler, IdentityHandler: api.Auth.IdentityHandler,
IdentityKey: types.PublicKey, IdentityKey: types.PublicKey,
Key: []byte(conf.Auth.Secret), Key: []byte(auth.Secret),
Timeout: time.Second * time.Duration(conf.Auth.LoginExpireDurationSec), Timeout: time.Second * time.Duration(auth.LoginExpireDurationSec),
Authenticator: api.Auth.Login, Authenticator: api.Auth.Login,
Authorizator: nonIdendityAuthorizator,
Unauthorized: unauthorized,
TokenLookup: "header: Authorization, query: token, cookie: jwt",
TokenHeadName: "Bearer",
TimeFunc: time.Now,
LoginResponse: loginResponse,
})
if err != nil {
log.Crit("new jwt middleware panic", "error", err)
}
if errInit := jwtMiddleware.MiddlewareInit(); errInit != nil {
log.Crit("init jwt middleware panic", "error", errInit)
}
return jwtMiddleware
}
// ProxyLoginMiddleware jwt auth middleware for proxy login
func ProxyLoginMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
PayloadFunc: proxy.Auth.PayloadFunc,
IdentityHandler: proxy.Auth.IdentityHandler,
IdentityKey: types.PublicKey,
Key: []byte(auth.Secret),
Timeout: time.Second * time.Duration(auth.LoginExpireDurationSec),
Authenticator: proxy.Auth.Login,
Authorizator: nonIdendityAuthorizator,
Unauthorized: unauthorized, Unauthorized: unauthorized,
TokenLookup: "header: Authorization, query: token, cookie: jwt", TokenLookup: "header: Authorization, query: token, cookie: jwt",
TokenHeadName: "Bearer", TokenHeadName: "Bearer",

View File

@@ -28,8 +28,8 @@ func TestMain(m *testing.M) {
defer func() { defer func() {
if testApps != nil { if testApps != nil {
testApps.Free() testApps.Free()
tearDownEnv(t)
} }
tearDownEnv(t)
}() }()
m.Run() m.Run()
} }

View File

@@ -8,6 +8,7 @@ import (
"scroll-tech/coordinator/internal/config" "scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api" "scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/proxy"
"scroll-tech/coordinator/internal/middleware" "scroll-tech/coordinator/internal/middleware"
) )
@@ -25,16 +26,45 @@ func Route(router *gin.Engine, cfg *config.Config, reg prometheus.Registerer) {
func v1(router *gin.RouterGroup, conf *config.Config) { func v1(router *gin.RouterGroup, conf *config.Config) {
r := router.Group("/v1") r := router.Group("/v1")
challengeMiddleware := middleware.ChallengeMiddleware(conf) challengeMiddleware := middleware.ChallengeMiddleware(conf.Auth)
r.GET("/challenge", challengeMiddleware.LoginHandler) r.GET("/challenge", challengeMiddleware.LoginHandler)
loginMiddleware := middleware.LoginMiddleware(conf) loginMiddleware := middleware.LoginMiddleware(conf.Auth)
r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler) r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler)
// need jwt token api // need jwt token api
r.Use(loginMiddleware.MiddlewareFunc()) r.Use(loginMiddleware.MiddlewareFunc())
{ {
r.POST("/proxy_login", loginMiddleware.LoginHandler)
r.POST("/get_task", api.GetTask.GetTasks) r.POST("/get_task", api.GetTask.GetTasks)
r.POST("/submit_proof", api.SubmitProof.SubmitProof) r.POST("/submit_proof", api.SubmitProof.SubmitProof)
} }
} }
// Route register route for coordinator
func ProxyRoute(router *gin.Engine, cfg *config.ProxyConfig, reg prometheus.Registerer) {
router.Use(gin.Recovery())
observability.Use(router, "coordinator", reg)
r := router.Group("coordinator")
v1_proxy(r, cfg)
}
func v1_proxy(router *gin.RouterGroup, conf *config.ProxyConfig) {
r := router.Group("/v1")
challengeMiddleware := middleware.ChallengeMiddleware(conf.ProxyManager.Auth)
r.GET("/challenge", challengeMiddleware.LoginHandler)
loginMiddleware := middleware.ProxyLoginMiddleware(conf.ProxyManager.Auth)
r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler)
// need jwt token api
r.Use(loginMiddleware.MiddlewareFunc())
{
r.POST("/get_task", proxy.GetTask.GetTasks)
r.POST("/submit_proof", proxy.SubmitProof.SubmitProof)
}
}

View File

@@ -64,6 +64,8 @@ func (r ProverProviderType) String() string {
return "prover provider type internal" return "prover provider type internal"
case ProverProviderTypeExternal: case ProverProviderTypeExternal:
return "prover provider type external" return "prover provider type external"
case ProverProviderTypeProxy:
return "prover provider type proxy"
default: default:
return fmt.Sprintf("prover provider type: %d", r) return fmt.Sprintf("prover provider type: %d", r)
} }
@@ -76,4 +78,6 @@ const (
ProverProviderTypeInternal ProverProviderTypeInternal
// ProverProviderTypeExternal is an external prover provider type // ProverProviderTypeExternal is an external prover provider type
ProverProviderTypeExternal ProverProviderTypeExternal
// ProverProviderTypeProxy is an proxy prover provider type
ProverProviderTypeProxy = 3
) )

View File

@@ -0,0 +1,48 @@
package types
import (
"encoding/json"
"reflect"
"testing"
"scroll-tech/common/types"
)
func TestResponseDecodeData_GetTaskSchema(t *testing.T) {
// Arrange: build a dummy payload and wrap it in Response
in := GetTaskSchema{
UUID: "uuid-123",
TaskID: "task-abc",
TaskType: 1,
UseSnark: true,
TaskData: "dummy-data",
HardForkName: "cancun",
}
resp := types.Response{
ErrCode: 0,
ErrMsg: "",
Data: in,
}
// Act: JSON round-trip the Response to simulate real HTTP encoding/decoding
b, err := json.Marshal(resp)
if err != nil {
t.Fatalf("marshal response: %v", err)
}
var decoded types.Response
if err := json.Unmarshal(b, &decoded); err != nil {
t.Fatalf("unmarshal response: %v", err)
}
var out GetTaskSchema
if err := decoded.DecodeData(&out); err != nil {
t.Fatalf("DecodeData error: %v", err)
}
// Assert: structs match after decode
if !reflect.DeepEqual(in, out) {
t.Fatalf("decoded struct mismatch:\nwant: %+v\n got: %+v", in, out)
}
}

View File

@@ -31,6 +31,8 @@ func Version(hardForkName string, ValidiumMode bool) (uint8, error) {
stfVersion = 8 stfVersion = 8
case "galileo": case "galileo":
stfVersion = 9 stfVersion = 9
case "galileov2":
stfVersion = 10
default: default:
return 0, errors.New("unknown fork name " + canonicalName) return 0, errors.New("unknown fork name " + canonicalName)
} }

View File

@@ -30,12 +30,14 @@ import (
"scroll-tech/coordinator/internal/config" "scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api" "scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/cron" "scroll-tech/coordinator/internal/controller/cron"
"scroll-tech/coordinator/internal/controller/proxy"
"scroll-tech/coordinator/internal/orm" "scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/internal/route" "scroll-tech/coordinator/internal/route"
) )
var ( var (
conf *config.Config conf *config.Config
proxyConf *config.ProxyConfig
testApps *testcontainers.TestcontainerApps testApps *testcontainers.TestcontainerApps
@@ -51,6 +53,9 @@ var (
chunk *encoding.Chunk chunk *encoding.Chunk
batch *encoding.Batch batch *encoding.Batch
tokenTimeout int tokenTimeout int
envSet bool
portUsed map[int64]struct{}
) )
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
@@ -63,18 +68,44 @@ func TestMain(m *testing.M) {
} }
func randomURL() string { func randomURL() string {
id, _ := rand.Int(rand.Reader, big.NewInt(2000-1)) return randmURLBatch(1)[0]
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
} }
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) { // Generate a batch of random localhost URLs with different ports, similar to randomURL.
var err error func randmURLBatch(n int) []string {
db, err = testApps.GetGormDBClient() if n <= 0 {
return nil
}
urls := make([]string, 0, n)
if portUsed == nil {
portUsed = make(map[int64]struct{})
}
for len(urls) < n {
id, _ := rand.Int(rand.Reader, big.NewInt(2000-1))
port := 20000 + 2000 + id.Int64()
if _, exist := portUsed[port]; exist {
continue
}
portUsed[port] = struct{}{}
urls = append(urls, fmt.Sprintf("localhost:%d", port))
}
return urls
}
assert.NoError(t, err) func setupCoordinatorDb(t *testing.T) {
var err error
assert.NotNil(t, db, "setEnv must be called before")
// db, err = testApps.GetGormDBClient()
// assert.NoError(t, err)
sqlDB, err := db.DB() sqlDB, err := db.DB()
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB)) assert.NoError(t, migrate.ResetDB(sqlDB))
}
func launchCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
assert.NotNil(t, db, "db must be set")
tokenTimeout = 60 tokenTimeout = 60
conf = &config.Config{ conf = &config.Config{
@@ -114,6 +145,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
EuclidV2Time: new(uint64), EuclidV2Time: new(uint64),
}, db, nil) }, db, nil)
route.Route(router, conf, nil) route.Route(router, conf, nil)
t.Log("coordinator server url", coordinatorURL)
srv := &http.Server{ srv := &http.Server{
Addr: coordinatorURL, Addr: coordinatorURL,
Handler: router, Handler: router,
@@ -129,7 +161,77 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
return proofCollector, srv return proofCollector, srv
} }
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
setupCoordinatorDb(t)
return launchCoordinator(t, proversPerSession, coordinatorURL)
}
func setupProxyDb(t *testing.T) {
assert.NotNil(t, db, "setEnv must be called before")
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetModuleDB(sqlDB, "proxy"))
}
func launchProxy(t *testing.T, proxyURL string, coordinatorURL []string, usePersistent bool) *http.Server {
var err error
assert.NoError(t, err)
coordinators := make(map[string]*config.UpStream)
for i, n := range coordinatorURL {
coordinators[fmt.Sprintf("coordinator_%d", i)] = testProxyUpStreamCfg(n)
}
tokenTimeout = 60
proxyConf = &config.ProxyConfig{
ProxyName: "test_proxy",
ProxyManager: &config.ProxyManager{
Verifier: &config.VerifierConfig{
MinProverVersion: "v4.4.89",
Verifiers: []config.AssetConfig{{
AssetsPath: "",
ForkName: "euclidV2",
}},
},
Client: testProxyClientCfg(),
Auth: &config.Auth{
Secret: "proxy",
ChallengeExpireDurationSec: tokenTimeout,
LoginExpireDurationSec: tokenTimeout,
},
},
Coordinators: coordinators,
}
router := gin.New()
if usePersistent {
proxy.InitController(proxyConf, db, nil)
} else {
proxy.InitController(proxyConf, nil, nil)
}
route.ProxyRoute(router, proxyConf, nil)
t.Log("proxy server url", proxyURL)
srv := &http.Server{
Addr: proxyURL,
Handler: router,
}
go func() {
runErr := srv.ListenAndServe()
if runErr != nil && !errors.Is(runErr, http.ErrServerClosed) {
assert.NoError(t, runErr)
}
}()
time.Sleep(time.Second * 2)
return srv
}
func setEnv(t *testing.T) { func setEnv(t *testing.T) {
if envSet {
t.Log("SetEnv is re-entried")
return
}
var err error var err error
version.Version = "v4.5.45" version.Version = "v4.5.45"
@@ -146,6 +248,7 @@ func setEnv(t *testing.T) {
sqlDB, err := db.DB() sqlDB, err := db.DB()
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB)) assert.NoError(t, migrate.ResetDB(sqlDB))
assert.NoError(t, migrate.MigrateModule(sqlDB, "proxy"))
batchOrm = orm.NewBatch(db) batchOrm = orm.NewBatch(db)
chunkOrm = orm.NewChunk(db) chunkOrm = orm.NewChunk(db)
@@ -169,6 +272,7 @@ func setEnv(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
batch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk}} batch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk}}
envSet = true
} }
func TestApis(t *testing.T) { func TestApis(t *testing.T) {

View File

@@ -34,6 +34,8 @@ type mockProver struct {
privKey *ecdsa.PrivateKey privKey *ecdsa.PrivateKey
proofType message.ProofType proofType message.ProofType
coordinatorURL string coordinatorURL string
token string
useCacheToken bool
} }
func newMockProver(t *testing.T, proverName string, coordinatorURL string, proofType message.ProofType, version string) *mockProver { func newMockProver(t *testing.T, proverName string, coordinatorURL string, proofType message.ProofType, version string) *mockProver {
@@ -50,6 +52,14 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
return prover return prover
} }
func (r *mockProver) resetConnection(coordinatorURL string) {
r.coordinatorURL = coordinatorURL
}
func (r *mockProver) setUseCacheToken(enable bool) {
r.useCacheToken = enable
}
// connectToCoordinator sets up a websocket client to connect to the prover manager. // connectToCoordinator sets up a websocket client to connect to the prover manager.
func (r *mockProver) connectToCoordinator(t *testing.T, proverTypes []types.ProverType) (string, int, string) { func (r *mockProver) connectToCoordinator(t *testing.T, proverTypes []types.ProverType) (string, int, string) {
challengeString := r.challenge(t) challengeString := r.challenge(t)
@@ -115,6 +125,7 @@ func (r *mockProver) login(t *testing.T, challengeString string, proverTypes []t
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode()) assert.Equal(t, http.StatusOK, resp.StatusCode())
assert.Empty(t, result.ErrMsg) assert.Empty(t, result.ErrMsg)
r.token = loginData.Token
return loginData.Token, 0, "" return loginData.Token, 0, ""
} }
@@ -144,11 +155,14 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*types.GetTaskSchema, int, string) { func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*types.GetTaskSchema, int, string) {
// get task from coordinator // get task from coordinator
token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)}) if !r.useCacheToken || r.token == "" {
if errCode != 0 { token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
return nil, errCode, errMsg if errCode != 0 {
return nil, errCode, errMsg
}
assert.NotEmpty(t, token)
assert.Equal(t, token, r.token)
} }
assert.NotEmpty(t, token)
type response struct { type response struct {
ErrCode int `json:"errcode"` ErrCode int `json:"errcode"`
@@ -160,7 +174,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
client := resty.New() client := resty.New()
resp, err := client.R(). resp, err := client.R().
SetHeader("Content-Type", "application/json"). SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)). SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
SetBody(map[string]interface{}{"universal": true, "prover_height": 100, "task_types": []int{int(proofType)}}). SetBody(map[string]interface{}{"universal": true, "prover_height": 100, "task_types": []int{int(proofType)}}).
SetResult(&result). SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task") Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
@@ -174,11 +188,14 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
//nolint:unparam //nolint:unparam
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) { func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) {
// get task from coordinator // get task from coordinator
token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)}) if !r.useCacheToken || r.token == "" {
if errCode != 0 { token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
return errCode, errMsg if errCode != 0 {
return errCode, errMsg
}
assert.NotEmpty(t, token)
assert.Equal(t, token, r.token)
} }
assert.NotEmpty(t, token)
type response struct { type response struct {
ErrCode int `json:"errcode"` ErrCode int `json:"errcode"`
@@ -190,8 +207,8 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
client := resty.New() client := resty.New()
resp, err := client.R(). resp, err := client.R().
SetHeader("Content-Type", "application/json"). SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)). SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "universal": true}). SetBody(map[string]interface{}{"prover_height": 100, "task_types": []int{int(proofType)}, "universal": true}).
SetResult(&result). SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task") Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err) assert.NoError(t, err)
@@ -249,10 +266,13 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
Universal: true, Universal: true,
} }
token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))}) if !r.useCacheToken || r.token == "" {
assert.Equal(t, authErrCode, 0) token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})
assert.Equal(t, errMsg, "") assert.Equal(t, authErrCode, 0)
assert.NotEmpty(t, token) assert.Equal(t, errMsg, "")
assert.NotEmpty(t, token)
assert.Equal(t, token, r.token)
}
submitProofData, err := json.Marshal(submitProof) submitProofData, err := json.Marshal(submitProof)
assert.NoError(t, err) assert.NoError(t, err)
@@ -262,7 +282,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
client := resty.New() client := resty.New()
resp, err := client.R(). resp, err := client.R().
SetHeader("Content-Type", "application/json"). SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)). SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
SetBody(string(submitProofData)). SetBody(string(submitProofData)).
SetResult(&result). SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/submit_proof") Post("http://" + r.coordinatorURL + "/coordinator/v1/submit_proof")

View File

@@ -0,0 +1,297 @@
package test
import (
"context"
"fmt"
"net/http"
"strings"
"testing"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/proxy"
)
func testProxyClientCfg() *config.ProxyClient {
return &config.ProxyClient{
Secret: "test-secret-key",
ProxyName: "test-proxy",
ProxyVersion: version.Version,
}
}
var testCompatibileMode bool
func testProxyUpStreamCfg(coordinatorURL string) *config.UpStream {
return &config.UpStream{
BaseUrl: fmt.Sprintf("http://%s", coordinatorURL),
RetryWaitTime: 3,
ConnectionTimeoutSec: 30,
CompatibileMode: testCompatibileMode,
}
}
func testProxyClient(t *testing.T) {
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
cliCfg := testProxyClientCfg()
upCfg := testProxyUpStreamCfg(coordinatorURL)
clientManager, err := proxy.NewClientManager("test_coordinator", cliCfg, upCfg)
assert.NoError(t, err)
assert.NotNil(t, clientManager)
// Create context with timeout
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Test Client method
client := clientManager.ClientAsProxy(ctx)
// Client should not be nil if login succeeds
// Note: This might be nil if the coordinator is not properly set up for proxy authentication
// but the test validates that the Client method completes without panic
assert.NotNil(t, client)
token1 := client.Token()
assert.NotEmpty(t, token1)
t.Logf("Client token: %s (%v)", token1, client)
if !upCfg.CompatibileMode {
time.Sleep(time.Second * 2)
client.Reset()
client = clientManager.ClientAsProxy(ctx)
assert.NotNil(t, client)
token2 := client.Token()
assert.NotEmpty(t, token2)
t.Logf("Client token (sec): %s (%v)", token2, client)
assert.NotEqual(t, token1, token2, "token should not be identical")
}
}
func testProxyHandshake(t *testing.T) {
// Setup proxy http server.
proxyURL := randomURL()
proxyHttpHandler := launchProxy(t, proxyURL, []string{}, false)
defer func() {
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
}()
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
assert.True(t, chunkProver.healthCheckSuccess(t))
}
func testProxyGetTask(t *testing.T) {
// Setup coordinator and http server.
urls := randmURLBatch(2)
coordinatorURL := urls[0]
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
proxyURL := urls[1]
proxyHttpHandler := launchProxy(t, proxyURL, []string{coordinatorURL}, false)
defer func() {
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
}()
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
chunkProver.setUseCacheToken(true)
code, _ := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
assert.Equal(t, int(types.ErrCoordinatorEmptyProofData), code)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
task, code, msg := chunkProver.getProverTask(t, message.ProofTypeChunk)
assert.Empty(t, code)
if code == 0 {
t.Log("get task id", task.TaskID)
} else {
t.Log("get task error msg", msg)
}
}
func testProxyProof(t *testing.T) {
urls := randmURLBatch(3)
coordinatorURL0 := urls[0]
setupCoordinatorDb(t)
collector0, httpHandler0 := launchCoordinator(t, 3, coordinatorURL0)
defer func() {
collector0.Stop()
httpHandler0.Shutdown(context.Background())
}()
coordinatorURL1 := urls[1]
collector1, httpHandler1 := launchCoordinator(t, 3, coordinatorURL1)
defer func() {
collector1.Stop()
httpHandler1.Shutdown(context.Background())
}()
coordinators := map[string]*http.Server{
"coordinator_0": httpHandler0,
"coordinator_1": httpHandler1,
}
proxyURL := urls[2]
proxyHttpHandler := launchProxy(t, proxyURL, []string{coordinatorURL0, coordinatorURL1}, false)
defer func() {
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
}()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
chunkProver.setUseCacheToken(true)
task, code, msg := chunkProver.getProverTask(t, message.ProofTypeChunk)
assert.Empty(t, code)
if code == 0 {
t.Log("get task", task)
parts, _, _ := strings.Cut(task.TaskID, ":")
// close the coordinator which do not dispatch task first, so if we submit to wrong target,
// there would be a chance the submit failed (to the closed coordinator)
for n, srv := range coordinators {
if n != parts {
t.Log("close coordinator", n)
assert.NoError(t, srv.Shutdown(context.Background()))
}
}
exceptProofStatus := verifiedSuccess
chunkProver.submitProof(t, task, exceptProofStatus, types.Success)
} else {
t.Log("get task error msg", msg)
}
// verify proof status
var (
tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(time.Minute)
)
var (
chunkProofStatus types.ProvingStatus
chunkActiveAttempts int16
chunkMaxAttempts int16
)
for {
select {
case <-tick:
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskVerified {
return
}
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts))
case <-tickStop:
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String())
return
}
}
}
func testProxyPersistent(t *testing.T) {
urls := randmURLBatch(4)
coordinatorURL0 := urls[0]
setupCoordinatorDb(t)
collector0, httpHandler0 := launchCoordinator(t, 3, coordinatorURL0)
defer func() {
collector0.Stop()
httpHandler0.Shutdown(context.Background())
}()
coordinatorURL1 := urls[1]
collector1, httpHandler1 := launchCoordinator(t, 3, coordinatorURL1)
defer func() {
collector1.Stop()
httpHandler1.Shutdown(context.Background())
}()
setupProxyDb(t)
proxyURL1 := urls[2]
proxyHttpHandler := launchProxy(t, proxyURL1, []string{coordinatorURL0, coordinatorURL1}, true)
defer func() {
assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
}()
proxyURL2 := urls[3]
proxyHttpHandler2 := launchProxy(t, proxyURL2, []string{coordinatorURL0, coordinatorURL1}, true)
defer func() {
assert.NoError(t, proxyHttpHandler2.Shutdown(context.Background()))
}()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
chunkProver := newMockProver(t, "prover_chunk_test", proxyURL1, message.ProofTypeChunk, version.Version)
chunkProver.setUseCacheToken(true)
task, _, _ := chunkProver.getProverTask(t, message.ProofTypeChunk)
assert.NotNil(t, task)
taskFrom, _, _ := strings.Cut(task.TaskID, ":")
t.Log("get task from coordinator:", taskFrom)
chunkProver.resetConnection(proxyURL2)
task, _, _ = chunkProver.getProverTask(t, message.ProofTypeChunk)
assert.NotNil(t, task)
taskFrom2, _, _ := strings.Cut(task.TaskID, ":")
assert.Equal(t, taskFrom, taskFrom2)
}
func TestProxyClient(t *testing.T) {
testCompatibileMode = false
// Set up the test environment.
setEnv(t)
t.Run("TestProxyClient", testProxyClient)
t.Run("TestProxyHandshake", testProxyHandshake)
t.Run("TestProxyGetTask", testProxyGetTask)
t.Run("TestProxyValidProof", testProxyProof)
t.Run("testProxyPersistent", testProxyPersistent)
}
func TestProxyClientCompatibleMode(t *testing.T) {
testCompatibileMode = true
// Set up the test environment.
setEnv(t)
t.Run("TestProxyClient", testProxyClient)
t.Run("TestProxyHandshake", testProxyHandshake)
t.Run("TestProxyGetTask", testProxyGetTask)
t.Run("TestProxyValidProof", testProxyProof)
t.Run("testProxyPersistent", testProxyPersistent)
}

View File

@@ -28,10 +28,10 @@ pub struct BatchHeaderValidiumWithHash {
/// Parse header types passed from golang side and adapt to the /// Parse header types passed from golang side and adapt to the
/// definition in zkvm-prover's types /// definition in zkvm-prover's types
/// We distinguish the header type in golang side according to the codec /// We distinguish the header type in golang side according to the STF
/// version, i.e. v7 - v9 (current), and validium /// version, i.e. v6, v7-v10 (current), and validium
/// And adapt it to the corresponding header version used in zkvm-prover's witness /// And adapt it to the corresponding batch header type used in zkvm-prover's witness
/// definition, i.e. v7- v8 (current), and validium /// definition, i.e. v6, v7 (current), and validium
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
#[serde(untagged)] #[serde(untagged)]
#[allow(non_camel_case_types)] #[allow(non_camel_case_types)]
@@ -40,18 +40,18 @@ pub enum BatchHeaderV {
Validium(BatchHeaderValidiumWithHash), Validium(BatchHeaderValidiumWithHash),
/// Header for scroll's STF version v6. /// Header for scroll's STF version v6.
V6(BatchHeaderV6), V6(BatchHeaderV6),
/// Header for scroll's STF versions v7, v8, v9. /// Header for scroll's STF versions v7 - v10.
/// ///
/// Since the codec essentially is unchanged for the above STF versions, we do not define new /// Since the codec essentially is unchanged for the above STF versions, we do not define new
/// variants, instead re-using the [`BatchHeaderV7`] variant. /// variants, instead re-using the [`BatchHeaderV7`] variant.
V7_V8_V9(BatchHeaderV7), V7_to_V10(BatchHeaderV7),
} }
impl core::fmt::Display for BatchHeaderV { impl core::fmt::Display for BatchHeaderV {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self { match self {
BatchHeaderV::V6(_) => write!(f, "V6"), BatchHeaderV::V6(_) => write!(f, "V6"),
BatchHeaderV::V7_V8_V9(_) => write!(f, "V7_V8_V9"), BatchHeaderV::V7_to_V10(_) => write!(f, "V7 - V10"),
BatchHeaderV::Validium(_) => write!(f, "Validium"), BatchHeaderV::Validium(_) => write!(f, "Validium"),
} }
} }
@@ -61,26 +61,29 @@ impl BatchHeaderV {
pub fn batch_hash(&self) -> B256 { pub fn batch_hash(&self) -> B256 {
match self { match self {
BatchHeaderV::V6(h) => h.batch_hash(), BatchHeaderV::V6(h) => h.batch_hash(),
BatchHeaderV::V7_V8_V9(h) => h.batch_hash(), BatchHeaderV::V7_to_V10(h) => h.batch_hash(),
BatchHeaderV::Validium(h) => h.header.batch_hash(), BatchHeaderV::Validium(h) => h.header.batch_hash(),
} }
} }
pub fn must_v6_header(&self) -> &BatchHeaderV6 { pub fn to_zkvm_batch_header_v6(&self) -> &BatchHeaderV6 {
match self { match self {
BatchHeaderV::V6(h) => h, BatchHeaderV::V6(h) => h,
_ => unreachable!("A header of {} is considered to be v6", self), _ => unreachable!("A header of {} is considered to be v6", self),
} }
} }
pub fn must_v7_v8_v9_header(&self) -> &BatchHeaderV7 { pub fn to_zkvm_batch_header_v7_to_v10(&self) -> &BatchHeaderV7 {
match self { match self {
BatchHeaderV::V7_V8_V9(h) => h, BatchHeaderV::V7_to_V10(h) => h,
_ => unreachable!("A header of {} is considered to be in [v7, v8, v9]", self), _ => unreachable!(
"A header of {} is considered to be in [v7, v8, v9, v10]",
self
),
} }
} }
pub fn must_validium_header(&self) -> &BatchHeaderValidium { pub fn to_zkvm_batch_header_validium(&self) -> &BatchHeaderValidium {
match self { match self {
BatchHeaderV::Validium(h) => &h.header, BatchHeaderV::Validium(h) => &h.header,
_ => unreachable!("A header of {} is considered to be validium", self), _ => unreachable!("A header of {} is considered to be validium", self),
@@ -154,11 +157,11 @@ impl BatchProvingTask {
version.fork, version.fork,
ForkName::EuclidV1, ForkName::EuclidV1,
), ),
BatchHeaderV::V7_V8_V9(_) => assert!( BatchHeaderV::V7_to_V10(_) => assert!(
matches!(version.fork, ForkName::EuclidV2 | ForkName::Feynman | ForkName::Galileo), matches!(version.fork, ForkName::EuclidV2 | ForkName::Feynman | ForkName::Galileo | ForkName::GalileoV2),
"hardfork mismatch for da-codec@v7/8/9 header: found={}, expected={:?}", "hardfork mismatch for da-codec@v7/8/9/10 header: found={}, expected={:?}",
version.fork, version.fork,
[ForkName::EuclidV2, ForkName::Feynman, ForkName::Galileo], [ForkName::EuclidV2, ForkName::Feynman, ForkName::Galileo, ForkName::GalileoV2],
), ),
} }
@@ -228,23 +231,25 @@ impl BatchProvingTask {
let reference_header = match (version.domain, version.stf_version) { let reference_header = match (version.domain, version.stf_version) {
(Domain::Scroll, STFVersion::V6) => { (Domain::Scroll, STFVersion::V6) => {
ReferenceHeader::V6(*self.batch_header.must_v6_header()) ReferenceHeader::V6(*self.batch_header.to_zkvm_batch_header_v6())
} }
// The da-codec for STF versions v7, v8, v9 is identical. In zkvm-prover we do not // The da-codec for STF versions v7, v8, v9, v10 is identical. In zkvm-prover we do not
// create additional variants to indicate the identical behaviour of codec. Instead we // create additional variants to indicate the identical behaviour of codec. Instead we
// add a separate variant for the STF version. // add a separate variant for the STF version.
// //
// We handle the different STF versions here however build the same batch header since // We handle the different STF versions here however build the same batch header since
// that type does not change. The batch header's version byte constructed in the // that type does not change. The batch header's version byte constructed in the
// coordinator actually defines the STF version (v7, v8 or v9) and we can derive the // coordinator actually defines the STF version (v7, v8 or v9, v10) and we can derive
// hard-fork (feynman or galileo) and the codec from the version byte. // the hard-fork (e.g. feynman or galileo) and the codec from the version
// byte.
// //
// Refer [`scroll_zkvm_types::public_inputs::Version`]. // Refer [`scroll_zkvm_types::public_inputs::Version`].
(Domain::Scroll, STFVersion::V7 | STFVersion::V8 | STFVersion::V9) => { (
ReferenceHeader::V7_V8_V9(*self.batch_header.must_v7_v8_v9_header()) Domain::Scroll,
} STFVersion::V7 | STFVersion::V8 | STFVersion::V9 | STFVersion::V10,
) => ReferenceHeader::V7_V8_V9(*self.batch_header.to_zkvm_batch_header_v7_to_v10()),
(Domain::Validium, STFVersion::V1) => { (Domain::Validium, STFVersion::V1) => {
ReferenceHeader::Validium(*self.batch_header.must_validium_header()) ReferenceHeader::Validium(*self.batch_header.to_zkvm_batch_header_validium())
} }
(domain, stf_version) => { (domain, stf_version) => {
unreachable!("unsupported domain={domain:?},stf-version={stf_version:?}") unreachable!("unsupported domain={domain:?},stf-version={stf_version:?}")

View File

@@ -144,7 +144,6 @@ impl LocalProverConfig {
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Serialize, Deserialize)]
pub struct CircuitConfig { pub struct CircuitConfig {
pub hard_fork_name: String,
/// The path to save assets for a specified hard fork phase /// The path to save assets for a specified hard fork phase
pub workspace_path: String, pub workspace_path: String,
#[serde(flatten)] #[serde(flatten)]

View File

@@ -8,7 +8,7 @@ require (
github.com/jmoiron/sqlx v1.3.5 github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.9 github.com/lib/pq v1.10.9
github.com/pressly/goose/v3 v3.16.0 github.com/pressly/goose/v3 v3.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.25.7 github.com/urfave/cli/v2 v2.25.7
) )

View File

@@ -121,8 +121,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=

View File

@@ -9,13 +9,14 @@ import (
"github.com/pressly/goose/v3" "github.com/pressly/goose/v3"
) )
//go:embed migrations/*.sql //go:embed migrations
var embedMigrations embed.FS var embedMigrations embed.FS
// MigrationsDir migration dir // MigrationsDir migration dir
const MigrationsDir string = "migrations" const MigrationsDir string = "migrations"
func init() { func init() {
// note goose ignore ono-sql files by default so we do not need to specify *.sql
goose.SetBaseFS(embedMigrations) goose.SetBaseFS(embedMigrations)
goose.SetSequential(true) goose.SetSequential(true)
goose.SetTableName("scroll_migrations") goose.SetTableName("scroll_migrations")
@@ -24,6 +25,41 @@ func init() {
goose.SetVerbose(verbose) goose.SetVerbose(verbose)
} }
// MigrateModule migrate db used by other module with specified goose TableName
// sql file for that module must be put as a sub-directory under `MigrationsDir`
func MigrateModule(db *sql.DB, moduleName string) error {
goose.SetTableName(moduleName + "_migrations")
defer func() {
goose.SetTableName("scroll_migrations")
}()
return goose.Up(db, MigrationsDir+"/"+moduleName, goose.WithAllowMissing())
}
// RollbackModule rollback the specified module to the given version
func RollbackModule(db *sql.DB, moduleName string, version *int64) error {
goose.SetTableName(moduleName + "_migrations")
defer func() {
goose.SetTableName("scroll_migrations")
}()
moduleDir := MigrationsDir + "/" + moduleName
if version != nil {
return goose.DownTo(db, moduleDir, *version)
}
return goose.Down(db, moduleDir)
}
// ResetModuleDB clean and migrate db for a module.
func ResetModuleDB(db *sql.DB, moduleName string) error {
if err := RollbackModule(db, moduleName, new(int64)); err != nil {
return err
}
return MigrateModule(db, moduleName)
}
// Migrate migrate db // Migrate migrate db
func Migrate(db *sql.DB) error { func Migrate(db *sql.DB) error {
//return goose.Up(db, MIGRATIONS_DIR, goose.WithAllowMissing()) //return goose.Up(db, MIGRATIONS_DIR, goose.WithAllowMissing())

View File

@@ -0,0 +1,30 @@
-- +goose Up
-- +goose StatementBegin
create table prover_sessions
(
public_key TEXT NOT NULL,
upstream TEXT NOT NULL,
up_token TEXT NOT NULL,
expired TIMESTAMP(0) NOT NULL,
constraint uk_prover_sessions_public_key_upstream unique (public_key, upstream)
);
create index idx_prover_sessions_expired on prover_sessions (expired);
create table priority_upstream
(
public_key TEXT NOT NULL,
upstream TEXT NOT NULL,
update_time TIMESTAMP(0) NOT NULL DEFAULT now()
);
create unique index idx_priority_upstream_public_key on priority_upstream (public_key);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop index if exists idx_prover_sessions_expired;
drop table if exists prover_sessions;
drop table if exists priority_upstream;
-- +goose StatementEnd

View File

@@ -1413,16 +1413,14 @@ github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148 h1:cyK1ifU2
github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148/go.mod h1:gz5x3CsLy5htNTbv4PWRPBU9nSAujfx1U2XtFcXoFuk= github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148/go.mod h1:gz5x3CsLy5htNTbv4PWRPBU9nSAujfx1U2XtFcXoFuk=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg= github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg= github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
github.com/scroll-tech/ecies-go/v2 v2.0.10-beta.1/go.mod h1:A+pHaITd+ogBm4Rk35xebF9OPiyMYlFlgqBOiY5PSjg= github.com/scroll-tech/ecies-go/v2 v2.0.10-beta.1/go.mod h1:A+pHaITd+ogBm4Rk35xebF9OPiyMYlFlgqBOiY5PSjg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs= github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo= github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250206083728-ea43834c198f/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8= github.com/scroll-tech/go-ethereum v1.10.14-0.20250206083728-ea43834c198f/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939/go.mod h1:AgU8JJxC7+nfs7R7ma35AU7dMAGW7wCw3dRZRefIKyQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939/go.mod h1:AgU8JJxC7+nfs7R7ma35AU7dMAGW7wCw3dRZRefIKyQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e h1:7U1/JilCPIqJTYNgKzlfBEkRNORRwJ8+PCmdR8/XK6A= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b h1:pMQKnroJoS/FeL1aOWkz7/u1iBHUP8PWjZstNuzoUGE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=

View File

@@ -58,7 +58,8 @@
"min_batches": 1, "min_batches": 1,
"max_batches": 6, "max_batches": 6,
"timeout": 7200, "timeout": 7200,
"backlog_max": 75 "backlog_max": 75,
"blob_fee_tolerance": 500000000
}, },
"gas_oracle_config": { "gas_oracle_config": {
"min_gas_price": 0, "min_gas_price": 0,

View File

@@ -15,8 +15,8 @@ require (
github.com/holiman/uint256 v1.3.2 github.com/holiman/uint256 v1.3.2
github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.16.0 github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/da-codec v0.9.0 github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/smartystreets/goconvey v1.8.0 github.com/smartystreets/goconvey v1.8.0
github.com/spf13/viper v1.19.0 github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.10.0

View File

@@ -287,10 +287,10 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70= github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ= github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

View File

@@ -48,6 +48,10 @@ type BatchSubmission struct {
TimeoutSec int64 `json:"timeout"` TimeoutSec int64 `json:"timeout"`
// The maximum number of pending batches to keep in the backlog. // The maximum number of pending batches to keep in the backlog.
BacklogMax int64 `json:"backlog_max"` BacklogMax int64 `json:"backlog_max"`
// BlobFeeTolerance is the absolute tolerance (in wei) added to the target blob fee.
// If the current fee is below target + tolerance, we proceed with submission.
// This prevents skipping submission when the price difference is negligible.
BlobFeeTolerance uint64 `json:"blob_fee_tolerance"`
} }
// ChainMonitor this config is used to get batch status from chain_monitor API. // ChainMonitor this config is used to get batch status from chain_monitor API.

View File

@@ -167,7 +167,7 @@ func (b *BlobUploader) constructBlobCodec(dbBatch *orm.Batch) (*kzg4844.Blob, er
Chunks: chunks, Chunks: chunks,
} }
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9: case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
encodingBatch = &encoding.Batch{ encodingBatch = &encoding.Batch{
Index: dbBatch.Index, Index: dbBatch.Index,
ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash), ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash),

View File

@@ -452,6 +452,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
// The next call of ProcessPendingBatches will then start with the batch with the different codec version. // The next call of ProcessPendingBatches will then start with the batch with the different codec version.
batchesToSubmitLen := len(batchesToSubmit) batchesToSubmitLen := len(batchesToSubmit)
if batchesToSubmitLen > 0 && batchesToSubmit[batchesToSubmitLen-1].Batch.CodecVersion != dbBatch.CodecVersion { if batchesToSubmitLen > 0 && batchesToSubmit[batchesToSubmitLen-1].Batch.CodecVersion != dbBatch.CodecVersion {
forceSubmit = true
break break
} }
@@ -488,7 +489,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
codecVersion := encoding.CodecVersion(firstBatch.CodecVersion) codecVersion := encoding.CodecVersion(firstBatch.CodecVersion)
switch codecVersion { switch codecVersion {
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9: case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
if r.cfg.ValidiumMode { if r.cfg.ValidiumMode {
if len(batchesToSubmit) != 1 { if len(batchesToSubmit) != 1 {
log.Error("validium mode only supports committing one batch at a time", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "batches count", len(batchesToSubmit)) log.Error("validium mode only supports committing one batch at a time", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "batches count", len(batchesToSubmit))
@@ -747,7 +748,7 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
var calldata []byte var calldata []byte
switch encoding.CodecVersion(bundle.CodecVersion) { switch encoding.CodecVersion(bundle.CodecVersion) {
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9: case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
if r.cfg.ValidiumMode { if r.cfg.ValidiumMode {
calldata, err = r.constructFinalizeBundlePayloadValidium(dbBatch, endChunk, aggProof) calldata, err = r.constructFinalizeBundlePayloadValidium(dbBatch, endChunk, aggProof)
if err != nil { if err != nil {
@@ -1050,7 +1051,7 @@ func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithCh
commitment := common.HexToHash(lastChunk.EndBlockHash) commitment := common.HexToHash(lastChunk.EndBlockHash)
var version uint8 var version uint8
if encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV8 || encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV9 { if encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV8 || encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV9 || encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV10 {
// Validium version line starts with v1, // Validium version line starts with v1,
// but rollup-relayer behavior follows v8. // but rollup-relayer behavior follows v8.
version = 1 version = 1
@@ -1254,16 +1255,20 @@ func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time, metrics *l2RelayerMetr
target := calculateTargetPrice(windowSec, r.batchStrategy, oldest, hist) target := calculateTargetPrice(windowSec, r.batchStrategy, oldest, hist)
current := hist[len(hist)-1] current := hist[len(hist)-1]
// apply absolute tolerance offset to target
tolerance := new(big.Int).SetUint64(r.cfg.BatchSubmission.BlobFeeTolerance)
threshold := new(big.Int).Add(target, tolerance)
currentFloat, _ := current.Float64() currentFloat, _ := current.Float64()
targetFloat, _ := target.Float64() targetFloat, _ := target.Float64()
metrics.rollupL2RelayerCurrentBlobPrice.Set(currentFloat) metrics.rollupL2RelayerCurrentBlobPrice.Set(currentFloat)
metrics.rollupL2RelayerTargetBlobPrice.Set(targetFloat) metrics.rollupL2RelayerTargetBlobPrice.Set(targetFloat)
// if current fee > target and still inside the timeout window, skip // if current fee > threshold (target + tolerance) and still inside the timeout window, skip
if current.Cmp(target) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second { if current.Cmp(threshold) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
return true, fmt.Errorf( return true, fmt.Errorf(
"blob-fee above target & window not yet passed; current=%s target=%s age=%s", "blob-fee above threshold & window not yet passed; current=%s target=%s threshold=%s tolerance=%s age=%s",
current.String(), target.String(), time.Since(oldest), current.String(), target.String(), threshold.String(), tolerance.String(), time.Since(oldest),
) )
} }

View File

@@ -186,7 +186,7 @@ func encodeBatchHeaderValidium(b *encoding.Batch, codecVersion encoding.CodecVer
) )
var version uint8 var version uint8
if codecVersion == encoding.CodecV8 || codecVersion == encoding.CodecV9 { if codecVersion == encoding.CodecV8 || codecVersion == encoding.CodecV9 || codecVersion == encoding.CodecV10 {
// Validium version line starts with v1, // Validium version line starts with v1,
// but rollup-relayer behavior follows v8. // but rollup-relayer behavior follows v8.
version = 1 version = 1

View File

@@ -0,0 +1,84 @@
package main
import (
"context"
"fmt"
"math/big"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
)
func fetchAndStoreBlocks(ctx context.Context, from, to uint64) ([]*encoding.Block, error) {
validiumMode := cfg.ValidiumMode
cfg := cfg.FetchConfig
client, err := rpc.Dial(cfg.Endpoint)
if err != nil {
return nil, fmt.Errorf("failed to connect l2 geth, endpoint %s, err %v", cfg.Endpoint, err)
}
defer client.Close()
ethCli := ethclient.NewClient(client)
var blocks []*encoding.Block
for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number)
block, err := ethCli.BlockByNumber(ctx, new(big.Int).SetUint64(number))
if err != nil {
return nil, fmt.Errorf("failed to BlockByNumber: %v. number: %v", err, number)
}
blockTxs := block.Transactions()
var count int
for _, tx := range blockTxs {
if tx.IsL1MessageTx() {
count++
}
}
log.Info("retrieved block", "height", block.Header().Number, "hash", block.Header().Hash().String(), "L1 message count", count)
// use original (encrypted) L1 message txs in validium mode
if validiumMode {
var txs []*types.Transaction
if count > 0 {
log.Info("Fetching encrypted messages in validium mode")
err = client.CallContext(ctx, &txs, "scroll_getL1MessagesInBlock", block.Hash(), "synced")
if err != nil {
return nil, fmt.Errorf("failed to get L1 messages: %v, block hash: %v", err, block.Hash().Hex())
}
}
// sanity check
if len(txs) != count {
return nil, fmt.Errorf("L1 message count mismatch: expected %d, got %d", count, len(txs))
}
for ii := 0; ii < count; ii++ {
// sanity check
if blockTxs[ii].AsL1MessageTx().QueueIndex != txs[ii].AsL1MessageTx().QueueIndex {
return nil, fmt.Errorf("L1 message queue index mismatch at index %d: expected %d, got %d", ii, blockTxs[ii].AsL1MessageTx().QueueIndex, txs[ii].AsL1MessageTx().QueueIndex)
}
log.Info("Replacing L1 message tx in validium mode", "index", ii, "queueIndex", txs[ii].AsL1MessageTx().QueueIndex, "decryptedTxHash", blockTxs[ii].Hash().Hex(), "originalTxHash", txs[ii].Hash().Hex())
blockTxs[ii] = txs[ii]
}
}
withdrawRoot, err3 := ethCli.StorageAt(ctx, cfg.L2MessageQueueAddress, cfg.WithdrawTrieRootSlot, big.NewInt(int64(number)))
if err3 != nil {
return nil, fmt.Errorf("failed to get withdrawRoot: %v. number: %v", err3, number)
}
blocks = append(blocks, &encoding.Block{
Header: block.Header(),
Transactions: encoding.TxsToTxsData(blockTxs),
WithdrawRoot: common.BytesToHash(withdrawRoot),
})
}
return blocks, nil
}

View File

@@ -42,13 +42,21 @@ func randomPickKfromN(n, k int, rng *rand.Rand) []int {
return ret return ret
} }
func importData(ctx context.Context, beginBlk, endBlk uint64, chkNum, batchNum, bundleNum int, seed int64) (*importRecord, error) { func importData(ctx context.Context, beginBlk, endBlk uint64, blocks []*encoding.Block, chkNum, batchNum, bundleNum int, seed int64) (*importRecord, error) {
db, err := database.InitDB(cfg.DBConfig) db, err := database.InitDB(cfg.DBConfig)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(blocks) > 0 {
log.Info("import block")
blockOrm := orm.NewL2Block(db)
if err := blockOrm.InsertL2Blocks(ctx, blocks); err != nil {
return nil, err
}
}
ret := &importRecord{} ret := &importRecord{}
// Create a new random source with the provided seed // Create a new random source with the provided seed
source := rand.NewSource(seed) source := rand.NewSource(seed)

View File

@@ -10,6 +10,7 @@ import (
"strings" "strings"
"github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
@@ -40,12 +41,6 @@ var seedFlag = cli.Int64Flag{
Value: 0, Value: 0,
} }
var codecFlag = cli.IntFlag{
Name: "codec",
Usage: "codec version, valid from 6, default(auto) is 0",
Value: 0,
}
func parseThreeIntegers(value string) (int, int, int, error) { func parseThreeIntegers(value string) (int, int, int, error) {
// Split the input string by comma // Split the input string by comma
parts := strings.Split(value, ",") parts := strings.Split(value, ",")
@@ -84,10 +79,21 @@ func parseThreeIntegers(value string) (int, int, int, error) {
return values[0], values[1], values[2], nil return values[0], values[1], values[2], nil
} }
type fetchConfig struct {
// node url.
Endpoint string `json:"endpoint"`
// The L2MessageQueue contract address deployed on layer 2 chain.
L2MessageQueueAddress common.Address `json:"l2_message_queue_address"`
// The WithdrawTrieRootSlot in L2MessageQueue contract.
WithdrawTrieRootSlot common.Hash `json:"withdraw_trie_root_slot,omitempty"`
}
// load a comptabile type of config for rollup // load a comptabile type of config for rollup
type config struct { type config struct {
DBConfig *database.Config `json:"db_config"` DBConfig *database.Config `json:"db_config"`
FetchConfig *fetchConfig `json:"fetch_config,omitempty"`
ValidiumMode bool `json:"validium_mode"` ValidiumMode bool `json:"validium_mode"`
CodecVersion int `json:"codec_version"`
} }
func init() { func init() {
@@ -97,7 +103,7 @@ func init() {
app.Name = "integration-test-tool" app.Name = "integration-test-tool"
app.Usage = "The Scroll L2 Integration Test Tool" app.Usage = "The Scroll L2 Integration Test Tool"
app.Version = version.Version app.Version = version.Version
app.Flags = append(app.Flags, &codecFlag, &seedFlag, &outputNumFlag, &outputPathFlag) app.Flags = append(app.Flags, &seedFlag, &outputNumFlag, &outputPathFlag)
app.Flags = append(app.Flags, utils.CommonFlags...) app.Flags = append(app.Flags, utils.CommonFlags...)
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
if err := utils.LogSetup(ctx); err != nil { if err := utils.LogSetup(ctx); err != nil {
@@ -120,13 +126,13 @@ func newConfig(file string) (*config, error) {
return nil, err return nil, err
} }
cfg := &config{} loadCfg := &config{}
err = json.Unmarshal(buf, cfg) err = json.Unmarshal(buf, loadCfg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return cfg, nil return loadCfg, nil
} }
func action(ctx *cli.Context) error { func action(ctx *cli.Context) error {
@@ -135,9 +141,8 @@ func action(ctx *cli.Context) error {
return fmt.Errorf("specify begin and end block number") return fmt.Errorf("specify begin and end block number")
} }
codecFl := ctx.Int(codecFlag.Name) if cfg.CodecVersion != 0 {
if codecFl != 0 { switch cfg.CodecVersion {
switch codecFl {
case 6: case 6:
codecCfg = encoding.CodecV6 codecCfg = encoding.CodecV6
case 7: case 7:
@@ -146,8 +151,10 @@ func action(ctx *cli.Context) error {
codecCfg = encoding.CodecV8 codecCfg = encoding.CodecV8
case 9: case 9:
codecCfg = encoding.CodecV9 codecCfg = encoding.CodecV9
case 10:
codecCfg = encoding.CodecV10
default: default:
return fmt.Errorf("invalid codec version %d", codecFl) return fmt.Errorf("invalid codec version %d", cfg.CodecVersion)
} }
log.Info("set codec", "version", codecCfg) log.Info("set codec", "version", codecCfg)
} }
@@ -161,6 +168,14 @@ func action(ctx *cli.Context) error {
return fmt.Errorf("invalid begin block number: %w", err) return fmt.Errorf("invalid begin block number: %w", err)
} }
var import_blocks []*encoding.Block
if cfg.FetchConfig != nil {
import_blocks, err = fetchAndStoreBlocks(ctx.Context, beginBlk, endBlk)
if err != nil {
return err
}
}
chkNum, batchNum, bundleNum, err := parseThreeIntegers(ctx.String(outputNumFlag.Name)) chkNum, batchNum, bundleNum, err := parseThreeIntegers(ctx.String(outputNumFlag.Name))
if err != nil { if err != nil {
return err return err
@@ -174,7 +189,7 @@ func action(ctx *cli.Context) error {
outputPath := ctx.String(outputPathFlag.Name) outputPath := ctx.String(outputPathFlag.Name)
log.Info("output", "Seed", seed, "file", outputPath) log.Info("output", "Seed", seed, "file", outputPath)
ret, err := importData(ctx.Context, beginBlk, endBlk, chkNum, batchNum, bundleNum, seed) ret, err := importData(ctx.Context, beginBlk, endBlk, import_blocks, chkNum, batchNum, bundleNum, seed)
if err != nil { if err != nil {
return err return err
} }

View File

@@ -5,8 +5,8 @@ go 1.22
toolchain go1.22.2 toolchain go1.22.2
require ( require (
github.com/scroll-tech/da-codec v0.9.0 github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.10.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
) )

View File

@@ -93,10 +93,10 @@ github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeC
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70= github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ= github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

View File

@@ -9,6 +9,9 @@ ifndef END_BLOCK
$(error END_BLOCK is not set. Define it in .make.env or pass END_BLOCK=<end_block>) $(error END_BLOCK is not set. Define it in .make.env or pass END_BLOCK=<end_block>)
endif endif
BLOCK_PRE_MIGRATIONS := $(wildcard conf/*.sql)
.OPTIONAL: $(BLOCK_PRE_MIGRATIONS)
all: setup_db test_tool import_data all: setup_db test_tool import_data
clean: clean:
@@ -25,6 +28,11 @@ check_vars: | conf
exit 1; \ exit 1; \
fi fi
migration_blocks: $(BLOCK_PRE_MIGRATIONS)
ifneq ($(strip $(BLOCK_PRE_MIGRATIONS)),)
GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} up-to 100
endif
setup_db: clean setup_db: clean
docker compose up --detach docker compose up --detach
@echo "Waiting for PostgreSQL to be ready..." @echo "Waiting for PostgreSQL to be ready..."
@@ -42,30 +50,18 @@ setup_db: clean
fi; \ fi; \
done done
${GOOSE_CMD} up ${GOOSE_CMD} up
GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} up-to 100
reset_db: reset_db:
GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} down
${GOOSE_CMD} down-to 0 ${GOOSE_CMD} down-to 0
${GOOSE_CMD} up ${GOOSE_CMD} up
GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} up-to 100
test_tool: test_tool:
go build -o $(PWD)/build/bin/e2e_tool ../../rollup/tests/integration_tool go build -o $(PWD)/build/bin/e2e_tool ../../rollup/tests/integration_tool
build/bin/e2e_tool: test_tool build/bin/e2e_tool: test_tool
import_data_euclid: build/bin/e2e_tool check_vars import_data: build/bin/e2e_tool check_vars migration_blocks
build/bin/e2e_tool --config conf/config.json --codec 7 ${BEGIN_BLOCK} ${END_BLOCK} build/bin/e2e_tool --config conf/config.json ${BEGIN_BLOCK} ${END_BLOCK}
import_data_feynman: build/bin/e2e_tool check_vars
build/bin/e2e_tool --config conf/config.json --codec 8 ${BEGIN_BLOCK} ${END_BLOCK}
import_data_galileo: build/bin/e2e_tool check_vars
build/bin/e2e_tool --config conf/config.json --codec 9 ${BEGIN_BLOCK} ${END_BLOCK}
import_data: build/bin/e2e_tool check_vars
build/bin/e2e_tool --config conf/config.json --codec ${CODEC_VERSION} ${BEGIN_BLOCK} ${END_BLOCK}
reimport_data: reset_db import_data reimport_data: reset_db import_data

View File

@@ -1,4 +1,3 @@
BEGIN_BLOCK?=35 BEGIN_BLOCK?=35
END_BLOCK?=49 END_BLOCK?=49
CODEC_VERSION?=8
SCROLL_FORK_NAME=feynman SCROLL_FORK_NAME=feynman

View File

@@ -5,5 +5,6 @@
"maxOpenNum": 5, "maxOpenNum": 5,
"maxIdleNum": 1 "maxIdleNum": 1
}, },
"validium_mode": true "validium_mode": true,
"codec_version": 8
} }

File diff suppressed because one or more lines are too long

View File

@@ -25,7 +25,7 @@ SELECT 'INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
quote_literal(transactions) || quote_literal(transactions) ||
');' ');'
FROM l2_block FROM l2_block
WHERE number >= 20278000 and number <= 20278050 WHERE number >= 15206780 and number <= 15206809
ORDER BY number ASC; ORDER BY number ASC;
-- Write footer -- Write footer

View File

@@ -1,4 +1,3 @@
BEGIN_BLOCK?=10973711 BEGIN_BLOCK?=10973711
END_BLOCK?=10973721 END_BLOCK?=10973721
CODEC_VERSION?=8
SCROLL_FORK_NAME=feynman SCROLL_FORK_NAME=feynman

View File

@@ -5,5 +5,6 @@
"maxOpenNum": 5, "maxOpenNum": 5,
"maxIdleNum": 1 "maxIdleNum": 1
}, },
"validium_mode": false "validium_mode": false,
"codec_version": 8
} }

View File

@@ -1,4 +1,3 @@
BEGIN_BLOCK?=20278022 BEGIN_BLOCK?=15206785
END_BLOCK?=20278025 END_BLOCK?=15206794
CODEC_VERSION?=9
SCROLL_FORK_NAME=galileo SCROLL_FORK_NAME=galileo

File diff suppressed because one or more lines are too long

View File

@@ -5,5 +5,6 @@
"maxOpenNum": 5, "maxOpenNum": 5,
"maxIdleNum": 1 "maxIdleNum": 1
}, },
"validium_mode": false "validium_mode": false,
"codec_version": 9
} }

View File

@@ -0,0 +1,3 @@
BEGIN_BLOCK?=20239245
END_BLOCK?=20239250
SCROLL_FORK_NAME=galileoV2

View File

@@ -0,0 +1,15 @@
{
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://dev:dev@localhost:5432/scroll?sslmode=disable",
"maxOpenNum": 5,
"maxIdleNum": 1
},
"fetch_config": {
"endpoint": "http://l2-sequencer-galileo-6.devnet.scroll.tech:8545",
"l2_message_queue_address": "0x5300000000000000000000000000000000000000"
},
"validium_mode": false,
"codec_version": 10
}

View File

@@ -0,0 +1,40 @@
{
"prover_manager": {
"provers_per_session": 1,
"session_attempts": 5,
"external_prover_threshold": 32,
"bundle_collection_time_sec": 180,
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"min_prover_version": "v4.4.33",
"verifiers": [
{
"assets_path": "assets",
"fork_name": "galileoV2"
}
]
}
},
"db": {
"driver_name": "postgres",
"dsn": "postgres://dev:dev@localhost/scroll?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"l2": {
"validium_mode": false,
"chain_id": 534351,
"l2geth": {
"endpoint": "<serach for a public rpc endpoint like alchemy>"
}
},
"auth": {
"secret": "prover secret key",
"challenge_expire_duration_sec": 3600,
"login_expire_duration_sec": 3600
},
"sequencer": {
"decryption_key": "not need"
}
}

File diff suppressed because one or more lines are too long

View File

@@ -62,4 +62,6 @@ test_run:
test_e2e_run: ${E2E_HANDLE_SET} test_e2e_run: ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json handle ${E2E_HANDLE_SET} GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}
test_e2e_run_gpu: ${E2E_HANDLE_SET}
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo run --release --features cuda -p prover -- --config ./config.json handle ${E2E_HANDLE_SET}

View File

@@ -21,13 +21,15 @@
}, },
"circuits": { "circuits": {
"feynman": { "feynman": {
"hard_fork_name": "feynman", "workspace_path": ".work/feynman",
"base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/feynman/", "base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/feynman/"
"workspace_path": ".work/feynman"
}, },
"galileo": { "galileo": {
"hard_fork_name": "galileo", "workspace_path": ".work/galileo",
"base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/galileo/", "base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/galileo/"
},
"galileoV2": {
"base_url": "https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/galileov2/",
"workspace_path": ".work/galileo" "workspace_path": ".work/galileo"
} }
} }