Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 15:08:09 -05:00)

Compare commits: 70 commits, v4.7.4...coordinato
Commits (70 total; only the SHA1 column survived the mirror, author and date cells were empty):

14e2633ba3  7de388ef1a  21326c25e6  27dd62eac3  22479a7952
690bc01c41  e75d6c16a9  752e4e1117  2ecc42e2f5  9c2bc02f64
9e5579c4cb  ac4a72003c  19447984bd  d66d705456  c938d6c25e
cf9e3680c0  e9470ff7a5  51b1e79b31  c22d9ecad1  e7551650b2
20fde41be8  4df1dd8acd  6696aac16a  4b79e63c9b  ac0396db3c
17e6c5b7ac  b6e33456fa  7572bf8923  5d41788b07  8f8a537fba
b1c3a4ecc0  d9a29cddce  c992157eb4  404c664e10  8a15836d20
4365aafa9a  6ee026fa16  c79ad57fb7  fa5b113248  884b050866
1d9fa41535  b7f23c6734  057e22072c  c7b83a0784  92ca7a6b76
256c90af6f  50f3e1a97c  2721503657  a04b64df03  78dbe6cde1
9df6429d98  e6be62f633  c72ee5d679  4725d8a73c  322766f54f
5614ec3b86  5a07a1652b  64ef0f4ec0  321dd43af8  624a7a29b8
4f878d9231  7b3a65b35b  0d238d77a6  76ecdf064a  5c6c225f76
3adb2e0a1b  412ad56a64  9796d16f6c  1f2b857671  5dbb5c5fb7
.github/workflows/docker.yml (vendored, 43 changed lines)

@@ -360,6 +360,49 @@ jobs:
           scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
           ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
 
+  coordinator-proxy:
+    runs-on:
+      group: scroll-reth-runner-group
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ env.AWS_REGION }}
+      - name: Login to Amazon ECR
+        id: login-ecr
+        uses: aws-actions/amazon-ecr-login@v2
+      - name: check repo and create it if not exist
+        env:
+          REPOSITORY: coordinator-proxy
+        run: |
+          aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
+      - name: Build and push
+        uses: docker/build-push-action@v3
+        env:
+          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
+          REPOSITORY: coordinator-proxy
+          IMAGE_TAG: ${{ github.ref_name }}
+        with:
+          context: .
+          file: ./build/dockerfiles/coordinator-proxy.Dockerfile
+          push: true
+          tags: |
+            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
+            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
+
   coordinator-cron:
     runs-on:
       group: scroll-reth-runner-group
Cargo.lock (generated, 163 changed lines)

@@ -1347,7 +1347,7 @@ dependencies = [
  "bitflags 2.10.0",
  "cexpr",
  "clang-sys",
- "itertools 0.12.1",
+ "itertools 0.11.0",
  "lazy_static",
  "lazycell",
  "proc-macro2",
@@ -1814,7 +1814,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c"
 dependencies = [
  "lazy_static",
- "windows-sys 0.59.0",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -2427,7 +2427,7 @@ dependencies = [
 [[package]]
 name = "encoder-standard"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/da-codec#7a92e859b55094ba5b5c7d556c49c4dbd3f47ddb"
+source = "git+https://github.com/scroll-tech/da-codec#afa161a4487fe3ba600bfdb792daeb3dcc21fa25"
 dependencies = [
  "zstd",
 ]
@@ -3851,15 +3851,6 @@ dependencies = [
  "either",
 ]
 
-[[package]]
-name = "itertools"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
-dependencies = [
- "either",
-]
-
 [[package]]
 name = "itertools"
 version = "0.13.0"
@@ -4019,7 +4010,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667"
 dependencies = [
  "cfg-if",
- "windows-targets 0.52.6",
+ "windows-targets 0.48.5",
 ]
 
 [[package]]
@@ -4588,7 +4579,7 @@ version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56"
 dependencies = [
- "proc-macro-crate 3.3.0",
+ "proc-macro-crate 1.3.1",
  "proc-macro2",
  "quote",
  "syn 2.0.101",
@@ -7116,7 +7107,7 @@ dependencies = [
 [[package]]
 name = "reth-chainspec"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-chains",
  "alloy-consensus",
@@ -7136,7 +7127,7 @@ dependencies = [
 [[package]]
 name = "reth-codecs"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -7154,7 +7145,7 @@ dependencies = [
 [[package]]
 name = "reth-codecs-derive"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -7164,7 +7155,7 @@ dependencies = [
 [[package]]
 name = "reth-consensus"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-primitives",
@@ -7177,7 +7168,7 @@ dependencies = [
 [[package]]
 name = "reth-consensus-common"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -7189,7 +7180,7 @@ dependencies = [
 [[package]]
 name = "reth-db-models"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-eips 1.0.41",
  "alloy-primitives",
@@ -7199,7 +7190,7 @@ dependencies = [
 [[package]]
 name = "reth-errors"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "reth-consensus",
  "reth-execution-errors",
@@ -7210,7 +7201,7 @@ dependencies = [
 [[package]]
 name = "reth-ethereum-consensus"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -7226,7 +7217,7 @@ dependencies = [
 [[package]]
 name = "reth-ethereum-forks"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-eip2124",
  "alloy-hardforks",
@@ -7238,7 +7229,7 @@ dependencies = [
 [[package]]
 name = "reth-ethereum-primitives"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -7255,7 +7246,7 @@ dependencies = [
 [[package]]
 name = "reth-evm"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -7277,7 +7268,7 @@ dependencies = [
 [[package]]
 name = "reth-evm-ethereum"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -7297,7 +7288,7 @@ dependencies = [
 [[package]]
 name = "reth-execution-errors"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-evm",
  "alloy-primitives",
@@ -7310,7 +7301,7 @@ dependencies = [
 [[package]]
 name = "reth-execution-types"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -7326,7 +7317,7 @@ dependencies = [
 [[package]]
 name = "reth-network-peers"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -7338,7 +7329,7 @@ dependencies = [
 [[package]]
 name = "reth-primitives"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "once_cell",
@@ -7351,7 +7342,7 @@ dependencies = [
 [[package]]
 name = "reth-primitives-traits"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -7379,7 +7370,7 @@ dependencies = [
 [[package]]
 name = "reth-prune-types"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-primitives",
  "derive_more 2.0.1",
@@ -7389,7 +7380,7 @@ dependencies = [
 [[package]]
 name = "reth-revm"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-primitives",
  "reth-primitives-traits",
@@ -7401,7 +7392,7 @@ dependencies = [
 [[package]]
 name = "reth-scroll-chainspec"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-chains",
  "alloy-consensus",
@@ -7426,7 +7417,7 @@ dependencies = [
 [[package]]
 name = "reth-scroll-evm"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -7456,7 +7447,7 @@ dependencies = [
 [[package]]
 name = "reth-scroll-forks"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-chains",
  "alloy-primitives",
@@ -7470,7 +7461,7 @@ dependencies = [
 [[package]]
 name = "reth-scroll-primitives"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -7487,7 +7478,7 @@ dependencies = [
 [[package]]
 name = "reth-stages-types"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-primitives",
  "reth-trie-common",
@@ -7496,7 +7487,7 @@ dependencies = [
 [[package]]
 name = "reth-stateless"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-primitives",
@@ -7522,7 +7513,7 @@ dependencies = [
 [[package]]
 name = "reth-static-file-types"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-primitives",
  "derive_more 2.0.1",
@@ -7533,7 +7524,7 @@ dependencies = [
 [[package]]
 name = "reth-storage-api"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -7555,7 +7546,7 @@ dependencies = [
 [[package]]
 name = "reth-storage-errors"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-eips 1.0.41",
  "alloy-primitives",
@@ -7571,7 +7562,7 @@ dependencies = [
 [[package]]
 name = "reth-trie"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -7593,7 +7584,7 @@ dependencies = [
 [[package]]
 name = "reth-trie-common"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-primitives",
@@ -7609,7 +7600,7 @@ dependencies = [
 [[package]]
 name = "reth-trie-sparse"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -7625,7 +7616,7 @@ dependencies = [
 [[package]]
 name = "reth-zstd-compressors"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "zstd",
 ]
@@ -8231,7 +8222,7 @@ dependencies = [
 [[package]]
 name = "risc0-ethereum-trie"
 version = "0.1.0"
-source = "git+https://github.com/risc0/risc0-ethereum#c1ddb41a44dc0730da883bbfa9fbe75ad335df1b"
+source = "git+https://github.com/risc0/risc0-ethereum#e475fe6c8dcff92fb5e67d6556cb11ba3ab4e494"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -8519,7 +8510,7 @@ checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
 [[package]]
 name = "sbv-core"
 version = "2.0.0"
-source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91#dd7f8dad597344e472d239dc135ee952efd185f5"
+source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91.2#3a32848c9438432125751eae8837757f6b87562e"
 dependencies = [
  "auto_impl",
  "itertools 0.14.0",
@@ -8536,7 +8527,7 @@ dependencies = [
 [[package]]
 name = "sbv-helpers"
 version = "2.0.0"
-source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91#dd7f8dad597344e472d239dc135ee952efd185f5"
+source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91.2#3a32848c9438432125751eae8837757f6b87562e"
 dependencies = [
  "tracing",
 ]
@@ -8544,7 +8535,7 @@ dependencies = [
 [[package]]
 name = "sbv-primitives"
 version = "2.0.0"
-source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91#dd7f8dad597344e472d239dc135ee952efd185f5"
+source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91.2#3a32848c9438432125751eae8837757f6b87562e"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -8554,8 +8545,6 @@ dependencies = [
  "alloy-rpc-types-debug",
  "alloy-rpc-types-eth",
  "alloy-serde 1.0.41",
- "auto_impl",
- "itertools 0.14.0",
  "reth-chainspec",
  "reth-ethereum-forks",
  "reth-evm",
@@ -8581,7 +8570,7 @@ dependencies = [
 [[package]]
 name = "sbv-trie"
 version = "2.0.0"
-source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91#dd7f8dad597344e472d239dc135ee952efd185f5"
+source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91.2#3a32848c9438432125751eae8837757f6b87562e"
 dependencies = [
  "alloy-rlp",
  "alloy-trie 0.9.1",
@@ -8594,10 +8583,9 @@ dependencies = [
 [[package]]
 name = "sbv-utils"
 version = "2.0.0"
-source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91#dd7f8dad597344e472d239dc135ee952efd185f5"
+source = "git+https://github.com/scroll-tech/stateless-block-verifier?tag=scroll-v91.2#3a32848c9438432125751eae8837757f6b87562e"
 dependencies = [
  "alloy-provider",
- "alloy-rpc-client",
  "alloy-transport",
  "async-trait",
  "futures",
@@ -8673,7 +8661,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
 [[package]]
 name = "scroll-alloy-consensus"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -8689,7 +8677,7 @@ dependencies = [
 [[package]]
 name = "scroll-alloy-evm"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -8707,7 +8695,7 @@ dependencies = [
 [[package]]
 name = "scroll-alloy-hardforks"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-hardforks",
  "auto_impl",
@@ -8717,7 +8705,7 @@ dependencies = [
 [[package]]
 name = "scroll-alloy-network"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-network",
@@ -8732,7 +8720,7 @@ dependencies = [
 [[package]]
 name = "scroll-alloy-rpc-types"
 version = "1.8.2"
-source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91#857624cf982bc814b0da14f3923ccc1ba5cc7dc7"
+source = "git+https://github.com/scroll-tech/reth?tag=scroll-v91.2#11d0a3f73186dee7a1ba0d51ea5416dc8fef3e46"
 dependencies = [
  "alloy-consensus",
  "alloy-eips 1.0.41",
@@ -8778,8 +8766,8 @@ dependencies = [
 
 [[package]]
 name = "scroll-zkvm-prover"
-version = "0.7.0"
-source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf"
+version = "0.7.1"
+source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
 dependencies = [
  "base64 0.22.1",
  "bincode 1.3.3",
@@ -8805,8 +8793,8 @@ dependencies = [
 
 [[package]]
 name = "scroll-zkvm-types"
-version = "0.7.0"
-source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf"
+version = "0.7.1"
+source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
 dependencies = [
  "alloy-primitives",
  "base64 0.22.1",
@@ -8829,8 +8817,8 @@ dependencies = [
 
 [[package]]
 name = "scroll-zkvm-types-base"
-version = "0.7.0"
-source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf"
+version = "0.7.1"
+source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
 dependencies = [
  "alloy-primitives",
  "alloy-serde 1.0.41",
@@ -8842,8 +8830,8 @@ dependencies = [
 
 [[package]]
 name = "scroll-zkvm-types-batch"
-version = "0.7.0"
-source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf"
+version = "0.7.1"
+source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
 dependencies = [
  "alloy-primitives",
  "c-kzg",
@@ -8864,8 +8852,8 @@ dependencies = [
 
 [[package]]
 name = "scroll-zkvm-types-bundle"
-version = "0.7.0"
-source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf"
+version = "0.7.1"
+source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
 dependencies = [
  "rkyv",
  "scroll-zkvm-types-base",
@@ -8874,8 +8862,8 @@ dependencies = [
 
 [[package]]
 name = "scroll-zkvm-types-chunk"
-version = "0.7.0"
-source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf"
+version = "0.7.1"
+source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
 dependencies = [
  "alloy-consensus",
  "alloy-primitives",
@@ -8899,8 +8887,8 @@ dependencies = [
 
 [[package]]
 name = "scroll-zkvm-verifier"
-version = "0.7.0"
-source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.0#56c951893bac4754a170dd95fa186d21aa34e2bf"
+version = "0.7.1"
+source = "git+https://github.com/scroll-tech/zkvm-prover?tag=v0.7.1#85dc6bc56728b8eef22281fdb215c136d7b5bbda"
 dependencies = [
  "bincode 1.3.3",
  "eyre",
@@ -9103,15 +9091,16 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.140"
+version = "1.0.145"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
+checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
 dependencies = [
  "indexmap 2.9.0",
  "itoa",
  "memchr",
  "ryu",
  "serde",
+ "serde_core",
 ]
 
 [[package]]
@@ -9126,20 +9115,21 @@ dependencies = [
 [[package]]
 name = "serde_spanned"
-version = "0.6.8"
+version = "0.6.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1"
+checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3"
 dependencies = [
  "serde",
 ]
 
 [[package]]
 name = "serde_stacker"
-version = "0.1.12"
+version = "0.1.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69c8defe6c780725cce4ec6ad3bd91e321baf6fa4e255df1f31e345d507ef01a"
+checksum = "d4936375d50c4be7eff22293a9344f8e46f323ed2b3c243e52f89138d9bb0f4a"
 dependencies = [
  "serde",
+ "serde_core",
  "stacker",
 ]
 
@@ -9157,9 +9147,9 @@ dependencies = [
 [[package]]
 name = "serde_with"
-version = "3.14.0"
+version = "3.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5"
+checksum = "10574371d41b0d9b2cff89418eda27da52bcaff2cc8741db26382a77c29131f1"
 dependencies = [
  "base64 0.22.1",
  "chrono",
@@ -9168,8 +9158,7 @@ dependencies = [
  "indexmap 2.9.0",
- "schemars 0.9.0",
+ "schemars 1.0.4",
- "serde",
- "serde_derive",
+ "serde_core",
  "serde_json",
  "serde_with_macros",
  "time",
@@ -9177,11 +9166,11 @@ dependencies = [
 [[package]]
 name = "serde_with_macros"
-version = "3.14.0"
+version = "3.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f"
+checksum = "08a72d8216842fdd57820dc78d840bef99248e35fb2554ff923319e60f2d686b"
 dependencies = [
- "darling 0.20.11",
+ "darling 0.21.3",
  "proc-macro2",
  "quote",
  "syn 2.0.101",
Cargo.toml (12 changed lines)

@@ -17,13 +17,13 @@ repository = "https://github.com/scroll-tech/scroll"
 version = "4.7.1"
 
 [workspace.dependencies]
-scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" }
-scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" }
-scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.0" }
+scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
+scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
+scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
 
-sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91", features = ["scroll", "rkyv"] }
-sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91" }
-sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91", features = ["scroll"] }
+sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll", "rkyv"] }
+sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2" }
+sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll"] }
 
 metrics = "0.23.0"
 metrics-util = "0.17"
Makefile (2 changed lines)

@@ -1,6 +1,6 @@
 .PHONY: fmt dev_docker build_test_docker run_test_docker clean update
 
-L2GETH_TAG=scroll-v5.9.7
+L2GETH_TAG=scroll-v5.9.17
 
 help: ## Display this help message
 	@grep -h \
go.mod and go.sum (file headers were lost in the mirror for this and several of the following diffs):

@@ -10,8 +10,8 @@ require (
 	github.com/go-redis/redis/v8 v8.11.5
 	github.com/pressly/goose/v3 v3.16.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/da-codec v0.9.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20251113125950-906b730d541d
+	github.com/scroll-tech/da-codec v0.10.0
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
 	github.com/stretchr/testify v1.10.0
 	github.com/urfave/cli/v2 v2.25.7
 	golang.org/x/sync v0.11.0
@@ -21,7 +21,7 @@ require (
 // Hotfix for header hash incompatibility issue.
 // PR: https://github.com/scroll-tech/go-ethereum/pull/1133/
 // CAUTION: Requires careful handling. When upgrading go-ethereum, ensure this fix remains up-to-date in this branch.
-replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e
+replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b
 
 require (
 	dario.cat/mergo v1.0.0 // indirect
@@ -309,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
-github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e h1:7U1/JilCPIqJTYNgKzlfBEkRNORRwJ8+PCmdR8/XK6A=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
+github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
+github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b h1:pMQKnroJoS/FeL1aOWkz7/u1iBHUP8PWjZstNuzoUGE=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
@@ -361,7 +361,6 @@ func getTxHistoryInfoFromBridgeBatchDepositMessage(message *orm.BridgeBatchDepos
 func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pageNum, pageSize uint64) ([]*types.TxHistoryInfo, uint64, bool, error) {
 	start := int64((pageNum - 1) * pageSize)
 	end := start + int64(pageSize) - 1
-
 	total, err := h.redis.ZCard(ctx, cacheKey).Result()
 	if err != nil {
 		log.Error("failed to get zcard result", "error", err)
@@ -372,6 +371,10 @@ func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pa
 		return nil, 0, false, nil
 	}
 
+	if start >= total {
+		return nil, 0, false, nil
+	}
+
 	values, err := h.redis.ZRevRange(ctx, cacheKey, start, end).Result()
 	if err != nil {
 		log.Error("failed to get zrange result", "error", err)
@@ -450,5 +453,6 @@ func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKe
 		log.Error("cache miss after write, expect hit", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
 		return nil, 0, err
 	}
+
 	return pagedTxs, total, nil
 }
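Note on the getCachedTxsInfo guard above: ZREVRANGE tolerates out-of-range indexes, so before this change a page starting at or past the set's cardinality still cost a Redis round trip and then surfaced as an empty cache result. A minimal, self-contained sketch of the bounds logic (go-redis v8, matching the require block above; the key and page values are illustrative, not the repository's):

// pagebounds_sketch.go: illustrates the early return added in the hunk above.
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// cachedPage mirrors the pagination arithmetic in getCachedTxsInfo.
func cachedPage(ctx context.Context, rdb *redis.Client, key string, pageNum, pageSize uint64) ([]string, error) {
	start := int64((pageNum - 1) * pageSize)
	end := start + int64(pageSize) - 1

	total, err := rdb.ZCard(ctx, key).Result()
	if err != nil {
		return nil, err
	}
	// The added guard: a page beginning at or beyond the cardinality can
	// never match anything, so skip the ZREVRANGE round trip entirely.
	if start >= total {
		return nil, nil
	}
	return rdb.ZRevRange(ctx, key, start, end).Result()
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // illustrative address
	vals, err := cachedPage(context.Background(), rdb, "txs:0xabc", 2, 10)
	fmt.Println(vals, err)
}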
build/dockerfiles/coordinator-proxy.Dockerfile (new file, 26 lines)

@@ -0,0 +1,26 @@
+# Download Go dependencies
+FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
+WORKDIR /src
+COPY go.work* ./
+COPY ./rollup/go.* ./rollup/
+COPY ./common/go.* ./common/
+COPY ./coordinator/go.* ./coordinator/
+COPY ./database/go.* ./database/
+COPY ./tests/integration-test/go.* ./tests/integration-test/
+COPY ./bridge-history-api/go.* ./bridge-history-api/
+RUN go mod download -x
+
+# Build coordinator proxy
+FROM base as builder
+COPY . .
+RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_proxy && mv ./build/bin/coordinator_proxy /bin/coordinator_proxy
+
+# Pull coordinator proxy into a second stage deploy ubuntu container
+FROM ubuntu:20.04
+ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
+RUN apt update && apt install vim netcat-openbsd net-tools curl jq -y
+COPY --from=builder /bin/coordinator_proxy /bin/
+RUN /bin/coordinator_proxy --version
+WORKDIR /app
+ENTRYPOINT ["/bin/coordinator_proxy"]
New ignore-style file (8 lines; file name not preserved in the mirror):

@@ -0,0 +1,8 @@
+assets/
+contracts/
+docs/
+l2geth/
+rpc-gateway/
+*target/*
+
+permissionless-batches/conf/
go.mod and go.sum:

@@ -12,10 +12,11 @@ require (
 	github.com/gin-gonic/gin v1.9.1
 	github.com/mattn/go-colorable v0.1.13
 	github.com/mattn/go-isatty v0.0.20
+	github.com/mitchellh/mapstructure v1.5.0
 	github.com/modern-go/reflect2 v1.0.2
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
 	github.com/stretchr/testify v1.10.0
 	github.com/testcontainers/testcontainers-go v0.30.0
 	github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
@@ -147,7 +148,6 @@ require (
 	github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
-	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mitchellh/pointerstructure v1.2.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/mmcloughlin/addchain v0.4.0 // indirect
@@ -184,7 +184,7 @@ require (
 	github.com/rjeczalik/notify v0.9.1 // indirect
 	github.com/rs/cors v1.7.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/scroll-tech/da-codec v0.9.0 // indirect
+	github.com/scroll-tech/da-codec v0.10.0 // indirect
 	github.com/scroll-tech/zktrie v0.8.4 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
 	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
@@ -636,10 +636,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
-github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
+github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
+github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
Compose file and Dockerfile (ethereum/client-go image bump):

@@ -34,7 +34,7 @@ services:
 
   # Sets up the genesis configuration for the go-ethereum client from a JSON file.
   geth-genesis:
-    image: "ethereum/client-go:v1.13.14"
+    image: "ethereum/client-go:v1.14.0"
     command: --datadir=/data/execution init /data/execution/genesis.json
     volumes:
       - data:/data
@@ -80,7 +80,7 @@ services:
   # Runs the go-ethereum execution client with the specified, unlocked account and necessary
   # APIs to allow for proof-of-stake consensus via Prysm.
   geth:
-    image: "ethereum/client-go:v1.13.14"
+    image: "ethereum/client-go:v1.14.0"
     command:
       - --http
       - --http.api=eth,net,web3
@@ -1,4 +1,4 @@
-FROM ethereum/client-go:v1.13.14
+FROM ethereum/client-go:v1.14.0
 
 COPY password /l1geth/
 COPY genesis.json /l1geth/
@@ -167,13 +167,13 @@ func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) {
 	return contrainer.PortEndpoint(context.Background(), "8545/tcp", "http")
 }
 
-// GetPoSL1Client returns a ethclient by dialing running PoS L1 client
-func (t *TestcontainerApps) GetPoSL1Client() (*ethclient.Client, error) {
+// GetPoSL1Client returns a raw rpc client by dialing the L1 node
+func (t *TestcontainerApps) GetPoSL1Client() (*rpc.Client, error) {
 	endpoint, err := t.GetPoSL1EndPoint()
 	if err != nil {
 		return nil, err
 	}
-	return ethclient.Dial(endpoint)
+	return rpc.Dial(endpoint)
 }
 
 // GetDBEndPoint returns the endpoint of the running postgres container
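Design note on the GetPoSL1Client change above: returning the raw *rpc.Client is the more general interface, since callers can issue any JSON-RPC method directly and still recover the typed API by wrapping the client. A short sketch under that assumption, using only public go-ethereum APIs (rpc.Dial, CallContext, ethclient.NewClient); the endpoint is illustrative:

// clientwrap_sketch.go: a raw *rpc.Client is a superset of *ethclient.Client.
package main

import (
	"context"
	"fmt"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rpc"
)

func main() {
	rawCli, err := rpc.Dial("http://localhost:8545") // illustrative endpoint
	if err != nil {
		panic(err)
	}
	// Raw JSON-RPC for methods the typed client does not expose.
	var clientVersion string
	if err := rawCli.CallContext(context.Background(), &clientVersion, "web3_clientVersion"); err != nil {
		panic(err)
	}
	// The typed bindings remain one wrap away.
	ethCli := ethclient.NewClient(rawCli)
	blockNum, err := ethCli.BlockNumber(context.Background())
	fmt.Println(clientVersion, blockNum, err)
}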
@@ -221,7 +221,6 @@ func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
 
 // GetL2GethClient returns a ethclient by dialing running L2Geth
 func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
-
 	rpcCli, err := t.GetL2Client()
 	if err != nil {
 		return nil, err
@@ -3,7 +3,6 @@ package testcontainers
 import (
 	"testing"
 
-	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/stretchr/testify/assert"
 	"gorm.io/gorm"
 )
@@ -14,7 +13,6 @@ func TestNewTestcontainerApps(t *testing.T) {
 		err          error
 		endpoint     string
 		gormDBclient *gorm.DB
-		ethclient    *ethclient.Client
 	)
 
 	testApps := NewTestcontainerApps()
@@ -32,17 +30,17 @@ func TestNewTestcontainerApps(t *testing.T) {
 	endpoint, err = testApps.GetL2GethEndPoint()
 	assert.NoError(t, err)
 	assert.NotEmpty(t, endpoint)
-	ethclient, err = testApps.GetL2GethClient()
+	l2RawClient, err := testApps.GetL2Client()
 	assert.NoError(t, err)
-	assert.NotNil(t, ethclient)
+	assert.NotNil(t, l2RawClient)
 
 	assert.NoError(t, testApps.StartPoSL1Container())
 	endpoint, err = testApps.GetPoSL1EndPoint()
 	assert.NoError(t, err)
 	assert.NotEmpty(t, endpoint)
-	ethclient, err = testApps.GetPoSL1Client()
+	l1RawClient, err := testApps.GetPoSL1Client()
 	assert.NoError(t, err)
-	assert.NotNil(t, ethclient)
+	assert.NotNil(t, l1RawClient)
 
 	assert.NoError(t, testApps.StartWeb3SignerContainer(1))
 	endpoint, err = testApps.GetWeb3SignerEndpoint()
@@ -4,6 +4,7 @@ import (
 	"net/http"
 
 	"github.com/gin-gonic/gin"
+	"github.com/mitchellh/mapstructure"
 )
 
 // Response the response schema
@@ -13,6 +14,19 @@ type Response struct {
 	Data interface{} `json:"data"`
 }
 
+func (resp *Response) DecodeData(out interface{}) error {
+	// Decode generically unmarshaled JSON (map[string]any, []any) into a typed struct
+	// honoring `json` tags and allowing weak type conversions.
+	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+		TagName: "json",
+		Result:  out,
+	})
+	if err != nil {
+		return err
+	}
+	return dec.Decode(resp.Data)
+}
+
 // RenderJSON renders response with json
 func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
 	var errMsg string
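Usage sketch for the DecodeData helper added above: once a response body has been unmarshaled into Response, Data holds generic JSON (map[string]interface{} or []interface{}), and DecodeData re-maps it onto a typed struct through the struct's json tags. The loginSchema type, the envelope error fields, and the payload below are illustrative stand-ins, not the repository's definitions:

// decodedata_sketch.go: round-trips a generic "data" payload into a typed struct.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Response struct {
	ErrCode int         `json:"errcode"` // illustrative envelope fields
	ErrMsg  string      `json:"errmsg"`
	Data    interface{} `json:"data"`
}

// DecodeData has the same shape as the helper in the hunk above.
func (resp *Response) DecodeData(out interface{}) error {
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		TagName: "json",
		Result:  out,
	})
	if err != nil {
		return err
	}
	return dec.Decode(resp.Data)
}

type loginSchema struct {
	Time  string `json:"time"`
	Token string `json:"token"`
}

func main() {
	raw := []byte(`{"errcode":0,"errmsg":"","data":{"time":"2025-01-01T00:00:00Z","token":"abc"}}`)
	var resp Response
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	var login loginSchema
	if err := resp.DecodeData(&login); err != nil { // Data is a map[string]interface{} here
		panic(err)
	}
	fmt.Println(login.Token) // "abc"
}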
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.7.4"
+var tag = "v4.7.10"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
@@ -34,6 +34,10 @@ coordinator_cron:
 coordinator_tool:
 	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool
 
+coordinator_proxy:
+	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_proxy ./cmd/proxy
+
 localsetup: coordinator_api ## Local setup: build coordinator_api, copy config, and setup releases
 	mkdir -p build/bin/conf
 	@echo "Copying configuration files..."
@@ -7,7 +7,7 @@ if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
 fi
 
 # default fork name from env or "galileo"
-SCROLL_FORK_NAME="${SCROLL_FORK_NAME:-galileo}"
+SCROLL_FORK_NAME="${SCROLL_FORK_NAME:-galileov2}"
 
 # set ASSET_DIR by reading from config.json
 CONFIG_FILE="bin/conf/config.template.json"
coordinator/cmd/proxy/app/app.go (new file, 122 lines)

@@ -0,0 +1,122 @@
+package app
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"os"
+	"os/signal"
+	"time"
+
+	"github.com/gin-gonic/gin"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/scroll-tech/go-ethereum/log"
+	"github.com/urfave/cli/v2"
+	"gorm.io/gorm"
+
+	"scroll-tech/common/database"
+	"scroll-tech/common/observability"
+	"scroll-tech/common/utils"
+	"scroll-tech/common/version"
+
+	"scroll-tech/coordinator/internal/config"
+	"scroll-tech/coordinator/internal/controller/proxy"
+	"scroll-tech/coordinator/internal/route"
+)
+
+var app *cli.App
+
+func init() {
+	// Set up coordinator app info.
+	app = cli.NewApp()
+	app.Action = action
+	app.Name = "coordinator proxy"
+	app.Usage = "Proxy for multiple Scroll L2 Coordinators"
+	app.Version = version.Version
+	app.Flags = append(app.Flags, utils.CommonFlags...)
+	app.Flags = append(app.Flags, apiFlags...)
+	app.Before = func(ctx *cli.Context) error {
+		return utils.LogSetup(ctx)
+	}
+	// Register `coordinator-test` app for integration-test.
+	utils.RegisterSimulation(app, utils.CoordinatorAPIApp)
+}
+
+func action(ctx *cli.Context) error {
+	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
+	cfg, err := config.NewProxyConfig(cfgFile)
+	if err != nil {
+		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
+	}
+
+	var db *gorm.DB
+	if dbCfg := cfg.ProxyManager.DB; dbCfg != nil {
+		log.Info("Apply persistent storage", "via", cfg.ProxyManager.DB.DSN)
+		db, err = database.InitDB(cfg.ProxyManager.DB)
+		if err != nil {
+			log.Crit("failed to init db connection", "err", err)
+		}
+		defer func() {
+			if err = database.CloseDB(db); err != nil {
+				log.Error("can not close db connection", "error", err)
+			}
+		}()
+		observability.Server(ctx, db)
+	}
+	registry := prometheus.DefaultRegisterer
+
+	apiSrv := server(ctx, cfg, db, registry)
+
+	log.Info(
+		"Start coordinator api successfully.",
+		"version", version.Version,
+	)
+
+	// Catch CTRL-C to ensure a graceful shutdown.
+	interrupt := make(chan os.Signal, 1)
+	signal.Notify(interrupt, os.Interrupt)
+
+	// Wait until the interrupt signal is received from an OS signal.
+	<-interrupt
+	log.Info("start shutdown coordinator proxy server ...")
+
+	closeCtx, cancelExit := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancelExit()
+	if err = apiSrv.Shutdown(closeCtx); err != nil {
+		log.Warn("shutdown coordinator proxy server failure", "error", err)
+		return nil
+	}
+
+	<-closeCtx.Done()
+	log.Info("coordinator proxy server exiting success")
+	return nil
+}
+
+func server(ctx *cli.Context, cfg *config.ProxyConfig, db *gorm.DB, reg prometheus.Registerer) *http.Server {
+	router := gin.New()
+	proxy.InitController(cfg, db, reg)
+	route.ProxyRoute(router, cfg, reg)
+	port := ctx.String(httpPortFlag.Name)
+	srv := &http.Server{
+		Addr:              fmt.Sprintf(":%s", port),
+		Handler:           router,
+		ReadHeaderTimeout: time.Minute,
+	}
+
+	go func() {
+		if runServerErr := srv.ListenAndServe(); runServerErr != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
+			log.Crit("run coordinator proxy http server failure", "error", runServerErr)
+		}
+	}()
+	return srv
+}
+
+// Run coordinator.
+func Run() {
+	// RunApp the coordinator.
+	if err := app.Run(os.Args); err != nil {
+		_, _ = fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}
coordinator/cmd/proxy/app/flags.go (new file, 30 lines)

@@ -0,0 +1,30 @@
+package app
+
+import "github.com/urfave/cli/v2"
+
+var (
+	apiFlags = []cli.Flag{
+		// http flags
+		&httpEnabledFlag,
+		&httpListenAddrFlag,
+		&httpPortFlag,
+	}
+	// httpEnabledFlag enable rpc server.
+	httpEnabledFlag = cli.BoolFlag{
+		Name:  "http",
+		Usage: "Enable the HTTP-RPC server",
+		Value: false,
+	}
+	// httpListenAddrFlag set the http address.
+	httpListenAddrFlag = cli.StringFlag{
+		Name:  "http.addr",
+		Usage: "HTTP-RPC server listening interface",
+		Value: "localhost",
+	}
+	// httpPortFlag set http.port.
+	httpPortFlag = cli.IntFlag{
+		Name:  "http.port",
+		Usage: "HTTP-RPC server listening port",
+		Value: 8590,
+	}
+)
coordinator/cmd/proxy/main.go (new file, 7 lines)

@@ -0,0 +1,7 @@
+package main
+
+import "scroll-tech/coordinator/cmd/proxy/app"
+
+func main() {
+	app.Run()
+}
@@ -17,7 +17,11 @@
       {
         "assets_path": "assets",
         "fork_name": "galileo"
-      }
+      },
+      {
+        "assets_path": "assets_v2",
+        "fork_name": "galileoV2"
+      }
     ]
   }
 },
coordinator/conf/config_proxy.json (new file, 31 lines)

@@ -0,0 +1,31 @@
+{
+    "proxy_manager": {
+        "proxy_cli": {
+            "proxy_name": "proxy_name",
+            "secret": "client private key"
+        },
+        "auth": {
+            "secret": "proxy secret key",
+            "challenge_expire_duration_sec": 3600,
+            "login_expire_duration_sec": 3600
+        },
+        "verifier": {
+            "min_prover_version": "v4.4.45",
+            "verifiers": []
+        },
+        "db": {
+            "driver_name": "postgres",
+            "dsn": "postgres://localhost/scroll?sslmode=disable",
+            "maxOpenNum": 200,
+            "maxIdleNum": 20
+        }
+    },
+    "coordinators": {
+        "sepolia": {
+            "base_url": "http://localhost:8555",
+            "retry_count": 10,
+            "retry_wait_time_sec": 10,
+            "connection_timeout_sec": 30
+        }
+    }
+}
go.mod and go.sum:

@@ -9,8 +9,8 @@ require (
 	github.com/google/uuid v1.6.0
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/da-codec v0.9.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
+	github.com/scroll-tech/da-codec v0.10.0
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
 	github.com/shopspring/decimal v1.3.1
 	github.com/stretchr/testify v1.10.0
 	github.com/urfave/cli/v2 v2.25.7
@@ -253,10 +253,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
-github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
+github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
+github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
74
coordinator/internal/config/proxy_config.go
Normal file
74
coordinator/internal/config/proxy_config.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package config

import (
	"encoding/json"
	"os"
	"path/filepath"

	"scroll-tech/common/database"
	"scroll-tech/common/utils"
)

// ProxyManager loads proxy configuration items.
type ProxyManager struct {
	// Zk verifier config, which helps confine the connected provers.
	Verifier *VerifierConfig  `json:"verifier"`
	Client   *ProxyClient     `json:"proxy_cli"`
	Auth     *Auth            `json:"auth"`
	DB       *database.Config `json:"db,omitempty"`
}

// Normalize fills optional client fields from their manager-level defaults.
func (m *ProxyManager) Normalize() {
	if m.Client.Secret == "" {
		m.Client.Secret = m.Auth.Secret
	}

	if m.Client.ProxyVersion == "" {
		m.Client.ProxyVersion = m.Verifier.MinProverVersion
	}
}

// ProxyClient is the client configuration for connecting to an upstream as a client.
type ProxyClient struct {
	ProxyName    string `json:"proxy_name"`
	ProxyVersion string `json:"proxy_version,omitempty"`
	Secret       string `json:"secret,omitempty"`
}

// UpStream is the configuration of an upstream coordinator.
type UpStream struct {
	BaseUrl              string `json:"base_url"`
	RetryCount           uint   `json:"retry_count"`
	RetryWaitTime        uint   `json:"retry_wait_time_sec"`
	ConnectionTimeoutSec uint   `json:"connection_timeout_sec"`
	CompatibleMode       bool   `json:"compatible_mode,omitempty"`
}

// ProxyConfig loads proxy configuration items.
type ProxyConfig struct {
	ProxyManager *ProxyManager        `json:"proxy_manager"`
	ProxyName    string               `json:"proxy_name"`
	Coordinators map[string]*UpStream `json:"coordinators"`
}

// NewProxyConfig returns a new instance of ProxyConfig.
func NewProxyConfig(file string) (*ProxyConfig, error) {
	buf, err := os.ReadFile(filepath.Clean(file))
	if err != nil {
		return nil, err
	}

	cfg := &ProxyConfig{}
	err = json.Unmarshal(buf, cfg)
	if err != nil {
		return nil, err
	}

	// Override config with environment variables
	err = utils.OverrideConfigWithEnv(cfg, "SCROLL_COORDINATOR_PROXY")
	if err != nil {
		return nil, err
	}

	return cfg, nil
}
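
For reference, a minimal sketch of how this config is typically loaded; the file path, field values, and the verifier/auth JSON shapes below are hypothetical placeholders inferred from the struct tags above, not a reference configuration:

package main

import (
	"fmt"

	"scroll-tech/coordinator/internal/config"
)

func main() {
	// Hypothetical file; an example shape could be:
	//   {
	//     "proxy_name": "proxy-example",
	//     "proxy_manager": {
	//       "proxy_cli": {"proxy_name": "proxy-example", "secret": "example-secret"},
	//       "verifier": {...}, "auth": {...}
	//     },
	//     "coordinators": {
	//       "primary": {
	//         "base_url": "https://coordinator.example.com",
	//         "retry_count": 3,
	//         "retry_wait_time_sec": 5,
	//         "connection_timeout_sec": 30
	//       }
	//     }
	//   }
	cfg, err := config.NewProxyConfig("conf/proxy_config.json")
	if err != nil {
		panic(err)
	}
	cfg.ProxyManager.Normalize() // fills client secret/version defaults
	for name, up := range cfg.Coordinators {
		fmt.Println(name, up.BaseUrl)
	}
}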
@@ -19,28 +19,56 @@ type AuthController struct {
	loginLogic *auth.LoginLogic
}

// NewAuthController returns a LoginController instance
func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController {
func NewAuthControllerWithLogic(loginLogic *auth.LoginLogic) *AuthController {
	return &AuthController{
		loginLogic: auth.NewLoginLogic(db, cfg, vf),
		loginLogic: loginLogic,
	}
}

// Login the api controller for login
// NewAuthController returns a LoginController instance
func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController {
	return &AuthController{
		loginLogic: auth.NewLoginLogic(db, cfg.ProverManager.Verifier, vf),
	}
}

// Login is the api controller for login, used as the Authenticator in JWT.
// It can work in two modes: the full process for a normal login, or, if the
// login request is posted from a proxy, a simpler process to log in a client
func (a *AuthController) Login(c *gin.Context) (interface{}, error) {

	// check if the login was posted by a proxy
	var viaProxy bool
	if proverType, proverTypeExist := c.Get(types.ProverProviderTypeKey); proverTypeExist {
		proverType := uint8(proverType.(float64))
		viaProxy = proverType == types.ProverProviderTypeProxy
	}

	var login types.LoginParameter
	if err := c.ShouldBind(&login); err != nil {
		return "", fmt.Errorf("missing the public_key, err:%w", err)
	}

	// check the login parameter's token is equal to the bearer token; the Authorization header must exist,
	// and if it does not, the jwt middleware will intercept the request
	bearerToken := c.GetHeader("Authorization")
	if bearerToken != "Bearer "+login.Message.Challenge {
		return "", errors.New("challenge check failure: challenge string mismatch")
	// if not via proxy, proceed with the normal login
	if !viaProxy {
		// check the login parameter's token is equal to the bearer token; the Authorization header must exist,
		// and if it does not, the jwt middleware will intercept the request
		bearerToken := c.GetHeader("Authorization")
		if bearerToken != "Bearer "+login.Message.Challenge {
			return "", errors.New("challenge check failure: challenge string mismatch")
		}

		if err := auth.VerifyMsg(&login); err != nil {
			return "", err
		}

		// check whether the challenge has been used; if so, return failure
		if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
			return "", fmt.Errorf("login insert challenge string failure:%w", err)
		}
	}

	if err := a.loginLogic.Check(&login); err != nil {
	if err := a.loginLogic.CompatiblityCheck(&login); err != nil {
		return "", fmt.Errorf("check the login parameter failure: %w", err)
	}

@@ -49,11 +77,6 @@ func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
		return "", fmt.Errorf("prover hard fork name failure:%w", err)
	}

	// check whether the challenge has been used; if so, return failure
	if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
		return "", fmt.Errorf("login insert challenge string failure:%w", err)
	}

	returnData := types.LoginParameterWithHardForkName{
		HardForkName:   hardForkNames,
		LoginParameter: login,
@@ -85,10 +108,6 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
		c.Set(types.ProverName, proverName)
	}

	if publicKey, ok := claims[types.PublicKey]; ok {
		c.Set(types.PublicKey, publicKey)
	}

	if proverVersion, ok := claims[types.ProverVersion]; ok {
		c.Set(types.ProverVersion, proverVersion)
	}
@@ -101,5 +120,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
		c.Set(types.ProverProviderTypeKey, providerType)
	}

	if publicKey, ok := claims[types.PublicKey]; ok {
		return publicKey
	}

	return nil
}
150 coordinator/internal/controller/proxy/auth.go Normal file
@@ -0,0 +1,150 @@
package proxy

import (
	"context"
	"fmt"
	"sync"
	"time"

	jwt "github.com/appleboy/gin-jwt/v2"
	"github.com/gin-gonic/gin"
	"github.com/scroll-tech/go-ethereum/log"

	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/controller/api"
	"scroll-tech/coordinator/internal/logic/auth"
	"scroll-tech/coordinator/internal/logic/verifier"
	"scroll-tech/coordinator/internal/types"
)

// AuthController is the login API controller
type AuthController struct {
	apiLogin  *api.AuthController
	clients   Clients
	proverMgr *ProverManager
}

const upstreamConnTimeout = time.Second * 5
const LoginParamCache = "login_param"
const ProverTypesKey = "prover_types"
const SignatureKey = "prover_signature"

// NewAuthController returns a LoginController instance
func NewAuthController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager) *AuthController {
	// use a dummy Verifier to create the login logic (we do not use any information in the verifier)
	dummyVf := verifier.Verifier{
		OpenVMVkMap: make(map[string]struct{}),
	}
	loginLogic := auth.NewLoginLogicWithSimpleDeduplicator(cfg.ProxyManager.Verifier, &dummyVf)

	authController := &AuthController{
		apiLogin:  api.NewAuthControllerWithLogic(loginLogic),
		clients:   clients,
		proverMgr: proverMgr,
	}

	return authController
}

// Login extends the Login handler in the api controller
func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
	loginRes, err := a.apiLogin.Login(c)
	if err != nil {
		return nil, err
	}
	loginParam := loginRes.(types.LoginParameterWithHardForkName)

	if loginParam.LoginParameter.Message.ProverProviderType == types.ProverProviderTypeProxy {
		return nil, fmt.Errorf("proxy does not support recursive login")
	}

	session := a.proverMgr.GetOrCreate(loginParam.PublicKey)
	log.Debug("start handling login", "cli", loginParam.Message.ProverName)

	loginCtx, cf := context.WithTimeout(context.Background(), upstreamConnTimeout)
	var wg sync.WaitGroup
	for _, cli := range a.clients {
		wg.Add(1)
		go func(cli Client) {
			defer wg.Done()
			if err := session.ProxyLogin(loginCtx, cli, &loginParam.LoginParameter); err != nil {
				log.Error("proxy login failed during token cache update",
					"userKey", loginParam.PublicKey,
					"upstream", cli.Name(),
					"error", err)
			}
		}(cli)
	}
	go func(cliName string) {
		wg.Wait()
		cf()
		log.Debug("first login attempt has completed", "cli", cliName)
	}(loginParam.Message.ProverName)

	return loginParam.LoginParameter, nil
}

// PayloadFunc returns jwt.MapClaims with {public key, prover name}.
func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
	v, ok := data.(types.LoginParameter)
	if !ok {
		log.Error("PayloadFunc received unexpected type", "type", fmt.Sprintf("%T", data))
		return jwt.MapClaims{}
	}

	return jwt.MapClaims{
		types.PublicKey:             v.PublicKey,
		types.ProverName:            v.Message.ProverName,
		types.ProverVersion:         v.Message.ProverVersion,
		types.ProverProviderTypeKey: v.Message.ProverProviderType,
		SignatureKey:                v.Signature,
		ProverTypesKey:              v.Message.ProverTypes,
	}
}

// IdentityHandler replies to the client for /login
func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
	claims := jwt.ExtractClaims(c)
	loginParam := &types.LoginParameter{}

	if proverName, ok := claims[types.ProverName]; ok {
		loginParam.Message.ProverName, _ = proverName.(string)
	}

	if proverVersion, ok := claims[types.ProverVersion]; ok {
		loginParam.Message.ProverVersion, _ = proverVersion.(string)
	}

	if providerType, ok := claims[types.ProverProviderTypeKey]; ok {
		num, _ := providerType.(float64)
		loginParam.Message.ProverProviderType = types.ProverProviderType(num)
	}

	if signature, ok := claims[SignatureKey]; ok {
		loginParam.Signature, _ = signature.(string)
	}

	if proverTypes, ok := claims[ProverTypesKey]; ok {
		arr, _ := proverTypes.([]any)
		for _, elm := range arr {
			num, _ := elm.(float64)
			loginParam.Message.ProverTypes = append(loginParam.Message.ProverTypes, types.ProverType(num))
		}
	}

	if publicKey, ok := claims[types.PublicKey]; ok {
		loginParam.PublicKey, _ = publicKey.(string)
	}

	if loginParam.PublicKey != "" {
		c.Set(LoginParamCache, loginParam)
		c.Set(types.ProverName, loginParam.Message.ProverName)
		// the public key will also be set, since we have specified public_key as the identity key
		return loginParam.PublicKey
	}

	return nil
}
246 coordinator/internal/controller/proxy/client.go Normal file
@@ -0,0 +1,246 @@
//nolint:errcheck,bodyclose // body is closed in the following handleHttpResp call
package proxy

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"time"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"

	ctypes "scroll-tech/common/types"

	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/types"
)

// ProxyCli is the client interface used by the proxy itself against an upstream
type ProxyCli interface {
	Login(ctx context.Context, genLogin func(string) (*types.LoginParameter, error)) (*ctypes.Response, error)
	ProxyLogin(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error)
	Token() string
	Reset()
}

// ProverCli is the client interface used on behalf of an individual prover
type ProverCli interface {
	GetTask(ctx context.Context, param *types.GetTaskParameter) (*ctypes.Response, error)
	SubmitProof(ctx context.Context, param *types.SubmitProofParameter) (*ctypes.Response, error)
}

// upClient wraps an http client with a preset host for coordinator API calls
type upClient struct {
	httpClient     *http.Client
	baseURL        string
	loginToken     string
	compatibleMode bool
	resetFromMgr   func()
}

// newUpClient creates a new upClient with the specified host
func newUpClient(cfg *config.UpStream) *upClient {
	return &upClient{
		httpClient: &http.Client{
			Timeout: time.Duration(cfg.ConnectionTimeoutSec) * time.Second,
		},
		baseURL:        cfg.BaseUrl,
		compatibleMode: cfg.CompatibleMode,
	}
}

func (c *upClient) Reset() {
	if c.resetFromMgr != nil {
		c.resetFromMgr()
	}
}

func (c *upClient) Token() string {
	return c.loginToken
}

// loginSchema is a parsable schema definition for login responses
type loginSchema struct {
	Time  string `json:"time"`
	Token string `json:"token"`
}

// Login performs the complete login process: get a challenge, then log in
func (c *upClient) Login(ctx context.Context, genLogin func(string) (*types.LoginParameter, error)) (*ctypes.Response, error) {
	// Step 1: Get challenge
	url := fmt.Sprintf("%s/coordinator/v1/challenge", c.baseURL)

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create challenge request: %w", err)
	}

	challengeResp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to get challenge: %w", err)
	}

	parsedResp, err := handleHttpResp(challengeResp)
	if err != nil {
		return nil, err
	} else if parsedResp.ErrCode != 0 {
		return nil, fmt.Errorf("challenge failed: %d (%s)", parsedResp.ErrCode, parsedResp.ErrMsg)
	}

	// Step 2: Parse challenge response
	var challengeSchema loginSchema
	if err := parsedResp.DecodeData(&challengeSchema); err != nil {
		return nil, fmt.Errorf("failed to parse challenge response: %w", err)
	}

	// Step 3: Use the token from the challenge as the Bearer token for login
	url = fmt.Sprintf("%s/coordinator/v1/login", c.baseURL)

	param, err := genLogin(challengeSchema.Token)
	if err != nil {
		return nil, fmt.Errorf("failed to setup login parameter: %w", err)
	}

	jsonData, err := json.Marshal(param)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal login parameter: %w", err)
	}

	req, err = http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		return nil, fmt.Errorf("failed to create login request: %w", err)
	}

	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+challengeSchema.Token)

	loginResp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to perform login request: %w", err)
	}
	return handleHttpResp(loginResp)
}

func handleHttpResp(resp *http.Response) (*ctypes.Response, error) {
	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusUnauthorized {
		defer resp.Body.Close()
		var respWithData ctypes.Response
		// Note: the body is consumed after decoding; the caller must not read it again
		if err := json.NewDecoder(resp.Body).Decode(&respWithData); err == nil {
			return &respWithData, nil
		} else {
			return nil, fmt.Errorf("parsing expected response failed: %v", err)
		}
	}
	return nil, fmt.Errorf("request failed with status: %d", resp.StatusCode)
}

func (c *upClient) proxyLoginCompatibleMode(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error) {
	mimePrivK, err := buildPrivateKey([]byte(param.PublicKey))
	if err != nil {
		return nil, err
	}
	mimePkHex := common.Bytes2Hex(crypto.CompressPubkey(&mimePrivK.PublicKey))

	genLoginParam := func(challenge string) (*types.LoginParameter, error) {
		// Create login parameter with proxy settings
		loginParam := &types.LoginParameter{
			Message:   param.Message,
			PublicKey: mimePkHex,
		}
		loginParam.Message.Challenge = challenge

		// Sign the message with the private key
		if err := loginParam.SignWithKey(mimePrivK); err != nil {
			return nil, fmt.Errorf("failed to sign login parameter: %w", err)
		}

		return loginParam, nil
	}

	return c.Login(ctx, genLoginParam)
}

// ProxyLogin makes a POST request to /v1/proxy_login with a LoginParameter
func (c *upClient) ProxyLogin(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error) {
	if c.compatibleMode {
		return c.proxyLoginCompatibleMode(ctx, param)
	}

	url := fmt.Sprintf("%s/coordinator/v1/proxy_login", c.baseURL)

	jsonData, err := json.Marshal(param)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal proxy login parameter: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		return nil, fmt.Errorf("failed to create proxy login request: %w", err)
	}

	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+c.loginToken)

	proxyLoginResp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to perform proxy login request: %w", err)
	}
	return handleHttpResp(proxyLoginResp)
}

// GetTask makes a POST request to /v1/get_task with a GetTaskParameter
func (c *upClient) GetTask(ctx context.Context, param *types.GetTaskParameter) (*ctypes.Response, error) {
	url := fmt.Sprintf("%s/coordinator/v1/get_task", c.baseURL)

	jsonData, err := json.Marshal(param)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal get task parameter: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		return nil, fmt.Errorf("failed to create get task request: %w", err)
	}

	req.Header.Set("Content-Type", "application/json")
	if c.loginToken != "" {
		req.Header.Set("Authorization", "Bearer "+c.loginToken)
	}

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	return handleHttpResp(resp)
}

// SubmitProof makes a POST request to /v1/submit_proof with a SubmitProofParameter
func (c *upClient) SubmitProof(ctx context.Context, param *types.SubmitProofParameter) (*ctypes.Response, error) {
	url := fmt.Sprintf("%s/coordinator/v1/submit_proof", c.baseURL)

	jsonData, err := json.Marshal(param)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal submit proof parameter: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		return nil, fmt.Errorf("failed to create submit proof request: %w", err)
	}

	req.Header.Set("Content-Type", "application/json")
	if c.loginToken != "" {
		req.Header.Set("Authorization", "Bearer "+c.loginToken)
	}

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	return handleHttpResp(resp)
}
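
As a usage sketch (same package; the URL and prover name are placeholders, not values from the source): Login drives the two-step challenge/login round-trip, handing the challenge token to the genLogin callback, which must return a signed LoginParameter. doLogin in client_manager.go below follows this same pattern.

ctx := context.Background()
cli := newUpClient(&config.UpStream{
	BaseUrl:              "https://coordinator.example.com", // placeholder
	ConnectionTimeoutSec: 30,
})
resp, err := cli.Login(ctx, func(challenge string) (*types.LoginParameter, error) {
	p := &types.LoginParameter{}
	p.Message.Challenge = challenge
	p.Message.ProverName = "example-client" // placeholder
	// ...sign here, e.g. p.SignWithKey(privKey), before returning
	return p, nil
})
if err == nil && resp.ErrCode == 0 {
	var ls loginSchema
	if derr := resp.DecodeData(&ls); derr == nil {
		cli.loginToken = ls.Token // cache the bearer token for later calls
	}
}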
220 coordinator/internal/controller/proxy/client_manager.go Normal file
@@ -0,0 +1,220 @@
package proxy

import (
	"context"
	"crypto/ecdsa"
	"fmt"
	"sync"
	"time"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/log"

	"scroll-tech/common/version"

	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/types"
)

// Client provides access to an upstream coordinator in two identities.
type Client interface {
	// Client returns a client for accessing the upstream coordinator with the
	// specified identity, so a prover can contact the coordinator as itself
	Client(string) ProverCli
	// ClientAsProxy returns the client for accessing the upstream as the proxy itself
	ClientAsProxy(context.Context) ProxyCli
	Name() string
}

type ClientManager struct {
	name    string
	cliCfg  *config.ProxyClient
	cfg     *config.UpStream
	privKey *ecdsa.PrivateKey

	cachedCli struct {
		sync.RWMutex
		cli           *upClient
		completionCtx context.Context
	}
}

// buildPrivateKey safely transforms arbitrary bytes into a valid private key
func buildPrivateKey(inputBytes []byte) (*ecdsa.PrivateKey, error) {
	// Try appending bytes from 0x0 to 0x20 until we get a valid private key
	for appendByte := byte(0x0); appendByte <= 0x20; appendByte++ {
		// Append the byte to the input
		extendedBytes := append(inputBytes, appendByte)

		// Calculate a 256-bit hash
		hash := crypto.Keccak256(extendedBytes)

		// Try to create a private key from the hash
		if k, err := crypto.ToECDSA(hash); err == nil {
			return k, nil
		}
	}

	return nil, fmt.Errorf("failed to generate valid private key from input bytes")
}

func NewClientManager(name string, cliCfg *config.ProxyClient, cfg *config.UpStream) (*ClientManager, error) {
	log.Info("init client", "name", name, "upcfg", cfg.BaseUrl, "compatible mode", cfg.CompatibleMode)
	privKey, err := buildPrivateKey([]byte(cliCfg.Secret))
	if err != nil {
		return nil, err
	}

	return &ClientManager{
		name:    name,
		privKey: privKey,
		cfg:     cfg,
		cliCfg:  cliCfg,
	}, nil
}

type ctxKeyType string

const loginCliKey ctxKeyType = "cli"

func (cliMgr *ClientManager) doLogin(ctx context.Context, loginCli *upClient) {
	if cliMgr.cfg.CompatibleMode {
		loginCli.loginToken = "dummy"
		log.Info("Skip login process for compatible mode")
		return
	}

	// Calculate a wait time between 2 seconds and cfg.RetryWaitTime
	minWait := 2 * time.Second
	waitDuration := time.Duration(cliMgr.cfg.RetryWaitTime) * time.Second
	if waitDuration < minWait {
		waitDuration = minWait
	}

	for {
		log.Info("proxy attempting login to upstream coordinator", "name", cliMgr.name)
		loginResp, err := loginCli.Login(ctx, cliMgr.genLoginParam)
		if err == nil && loginResp.ErrCode == 0 {
			var loginResult loginSchema
			err = loginResp.DecodeData(&loginResult)
			if err != nil {
				log.Error("login parsing data fail", "error", err)
			} else {
				loginCli.loginToken = loginResult.Token
				log.Info("login to upstream coordinator successful", "name", cliMgr.name, "time", loginResult.Time)
				// TODO: we need to parse time if we start making use of it
				return
			}
		} else if err != nil {
			log.Error("login process fail", "error", err)
		} else {
			log.Error("login got fail resp", "code", loginResp.ErrCode, "msg", loginResp.ErrMsg)
		}

		log.Info("login to upstream coordinator failed, retrying", "name", cliMgr.name, "error", err, "waitDuration", waitDuration)

		timer := time.NewTimer(waitDuration)
		select {
		case <-ctx.Done():
			timer.Stop()
			return
		case <-timer.C:
			// Continue to the next retry
		}
	}
}

func (cliMgr *ClientManager) Name() string {
	return cliMgr.name
}

func (cliMgr *ClientManager) Client(token string) ProverCli {
	loginCli := newUpClient(cliMgr.cfg)
	loginCli.loginToken = token
	return loginCli
}

func (cliMgr *ClientManager) ClientAsProxy(ctx context.Context) ProxyCli {
	cliMgr.cachedCli.RLock()
	if cliMgr.cachedCli.cli != nil {
		defer cliMgr.cachedCli.RUnlock()
		return cliMgr.cachedCli.cli
	}
	cliMgr.cachedCli.RUnlock()

	cliMgr.cachedCli.Lock()
	if cliMgr.cachedCli.cli != nil {
		defer cliMgr.cachedCli.Unlock()
		return cliMgr.cachedCli.cli
	}

	var completionCtx context.Context
	// Check if the completion context is set
	if cliMgr.cachedCli.completionCtx != nil {
		completionCtx = cliMgr.cachedCli.completionCtx
	} else {
		// Set a new completion context and launch the login goroutine
		ctx, completionDone := context.WithCancel(context.TODO())
		loginCli := newUpClient(cliMgr.cfg)
		loginCli.resetFromMgr = func() {
			cliMgr.cachedCli.Lock()
			if cliMgr.cachedCli.cli == loginCli {
				log.Info("cached client cleared", "name", cliMgr.name)
				cliMgr.cachedCli.cli = nil
			}
			cliMgr.cachedCli.Unlock()
		}
		completionCtx = context.WithValue(ctx, loginCliKey, loginCli)
		cliMgr.cachedCli.completionCtx = completionCtx

		// Launch the keep-login goroutine
		go func() {
			defer completionDone()
			cliMgr.doLogin(context.Background(), loginCli)

			cliMgr.cachedCli.Lock()
			cliMgr.cachedCli.cli = loginCli
			cliMgr.cachedCli.completionCtx = nil
			cliMgr.cachedCli.Unlock()
		}()
	}
	cliMgr.cachedCli.Unlock()

	// Wait for completion or request cancellation
	select {
	case <-ctx.Done():
		return nil
	case <-completionCtx.Done():
		cli := completionCtx.Value(loginCliKey).(*upClient)
		return cli
	}
}

func (cliMgr *ClientManager) genLoginParam(challenge string) (*types.LoginParameter, error) {
	// Generate the public key string
	publicKeyHex := common.Bytes2Hex(crypto.CompressPubkey(&cliMgr.privKey.PublicKey))

	// Create login parameter with proxy settings
	loginParam := &types.LoginParameter{
		Message: types.Message{
			Challenge:          challenge,
			ProverName:         cliMgr.cliCfg.ProxyName,
			ProverVersion:      version.Version,
			ProverProviderType: types.ProverProviderTypeProxy,
			ProverTypes:        []types.ProverType{}, // Default empty
			VKs:                []string{},           // Default empty
		},
		PublicKey: publicKeyHex,
	}

	// Sign the message with the private key
	if err := loginParam.SignWithKey(cliMgr.privKey); err != nil {
		return nil, fmt.Errorf("failed to sign login parameter: %w", err)
	}

	return loginParam, nil
}
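
One design note worth making explicit: buildPrivateKey is deterministic, so the same configured secret always derives the same ECDSA key and hence a stable proxy identity across restarts. A small sketch (same package; the secret value is a placeholder):

k1, _ := buildPrivateKey([]byte("example-secret"))
k2, _ := buildPrivateKey([]byte("example-secret"))
pk1 := common.Bytes2Hex(crypto.CompressPubkey(&k1.PublicKey))
pk2 := common.Bytes2Hex(crypto.CompressPubkey(&k2.PublicKey))
fmt.Println(pk1 == pk2) // true: identical secret, identical identity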
44 coordinator/internal/controller/proxy/controller.go Normal file
@@ -0,0 +1,44 @@
package proxy

import (
	"github.com/prometheus/client_golang/prometheus"
	"gorm.io/gorm"

	"scroll-tech/coordinator/internal/config"
)

var (
	// GetTask the prover task controller
	GetTask *GetTaskController
	// SubmitProof the submit proof controller
	SubmitProof *SubmitProofController
	// Auth the auth controller
	Auth *AuthController
)

// Clients manages a set of thread-safe clients for requesting upstream
// coordinators
type Clients map[string]Client

// InitController inits the Controller with the database
func InitController(cfg *config.ProxyConfig, db *gorm.DB, reg prometheus.Registerer) {
	// normalize cfg
	cfg.ProxyManager.Normalize()

	clients := make(map[string]Client)

	for nm, upCfg := range cfg.Coordinators {
		cli, err := NewClientManager(nm, cfg.ProxyManager.Client, upCfg)
		if err != nil {
			panic("create new client fail")
		}
		clients[cli.Name()] = cli
	}

	proverManager := NewProverManagerWithPersistent(100, db)
	priorityManager := NewPriorityUpstreamManagerPersistent(db)

	Auth = NewAuthController(cfg, clients, proverManager)
	GetTask = NewGetTaskController(cfg, clients, proverManager, priorityManager, reg)
	SubmitProof = NewSubmitProofController(cfg, clients, proverManager, priorityManager, reg)
}
229 coordinator/internal/controller/proxy/get_task.go Normal file
@@ -0,0 +1,229 @@
package proxy

import (
	"fmt"
	"math/rand"
	"sync"

	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/common/types"

	"scroll-tech/coordinator/internal/config"
	coordinatorType "scroll-tech/coordinator/internal/types"
)

func getSessionData(ctx *gin.Context) (string, string) {
	publicKeyData, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
	publicKey, castOk := publicKeyData.(string)
	if !publicKeyExist || !castOk {
		nerr := fmt.Errorf("no public key binding: %v", publicKeyData)
		log.Warn("get_task parameter fail", "error", nerr)

		types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
		return "", ""
	}

	publicNameData, publicNameExist := ctx.Get(coordinatorType.ProverName)
	publicName, castOk := publicNameData.(string)
	if !publicNameExist || !castOk {
		log.Error("no public name binding for unknown reason, but we still forward with name = 'unknown'", "data", publicNameData)
		publicName = "unknown"
	}

	return publicKey, publicName
}

// PriorityUpstreamManager manages priority upstream mappings with thread safety
type PriorityUpstreamManager struct {
	sync.RWMutex
	*proverPriorityPersist
	data map[string]string
}

// NewPriorityUpstreamManager creates a new PriorityUpstreamManager
func NewPriorityUpstreamManager() *PriorityUpstreamManager {
	return &PriorityUpstreamManager{
		data: make(map[string]string),
	}
}

// NewPriorityUpstreamManagerPersistent creates a new PriorityUpstreamManager backed by a persistent store
func NewPriorityUpstreamManagerPersistent(db *gorm.DB) *PriorityUpstreamManager {
	return &PriorityUpstreamManager{
		data:                  make(map[string]string),
		proverPriorityPersist: NewProverPriorityPersist(db),
	}
}

// Get retrieves the priority upstream for a given key
func (p *PriorityUpstreamManager) Get(key string) (string, bool) {
	p.RLock()
	value, exists := p.data[key]
	p.RUnlock()

	if !exists {
		if v, err := p.proverPriorityPersist.Get(key); err != nil {
			log.Error("persistent priority record read failure", "error", err, "key", key)
		} else if v != "" {
			log.Debug("restore record from persistent layer", "key", key, "value", v)
			return v, true
		}
	}

	return value, exists
}

// Set sets the priority upstream for a given key
func (p *PriorityUpstreamManager) Set(key, value string) {
	defer func() {
		if err := p.proverPriorityPersist.Update(key, value); err != nil {
			log.Error("update priority record failure", "error", err, "key", key, "value", value)
		}
	}()
	p.Lock()
	defer p.Unlock()
	p.data[key] = value
}

// Delete removes the priority upstream for a given key
func (p *PriorityUpstreamManager) Delete(key string) {
	defer func() {
		if err := p.proverPriorityPersist.Del(key); err != nil {
			log.Error("delete priority record failure", "error", err, "key", key)
		}
	}()
	p.Lock()
	defer p.Unlock()
	delete(p.data, key)
}

// GetTaskController is the get prover task api controller
type GetTaskController struct {
	proverMgr        *ProverManager
	clients          Clients
	priorityUpstream *PriorityUpstreamManager

	//workingRnd *rand.Rand
	//getTaskAccessCounter *prometheus.CounterVec
}

// NewGetTaskController creates a get prover task controller
func NewGetTaskController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager, priorityMgr *PriorityUpstreamManager, reg prometheus.Registerer) *GetTaskController {
	// TODO: implement proxy get task controller initialization
	return &GetTaskController{
		priorityUpstream: priorityMgr,
		proverMgr:        proverMgr,
		clients:          clients,
	}
}

// func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
// 	// TODO: implement proxy get task access counter
// 	return nil
// }

// GetTasks gets an assigned chunk/batch task
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
	var getTaskParameter coordinatorType.GetTaskParameter
	if err := ctx.ShouldBind(&getTaskParameter); err != nil {
		nerr := fmt.Errorf("prover task parameter invalid, err:%w", err)
		types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
		return
	}

	publicKey, proverName := getSessionData(ctx)
	if publicKey == "" {
		return
	}

	session := ptc.proverMgr.Get(publicKey)
	if session == nil {
		nerr := fmt.Errorf("can not get session for prover %s", proverName)
		types.RenderFailure(ctx, types.InternalServerError, nerr)
		return
	}

	getTask := func(cli Client) (error, int) {
		log.Debug("Start get task", "up", cli.Name(), "cli", proverName)
		upStream := cli.Name()
		resp, err := session.GetTask(ctx, &getTaskParameter, cli)
		if err != nil {
			log.Error("Upstream error for get task", "error", err, "up", upStream, "cli", proverName)
			return err, types.ErrCoordinatorGetTaskFailure
		} else if resp.ErrCode != types.ErrCoordinatorEmptyProofData {
			if resp.ErrCode != 0 {
				// simply dispatch the error from upstream to the prover
				log.Error("Upstream has error resp for get task", "code", resp.ErrCode, "msg", resp.ErrMsg, "up", upStream, "cli", proverName)
				return fmt.Errorf("upstream failure: %s", resp.ErrMsg), resp.ErrCode
			}

			var task coordinatorType.GetTaskSchema
			if err = resp.DecodeData(&task); err == nil {
				task.TaskID = formUpstreamWithTaskName(upStream, task.TaskID)
				ptc.priorityUpstream.Set(publicKey, upStream)
				log.Debug("Upstream get task", "up", upStream, "cli", proverName, "taskID", task.TaskID, "taskType", task.TaskType)
				types.RenderSuccess(ctx, &task)
				return nil, 0
			} else {
				log.Error("Upstream has wrong data for get task", "error", err, "up", upStream, "cli", proverName)
				return fmt.Errorf("decode task fail: %v", err), types.InternalServerError
			}
		}

		return nil, resp.ErrCode
	}

	// if the priority upstream is set, try that upstream first until we get either a task resp or a no-task resp
	priorityUpstream, exist := ptc.priorityUpstream.Get(publicKey)
	if exist {
		cli := ptc.clients[priorityUpstream]
		log.Debug("Try get task from priority stream", "up", priorityUpstream, "cli", proverName)
		if cli != nil {
			err, code := getTask(cli)
			if err != nil {
				types.RenderFailure(ctx, code, err)
				return
			} else if code == 0 {
				// get task done and rendered, return
				return
			}
			// only continue if we got an empty task (the task has been removed in the upstream)
			log.Debug("can not get priority task from upstream", "up", priorityUpstream, "cli", proverName)
		} else {
			log.Warn("An upstream was removed or lost for some reason while running", "up", priorityUpstream, "cli", proverName)
		}
	}
	ptc.priorityUpstream.Delete(publicKey)

	// Create a slice to hold the keys
	keys := make([]string, 0, len(ptc.clients))
	for k := range ptc.clients {
		keys = append(keys, k)
	}

	// Shuffle the keys using a local RNG (avoid deprecated rand.Seed)
	rand.Shuffle(len(keys), func(i, j int) {
		keys[i], keys[j] = keys[j], keys[i]
	})

	// Iterate over the shuffled keys
	for _, n := range keys {
		if err, code := getTask(ptc.clients[n]); err == nil && code == 0 {
			// get task done
			return
		}
	}

	log.Debug("get no task from upstream", "cli", proverName)
	// if all get-task attempts failed, render an empty proof resp
	types.RenderFailure(ctx, types.ErrCoordinatorEmptyProofData, fmt.Errorf("get empty prover task"))
}
125 coordinator/internal/controller/proxy/persistent.go Normal file
@@ -0,0 +1,125 @@
package proxy

import (
	"time"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"

	"scroll-tech/coordinator/internal/types"
)

type proverDataPersist struct {
	db *gorm.DB
}

// NewProverDataPersist creates a persistence instance backed by a gorm DB.
func NewProverDataPersist(db *gorm.DB) *proverDataPersist {
	return &proverDataPersist{db: db}
}

// gorm model mapping to table `prover_sessions`
type proverSessionRecord struct {
	PublicKey string    `gorm:"column:public_key;not null"`
	Upstream  string    `gorm:"column:upstream;not null"`
	UpToken   string    `gorm:"column:up_token;not null"`
	Expired   time.Time `gorm:"column:expired;not null"`
}

func (proverSessionRecord) TableName() string { return "prover_sessions" }

// priority_upstream model
type priorityUpstreamRecord struct {
	PublicKey string `gorm:"column:public_key;not null"`
	Upstream  string `gorm:"column:upstream;not null"`
}

func (priorityUpstreamRecord) TableName() string { return "priority_upstream" }

// Get retrieves the proverSession for a given user key; returns nil if it does not exist yet
func (p *proverDataPersist) Get(userKey string) (*proverSession, error) {
	if p == nil || p.db == nil {
		return nil, nil
	}

	var rows []proverSessionRecord
	if err := p.db.Where("public_key = ?", userKey).Find(&rows).Error; err != nil || len(rows) == 0 {
		return nil, err
	}

	ret := &proverSession{
		proverToken: make(map[string]loginToken),
	}
	for _, r := range rows {
		ls := &types.LoginSchema{
			Token: r.UpToken,
			Time:  r.Expired,
		}
		ret.proverToken[r.Upstream] = loginToken{LoginSchema: ls}
	}
	return ret, nil
}

func (p *proverDataPersist) Update(userKey, up string, login *types.LoginSchema) error {
	if p == nil || p.db == nil || login == nil {
		return nil
	}

	rec := proverSessionRecord{
		PublicKey: userKey,
		Upstream:  up,
		UpToken:   login.Token,
		Expired:   login.Time,
	}

	return p.db.Clauses(
		clause.OnConflict{
			Columns:   []clause.Column{{Name: "public_key"}, {Name: "upstream"}},
			DoUpdates: clause.AssignmentColumns([]string{"up_token", "expired"}),
		},
	).Create(&rec).Error
}

type proverPriorityPersist struct {
	db *gorm.DB
}

func NewProverPriorityPersist(db *gorm.DB) *proverPriorityPersist {
	return &proverPriorityPersist{db: db}
}

func (p *proverPriorityPersist) Get(userKey string) (string, error) {
	if p == nil || p.db == nil {
		return "", nil
	}
	var rec priorityUpstreamRecord
	if err := p.db.Where("public_key = ?", userKey).First(&rec).Error; err != nil {
		if err != gorm.ErrRecordNotFound {
			return "", err
		} else {
			return "", nil
		}
	}
	return rec.Upstream, nil
}

func (p *proverPriorityPersist) Update(userKey, up string) error {
	if p == nil || p.db == nil {
		return nil
	}
	rec := priorityUpstreamRecord{PublicKey: userKey, Upstream: up}
	return p.db.Clauses(
		clause.OnConflict{
			Columns:   []clause.Column{{Name: "public_key"}},
			DoUpdates: clause.Assignments(map[string]interface{}{"upstream": up}),
		},
	).Create(&rec).Error
}

func (p *proverPriorityPersist) Del(userKey string) error {
	if p == nil || p.db == nil {
		return nil
	}
	return p.db.Where("public_key = ?", userKey).Delete(&priorityUpstreamRecord{}).Error
}
285 coordinator/internal/controller/proxy/prover_session.go Normal file
@@ -0,0 +1,285 @@
package proxy

import (
	"context"
	"fmt"
	"math"
	"sync"

	"gorm.io/gorm"

	"github.com/scroll-tech/go-ethereum/log"

	ctypes "scroll-tech/common/types"

	"scroll-tech/coordinator/internal/types"
)

type ProverManager struct {
	sync.RWMutex
	data               map[string]*proverSession
	willDeprecatedData map[string]*proverSession
	sizeLimit          int
	persistent         *proverDataPersist
}

func NewProverManager(size int) *ProverManager {
	return &ProverManager{
		data:               make(map[string]*proverSession),
		willDeprecatedData: make(map[string]*proverSession),
		sizeLimit:          size,
	}
}

func NewProverManagerWithPersistent(size int, db *gorm.DB) *ProverManager {
	return &ProverManager{
		data:               make(map[string]*proverSession),
		willDeprecatedData: make(map[string]*proverSession),
		sizeLimit:          size,
		persistent:         NewProverDataPersist(db),
	}
}

// Get retrieves the proverSession for a given user key; returns nil if it does not exist yet
func (m *ProverManager) Get(userKey string) (ret *proverSession) {
	defer func() {
		if ret == nil {
			var err error
			ret, err = m.persistent.Get(userKey)
			if err != nil {
				log.Error("Get persistent layer for prover tokens fail", "error", err)
			} else if ret != nil {
				log.Debug("restore record from persistent", "key", userKey, "token", ret.proverToken)
				ret.persistent = m.persistent
			}
		}

		if ret != nil {
			m.Lock()
			m.data[userKey] = ret
			m.Unlock()
		}
	}()

	m.RLock()
	defer m.RUnlock()
	if r, existed := m.data[userKey]; existed {
		return r
	} else {
		return m.willDeprecatedData[userKey]
	}
}

func (m *ProverManager) GetOrCreate(userKey string) *proverSession {
	if ret := m.Get(userKey); ret != nil {
		return ret
	}

	m.Lock()
	defer m.Unlock()

	ret := &proverSession{
		proverToken: make(map[string]loginToken),
		persistent:  m.persistent,
	}

	if len(m.data) >= m.sizeLimit {
		m.willDeprecatedData = m.data
		m.data = make(map[string]*proverSession)
	}

	m.data[userKey] = ret
	return ret
}

type loginToken struct {
	*types.LoginSchema
	phase uint
}

// proverSession tracks, per prover, the login tokens held for each upstream coordinator
type proverSession struct {
	persistent *proverDataPersist

	sync.RWMutex
	proverToken   map[string]loginToken
	completionCtx context.Context
}

func (c *proverSession) maintainLogin(ctx context.Context, cliMgr Client, up string, param *types.LoginParameter, phase uint) (result loginToken, nerr error) {
	c.Lock()
	curPhase := c.proverToken[up].phase
	if c.completionCtx != nil {
		waitctx := c.completionCtx
		c.Unlock()
		select {
		case <-waitctx.Done():
			return c.maintainLogin(ctx, cliMgr, up, param, phase)
		case <-ctx.Done():
			nerr = fmt.Errorf("ctx fail")
			return
		}
	}

	if phase < curPhase {
		// outdated login phase, give up
		log.Debug("drop outdated proxy login attempt", "upstream", up, "cli", param.Message.ProverName, "phase", phase, "now", curPhase)
		defer c.Unlock()
		return c.proverToken[up], nil
	}

	// occupy the update slot
	completeCtx, cf := context.WithCancel(ctx)
	defer cf()
	c.completionCtx = completeCtx
	defer func() {
		c.Lock()
		c.completionCtx = nil
		if result.LoginSchema != nil {
			c.proverToken[up] = result
			log.Info("maintain login status", "upstream", up, "cli", param.Message.ProverName, "phase", curPhase+1)
		}
		c.Unlock()
		if nerr != nil {
			log.Error("maintain login fail", "error", nerr, "upstream", up, "cli", param.Message.ProverName, "phase", curPhase)
		}
	}()
	c.Unlock()

	log.Debug("start proxy login process", "upstream", up, "cli", param.Message.ProverName)

	cli := cliMgr.ClientAsProxy(ctx)
	if cli == nil {
		nerr = fmt.Errorf("get upstream cli fail")
		return
	}

	resp, err := cli.ProxyLogin(ctx, param)
	if err != nil {
		nerr = fmt.Errorf("proxylogin fail: %v", err)
		return
	}

	if resp.ErrCode == ctypes.ErrJWTTokenExpired {
		log.Info("upstream token has expired, renew upstream connection", "up", up)
		cli.Reset()
		cli = cliMgr.ClientAsProxy(ctx)
		if cli == nil {
			nerr = fmt.Errorf("get upstream cli fail (secondary try)")
			return
		}

		// like the SDK, we try one more time if the upstream token is expired
		resp, err = cli.ProxyLogin(ctx, param)
		if err != nil {
			nerr = fmt.Errorf("proxylogin fail: %v", err)
			return
		}
	}

	if resp.ErrCode != 0 {
		nerr = fmt.Errorf("upstream fail: %d (%s)", resp.ErrCode, resp.ErrMsg)
		return
	}

	var loginResult loginSchema
	if err := resp.DecodeData(&loginResult); err != nil {
		nerr = err
		return
	}

	log.Debug("Proxy login done", "upstream", up, "cli", param.Message.ProverName)
	result = loginToken{
		LoginSchema: &types.LoginSchema{
			Token: loginResult.Token,
		},
		phase: curPhase + 1,
	}
	return
}

// const expireTolerant = 10 * time.Minute

// ProxyLogin makes a POST request to /v1/proxy_login with a LoginParameter
func (c *proverSession) ProxyLogin(ctx context.Context, cli Client, param *types.LoginParameter) error {
	up := cli.Name()
	c.RLock()
	existedToken := c.proverToken[up]
	c.RUnlock()

	newtoken, err := c.maintainLogin(ctx, cli, up, param, math.MaxUint)
	if newtoken.phase > existedToken.phase {
		if err := c.persistent.Update(param.PublicKey, up, newtoken.LoginSchema); err != nil {
			log.Error("Update persistent layer for prover tokens fail", "error", err)
		}
	}

	return err
}

// GetTask makes a POST request to /v1/get_task with a GetTaskParameter
func (c *proverSession) GetTask(ctx context.Context, param *types.GetTaskParameter, cliMgr Client) (*ctypes.Response, error) {
	up := cliMgr.Name()
	c.RLock()
	log.Debug("call get task", "up", up, "tokens", c.proverToken)
	token := c.proverToken[up]
	c.RUnlock()

	if token.LoginSchema != nil {
		resp, err := cliMgr.Client(token.Token).GetTask(ctx, param)
		if err != nil {
			return nil, err
		}
		if resp.ErrCode != ctypes.ErrJWTTokenExpired {
			return resp, nil
		}
	}

	// like the SDK, we try one more time if the upstream token is expired;
	// get the login param from ctx
	loginParam, ok := ctx.Value(LoginParamCache).(*types.LoginParameter)
	if !ok {
		return nil, fmt.Errorf("unexpected error: no login parameter in ctx")
	}

	newToken, err := c.maintainLogin(ctx, cliMgr, up, loginParam, token.phase)
	if err != nil {
		return nil, fmt.Errorf("update prover token fail: %v", err)
	}

	return cliMgr.Client(newToken.Token).GetTask(ctx, param)
}

// SubmitProof makes a POST request to /v1/submit_proof with a SubmitProofParameter
func (c *proverSession) SubmitProof(ctx context.Context, param *types.SubmitProofParameter, cliMgr Client) (*ctypes.Response, error) {
	up := cliMgr.Name()
	c.RLock()
	token := c.proverToken[up]
	c.RUnlock()

	if token.LoginSchema != nil {
		resp, err := cliMgr.Client(token.Token).SubmitProof(ctx, param)
		if err != nil {
			return nil, err
		}
		if resp.ErrCode != ctypes.ErrJWTTokenExpired {
			return resp, nil
		}
	}

	// like the SDK, we try one more time if the upstream token is expired;
	// get the login param from ctx
	loginParam, ok := ctx.Value(LoginParamCache).(*types.LoginParameter)
	if !ok {
		return nil, fmt.Errorf("unexpected error: no login parameter in ctx")
	}

	newToken, err := c.maintainLogin(ctx, cliMgr, up, loginParam, token.phase)
	if err != nil {
		return nil, fmt.Errorf("update prover token fail: %v", err)
	}

	return cliMgr.Client(newToken.Token).SubmitProof(ctx, param)
}
107 coordinator/internal/controller/proxy/prover_session_test.go Normal file
@@ -0,0 +1,107 @@
package proxy

import (
	"testing"
)

// TestProverManagerGetAndCreate validates basic creation and retrieval semantics.
func TestProverManagerGetAndCreate(t *testing.T) {
	pm := NewProverManager(2)

	if got := pm.Get("user1"); got != nil {
		t.Fatalf("expected nil for non-existent key, got: %+v", got)
	}

	sess1 := pm.GetOrCreate("user1")
	if sess1 == nil {
		t.Fatalf("expected non-nil session from GetOrCreate")
	}

	// Should be stable on subsequent Get
	if got := pm.Get("user1"); got != sess1 {
		t.Fatalf("expected same session pointer on Get, got different instance: %p vs %p", got, sess1)
	}
}

// TestProverManagerRolloverAndPromotion verifies rollover when sizeLimit is reached
// and that old entries are accessible and promoted back to the active data map.
func TestProverManagerRolloverAndPromotion(t *testing.T) {
	pm := NewProverManager(2)

	s1 := pm.GetOrCreate("u1")
	s2 := pm.GetOrCreate("u2")
	if s1 == nil || s2 == nil {
		t.Fatalf("expected sessions to be created for u1/u2")
	}

	// Precondition: data should contain 2 entries, no deprecated yet.
	pm.RLock()
	if len(pm.data) != 2 {
		pm.RUnlock()
		t.Fatalf("expected data len=2 before rollover, got %d", len(pm.data))
	}
	if len(pm.willDeprecatedData) != 0 {
		pm.RUnlock()
		t.Fatalf("expected willDeprecatedData len=0 before rollover, got %d", len(pm.willDeprecatedData))
	}
	pm.RUnlock()

	// Trigger rollover by creating a third key.
	s3 := pm.GetOrCreate("u3")
	if s3 == nil {
		t.Fatalf("expected session for u3 after rollover")
	}

	// After rollover: current data should only have u3, deprecated should hold u1 and u2.
	pm.RLock()
	if len(pm.data) != 1 {
		pm.RUnlock()
		t.Fatalf("expected data len=1 after rollover (only u3), got %d", len(pm.data))
	}
	if _, ok := pm.data["u3"]; !ok {
		pm.RUnlock()
		t.Fatalf("expected 'u3' to be in active data after rollover")
	}
	if len(pm.willDeprecatedData) != 2 {
		pm.RUnlock()
		t.Fatalf("expected willDeprecatedData len=2 after rollover, got %d", len(pm.willDeprecatedData))
	}
	pm.RUnlock()

	// Accessing an old key should return the same pointer and promote it to the active data map.
	got1 := pm.Get("u1")
	if got1 != s1 {
		t.Fatalf("expected same pointer for u1 after promotion, got %p want %p", got1, s1)
	}

	// The promotion should add it to active data (without enforcing the size limit on promotion).
	pm.RLock()
	if _, ok := pm.data["u1"]; !ok {
		pm.RUnlock()
		t.Fatalf("expected 'u1' to be present in active data after promotion")
	}
	if len(pm.data) != 2 {
		// Now should contain u3 and u1
		pm.RUnlock()
		t.Fatalf("expected data len=2 after promotion of u1, got %d", len(pm.data))
	}
	pm.RUnlock()

	// Access the other deprecated key and ensure behavior is consistent.
	got2 := pm.Get("u2")
	if got2 != s2 {
		t.Fatalf("expected same pointer for u2 after promotion, got %p want %p", got2, s2)
	}

	pm.RLock()
	if _, ok := pm.data["u2"]; !ok {
		pm.RUnlock()
		t.Fatalf("expected 'u2' to be present in active data after promotion")
	}
	// Note: promotion does not enforce sizeLimit, so data can grow beyond sizeLimit after promotions.
	if len(pm.data) != 3 {
		pm.RUnlock()
		t.Fatalf("expected data len=3 after promoting both u1 and u2, got %d", len(pm.data))
	}
	pm.RUnlock()
}
94 coordinator/internal/controller/proxy/submit_proof.go Normal file
@@ -0,0 +1,94 @@
package proxy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
// SubmitProofController the submit proof api controller
|
||||
type SubmitProofController struct {
|
||||
proverMgr *ProverManager
|
||||
clients Clients
|
||||
priorityUpstream *PriorityUpstreamManager
|
||||
}
|
||||
|
||||
// NewSubmitProofController create the submit proof api controller instance
|
||||
func NewSubmitProofController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager, priorityMgr *PriorityUpstreamManager, reg prometheus.Registerer) *SubmitProofController {
|
||||
return &SubmitProofController{
|
||||
proverMgr: proverMgr,
|
||||
clients: clients,
|
||||
priorityUpstream: priorityMgr,
|
||||
}
|
||||
}
|
||||
|
||||
func upstreamFromTaskName(taskID string) (string, string) {
|
||||
parts, rest, found := strings.Cut(taskID, ":")
|
||||
if found {
|
||||
return parts, rest
|
||||
}
|
||||
return "", parts
|
||||
}
|
||||
|
||||
func formUpstreamWithTaskName(upstream string, taskID string) string {
|
||||
return fmt.Sprintf("%s:%s", upstream, taskID)
|
||||
}
|
||||
|
||||
// SubmitProof prover submit the proof to coordinator
|
||||
func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
|
||||
|
||||
var submitParameter coordinatorType.SubmitProofParameter
|
||||
if err := ctx.ShouldBind(&submitParameter); err != nil {
|
||||
nerr := fmt.Errorf("prover submitProof parameter invalid, err:%w", err)
|
||||
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
|
||||
return
|
||||
}
|
||||
|
||||
publicKey, proverName := getSessionData(ctx)
|
||||
if publicKey == "" {
|
||||
return
|
||||
}
|
||||
|
||||
session := spc.proverMgr.Get(publicKey)
|
||||
if session == nil {
|
||||
nerr := fmt.Errorf("can not get session for prover %s", proverName)
|
||||
types.RenderFailure(ctx, types.InternalServerError, nerr)
|
||||
return
|
||||
}
|
||||
|
||||
upstream, realTaskID := upstreamFromTaskName(submitParameter.TaskID)
|
||||
cli, existed := spc.clients[upstream]
|
||||
if !existed {
|
||||
log.Warn("A upstream for submitting is removed or lost for some reason while running", "up", upstream)
|
||||
nerr := fmt.Errorf("Invalid upstream name (%s) from taskID %s", upstream, submitParameter.TaskID)
|
||||
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
|
||||
return
|
||||
}
|
||||
log.Debug("Start submitting", "up", upstream, "cli", proverName, "id", realTaskID, "status", submitParameter.Status)
|
||||
submitParameter.TaskID = realTaskID
|
||||
|
||||
resp, err := session.SubmitProof(ctx, &submitParameter, cli)
|
||||
if err != nil {
|
||||
log.Error("Upstream has error resp for submit", "error", err, "up", upstream, "cli", proverName, "taskID", realTaskID)
|
||||
types.RenderFailure(ctx, types.ErrCoordinatorGetTaskFailure, err)
|
||||
return
|
||||
} else if resp.ErrCode != 0 {
|
||||
log.Error("Upstream has error resp for get task", "code", resp.ErrCode, "msg", resp.ErrMsg, "up", upstream, "cli", proverName, "taskID", realTaskID)
|
||||
// simply dispatch the error from upstream to prover
|
||||
types.RenderFailure(ctx, resp.ErrCode, fmt.Errorf("%s", resp.ErrMsg))
|
||||
return
|
||||
} else {
|
||||
log.Debug("Submit proof to upstream", "up", upstream, "cli", proverName, "taskID", realTaskID)
|
||||
spc.priorityUpstream.Delete(publicKey)
|
||||
types.RenderSuccess(ctx, resp.Data)
|
||||
return
|
||||
}
|
||||
}
|
||||
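The proxy namespaces task IDs as "upstream:taskID" so that a later submit can be routed back to the coordinator that issued the task. A minimal standalone sketch of that round trip (helper bodies mirror the file above; the main function is illustrative only):

package main

import (
    "fmt"
    "strings"
)

// Encode and decode "upstream:taskID", mirroring the helpers above.
func formUpstreamWithTaskName(upstream, taskID string) string {
    return fmt.Sprintf("%s:%s", upstream, taskID)
}

func upstreamFromTaskName(taskID string) (string, string) {
    before, after, found := strings.Cut(taskID, ":")
    if found {
        return before, after
    }
    // No separator: a legacy ID without an upstream prefix.
    return "", before
}

func main() {
    wire := formUpstreamWithTaskName("coordinator_0", "task-abc")
    up, realID := upstreamFromTaskName(wire)
    fmt.Println(up, realID) // coordinator_0 task-abc

    up, realID = upstreamFromTaskName("task-legacy")
    fmt.Println(up == "", realID) // true task-legacy
}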
@@ -1,6 +1,7 @@
package auth

import (
    "context"
    "errors"
    "fmt"
    "strings"
@@ -19,45 +20,72 @@ import (

// LoginLogic the auth logic
type LoginLogic struct {
    cfg          *config.Config
    challengeOrm *orm.Challenge
    cfg          *config.VerifierConfig
    deduplicator ChallengeDeduplicator

    openVmVks map[string]struct{}

    proverVersionHardForkMap map[string]string
}

type ChallengeDeduplicator interface {
    InsertChallenge(ctx context.Context, challengeString string) error
}

type SimpleDeduplicator struct {
}

func (s *SimpleDeduplicator) InsertChallenge(ctx context.Context, challengeString string) error {
    return nil
}

// NewLoginLogicWithSimpleDeduplicator new a LoginLogic which does not use the db to deduplicate challenges
func NewLoginLogicWithSimpleDeduplicator(vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
    return newLoginLogic(&SimpleDeduplicator{}, vcfg, vf)
}

// NewLoginLogic new a LoginLogic
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
func NewLoginLogic(db *gorm.DB, vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
    return newLoginLogic(orm.NewChallenge(db), vcfg, vf)
}

func newLoginLogic(deduplicator ChallengeDeduplicator, vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
    proverVersionHardForkMap := make(map[string]string)

    for _, cfg := range cfg.ProverManager.Verifier.Verifiers {
    for _, cfg := range vcfg.Verifiers {
        proverVersionHardForkMap[cfg.ForkName] = cfg.MinProverVersion
    }

    return &LoginLogic{
        cfg: cfg,
        cfg:                      vcfg,
        openVmVks:                vf.OpenVMVkMap,
        challengeOrm: orm.NewChallenge(db),
        deduplicator:             deduplicator,
        proverVersionHardForkMap: proverVersionHardForkMap,
    }
}

// InsertChallengeString inserts the challenge string and checks whether it already exists
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
    return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
}

func (l *LoginLogic) Check(login *types.LoginParameter) error {
// VerifyMsg verifies the completeness of the login message
func VerifyMsg(login *types.LoginParameter) error {
    verify, err := login.Verify()
    if err != nil || !verify {
        log.Error("auth message verify failure", "prover_name", login.Message.ProverName,
            "prover_version", login.Message.ProverVersion, "message", login.Message)
        return errors.New("auth message verify failure")
    }
    return nil
}

    if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.MinProverVersion) {
        return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.MinProverVersion, login.Message.ProverVersion)
// InsertChallengeString inserts the challenge string and checks whether it already exists
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
    return l.deduplicator.InsertChallenge(ctx.Copy(), challenge)
}

// CompatiblityCheck checks whether the login client is compatible with the settings in the coordinator
func (l *LoginLogic) CompatiblityCheck(login *types.LoginParameter) error {
    if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.MinProverVersion) {
        return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.MinProverVersion, login.Message.ProverVersion)
    }

    vks := make(map[string]struct{})
@@ -65,27 +93,32 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
        vks[vk] = struct{}{}
    }

    for _, vk := range login.Message.VKs {
        if _, ok := vks[vk]; !ok {
            log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
                "prover_version", login.Message.ProverVersion, "message", login.Message)
            if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
                return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
                    version.Version, login.Message.ProverVersion)
    // new coordinator / proxy do not check vks while login, code only for backward compatibility
    if len(vks) != 0 {
        for _, vk := range login.Message.VKs {
            if _, ok := vks[vk]; !ok {
                log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
                    "prover_version", login.Message.ProverVersion, "message", login.Message)
                if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
                    return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
                        version.Version, login.Message.ProverVersion)
                }
                // if the prover reports the same prover version
                return errors.New("incompatible vk. please check your params files or config files")
            }
            // if the prover reports the same prover version
            return errors.New("incompatible vk. please check your params files or config files")
        }
    }

    if login.Message.ProverProviderType != types.ProverProviderTypeInternal && login.Message.ProverProviderType != types.ProverProviderTypeExternal {
    switch login.Message.ProverProviderType {
    case types.ProverProviderTypeInternal:
    case types.ProverProviderTypeExternal:
    case types.ProverProviderTypeProxy:
    case types.ProverProviderTypeUndefined:
        // for backward compatibility, set ProverProviderType as internal
        if login.Message.ProverProviderType == types.ProverProviderTypeUndefined {
            login.Message.ProverProviderType = types.ProverProviderTypeInternal
        } else {
            log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
            return errors.New("invalid prover provider type.")
        }
        login.Message.ProverProviderType = types.ProverProviderTypeInternal
    default:
        log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
        return errors.New("invalid prover provider type.")
    }

    return nil
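The diff abstracts challenge deduplication behind the ChallengeDeduplicator interface so the proxy can use the no-op SimpleDeduplicator while the coordinator keeps its ORM-backed check. A minimal in-memory implementation satisfying the same interface (a sketch only; this type is not in the diff):

package main

import (
    "context"
    "errors"
    "fmt"
    "sync"
)

// memoryDeduplicator satisfies ChallengeDeduplicator with a process-local set.
// The real coordinator uses orm.Challenge; the proxy uses SimpleDeduplicator.
type memoryDeduplicator struct {
    mu   sync.Mutex
    seen map[string]struct{}
}

func (m *memoryDeduplicator) InsertChallenge(_ context.Context, challengeString string) error {
    m.mu.Lock()
    defer m.mu.Unlock()
    if _, ok := m.seen[challengeString]; ok {
        return errors.New("challenge already used")
    }
    m.seen[challengeString] = struct{}{}
    return nil
}

func main() {
    d := &memoryDeduplicator{seen: make(map[string]struct{})}
    fmt.Println(d.InsertChallenge(context.Background(), "abc")) // <nil>
    fmt.Println(d.InsertChallenge(context.Background(), "abc")) // challenge already used
}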
@@ -1,3 +1,5 @@
//go:build !mock_verifier

package libzkp

/*
@@ -13,8 +15,6 @@ import (
    "os"
    "strings"
    "unsafe"

    "scroll-tech/common/types/message"
)

func init() {
@@ -72,31 +72,6 @@ func VerifyBundleProof(proofData, forkName string) bool {
    return result != 0
}

// TaskType enum values matching the Rust enum
const (
    TaskTypeChunk  = 0
    TaskTypeBatch  = 1
    TaskTypeBundle = 2
)

func fromMessageTaskType(taskType int) int {
    switch message.ProofType(taskType) {
    case message.ProofTypeChunk:
        return TaskTypeChunk
    case message.ProofTypeBatch:
        return TaskTypeBatch
    case message.ProofTypeBundle:
        return TaskTypeBundle
    default:
        panic(fmt.Sprintf("unsupported proof type: %d", taskType))
    }
}

// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
    return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}

// Generate wrapped proof
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
    cProofJSON := goToCString(proofJSON)
57
coordinator/internal/logic/libzkp/lib_mock.go
Normal file
@@ -0,0 +1,57 @@
//go:build mock_verifier

package libzkp

import (
    "encoding/json"
)

// // InitVerifier is a no-op in the mock.
// func InitVerifier(configJSON string) {}

// // VerifyChunkProof returns a fixed success in the mock.
// func VerifyChunkProof(proofData, forkName string) bool {
// 	return true
// }

// // VerifyBatchProof returns a fixed success in the mock.
// func VerifyBatchProof(proofData, forkName string) bool {
// 	return true
// }

// // VerifyBundleProof returns a fixed success in the mock.
// func VerifyBundleProof(proofData, forkName string) bool {
// 	return true
// }

func UniversalTaskCompatibilityFix(taskJSON string) (string, error) {
    panic("should not run here")
}

// GenerateWrappedProof returns a fixed dummy proof string in the mock.
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
    payload := struct {
        Metadata   json.RawMessage `json:"metadata"`
        Proof      json.RawMessage `json:"proof"`
        GitVersion string          `json:"git_version"`
    }{
        Metadata:   json.RawMessage(metadata),
        Proof:      json.RawMessage(proofJSON),
        GitVersion: "mock-git-version",
    }

    out, err := json.Marshal(payload)
    if err != nil {
        panic(err)
    }
    return string(out)
}

// DumpVk is a no-op and returns nil in the mock.
func DumpVk(forkName, filePath string) error {
    return nil
}

// SetDynamicFeature is a no-op in the mock.
func SetDynamicFeature(feats string) {}
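The mock's GenerateWrappedProof nests the raw proof and metadata JSON under fixed keys and stamps a placeholder git version. A standalone sketch of the same wrapping (the helper name here is hypothetical; the payload shape is copied from the mock above):

package main

import (
    "encoding/json"
    "fmt"
)

// wrapProof mirrors the mock's behaviour: json.RawMessage embeds the inputs
// verbatim instead of re-encoding them as strings.
func wrapProof(proofJSON, metadata, gitVersion string) (string, error) {
    payload := struct {
        Metadata   json.RawMessage `json:"metadata"`
        Proof      json.RawMessage `json:"proof"`
        GitVersion string          `json:"git_version"`
    }{json.RawMessage(metadata), json.RawMessage(proofJSON), gitVersion}
    out, err := json.Marshal(payload)
    return string(out), err
}

func main() {
    wrapped, err := wrapProof(`{"pi":[1,2]}`, `{"fork":"galileo"}`, "mock-git-version")
    if err != nil {
        panic(err)
    }
    fmt.Println(wrapped)
    // {"metadata":{"fork":"galileo"},"proof":{"pi":[1,2]},"git_version":"mock-git-version"}
}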
27
coordinator/internal/logic/libzkp/message_types.go
Normal file
@@ -0,0 +1,27 @@
package libzkp

import (
    "fmt"

    "scroll-tech/common/types/message"
)

// TaskType enum values matching the Rust enum
const (
    TaskTypeChunk  = 0
    TaskTypeBatch  = 1
    TaskTypeBundle = 2
)

func fromMessageTaskType(taskType int) int {
    switch message.ProofType(taskType) {
    case message.ProofTypeChunk:
        return TaskTypeChunk
    case message.ProofTypeBatch:
        return TaskTypeBatch
    case message.ProofTypeBundle:
        return TaskTypeBundle
    default:
        panic(fmt.Sprintf("unsupported proof type: %d", taskType))
    }
}
@@ -5,6 +5,7 @@ package libzkp
import (
    "encoding/json"
    "fmt"
    "strings"

    "scroll-tech/common/types/message"

@@ -14,6 +15,10 @@ import (
func InitL2geth(configJSON string) {
}

func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
    return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}

func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
    fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON)
@@ -7,7 +7,10 @@ package libzkp
#include "libzkp.h"
*/
import "C" //nolint:typecheck
import "unsafe"
import (
    "strings"
    "unsafe"
)

// Initialize the handler for universal task
func InitL2geth(configJSON string) {
@@ -17,6 +20,11 @@ func InitL2geth(configJSON string) {
    C.init_l2geth(cConfig)
}

// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
    return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}

func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
    cTask := goToCString(taskJSON)
    cForkName := goToCString(forkName)
|
||||
case 0:
|
||||
log.Warn("the codec version is 0, if it is not under integration test we have encountered an error here")
|
||||
return taskDetail, nil
|
||||
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8, encoding.CodecV9:
|
||||
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
|
||||
default:
|
||||
return nil, fmt.Errorf("Unsupported codec version <%d>", dbBatchCodecVersion)
|
||||
}
|
||||
|
||||
@@ -155,7 +155,7 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.Cha
// HandleZkProof handle a ZkProof submitted from a prover.
// For now only proving/verifying error will lead to setting status as skipped.
// db/unmarshal errors will not because they are errors on the business logic side.
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) error {
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) (rerr error) {
    m.proofReceivedTotal.Inc()
    pk := ctx.GetString(coordinatorType.PublicKey)
    if len(pk) == 0 {
@@ -172,6 +172,18 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
        return ErrValidatorFailureProverTaskEmpty
    }

    defer func() {
        if rerr != nil && types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverAssigned {
            // trigger a last-chance closing of the current task if some routine missed it
            log.Warn("last chance proof recover triggered",
                "proofID", proofParameter.TaskID,
                "err", rerr,
            )
            m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeUndefined, proofParameter)
        }
    }()

    proofTime := time.Since(proverTask.CreatedAt)
    proofTimeSec := uint64(proofTime.Seconds())

@@ -311,6 +323,20 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
        }
    }()

    // Internally we override the timeout failure:
    // if the prover task's FailureType is SessionInfoFailureTimeout, the submitted proof is late, but we still accept it
    if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofInvalid &&
        types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
        m.validateFailureProverTaskTimeout.Inc()
        proverTask.ProvingStatus = int16(types.ProverAssigned)

        proofTime := time.Since(proverTask.CreatedAt)
        proofTimeSec := uint64(proofTime.Seconds())
        log.Warn("submitted proof has timed out", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType,
            "proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
    }

    // Ensure this prover is eligible to participate in the prover task.
    if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid ||
        types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofInvalid {
@@ -328,9 +354,6 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
        return ErrValidatorFailureProverTaskCannotSubmitTwice
    }

    proofTime := time.Since(proverTask.CreatedAt)
    proofTimeSec := uint64(proofTime.Seconds())

    if proofParameter.Status != int(coordinatorType.StatusOk) {
        // Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs.
        failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1)
@@ -346,14 +369,6 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
        return ErrValidatorFailureProofMsgStatusNotOk
    }

    // if the prover task's FailureType is SessionInfoFailureTimeout, the submitted proof is late and must be skipped
    if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
        m.validateFailureProverTaskTimeout.Inc()
        log.Info("submitted proof has timed out, skipping it", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType,
            "proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
        return ErrValidatorFailureProofTimeout
    }

    // store the proof to prover task
    if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofParameter); updateTaskProofErr != nil {
        log.Warn("update prover task proof failure", "hash", proofParameter.TaskID, "proverPublicKey", pk,
@@ -368,6 +383,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
            "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
        return ErrValidatorFailureTaskHaveVerifiedSuccess
    }

    return nil
}

@@ -384,7 +400,7 @@ func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm
    log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
        "taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskVerified.String())

    if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil {
    if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureType(proverTask.FailureType), proofTimeSec); err != nil {
        log.Error("failed to update proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err)
        return err
    }
@@ -445,6 +461,9 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *
    if err != nil {
        return err
    }
    // sync status and failure type into proverTask
    proverTask.ProvingStatus = int16(status)
    proverTask.FailureType = int16(failureType)

    if status == types.ProverProofValid && message.ProofType(proofParameter.TaskType) == message.ProofTypeChunk {
        if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil {
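The HandleZkProof change switches to a named return value so that a deferred closure can inspect the final error and run a last-chance recovery. A standalone sketch of that pattern (names hypothetical, logic reduced to the essentials):

package main

import (
    "errors"
    "fmt"
)

// handle demonstrates the named-return pattern from the diff: the deferred
// closure sees the final value of rerr, so cleanup can react to whichever
// error path the function took.
func handle(fail bool) (rerr error) {
    defer func() {
        if rerr != nil {
            // last-chance recovery, analogous to proofRecover above
            fmt.Println("recovering after error:", rerr)
        }
    }()

    if fail {
        return errors.New("proof validation failed")
    }
    return nil
}

func main() {
    _ = handle(true)  // prints the recovery line
    _ = handle(false) // silent
}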
@@ -14,7 +14,7 @@ import (
)

// ChallengeMiddleware jwt challenge middleware
func ChallengeMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
func ChallengeMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
    jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
        Authenticator: func(c *gin.Context) (interface{}, error) {
            return nil, nil
@@ -30,8 +30,8 @@ func ChallengeMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
            }
        },
        Unauthorized: unauthorized,
        Key:          []byte(conf.Auth.Secret),
        Timeout:      time.Second * time.Duration(conf.Auth.ChallengeExpireDurationSec),
        Key:          []byte(auth.Secret),
        Timeout:      time.Second * time.Duration(auth.ChallengeExpireDurationSec),
        TokenLookup:  "header: Authorization, query: token, cookie: jwt",
        TokenHeadName: "Bearer",
        TimeFunc:     time.Now,
@@ -4,22 +4,57 @@ import (
    "time"

    jwt "github.com/appleboy/gin-jwt/v2"
    "github.com/gin-gonic/gin"
    "github.com/scroll-tech/go-ethereum/log"

    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/controller/api"
    "scroll-tech/coordinator/internal/controller/proxy"
    "scroll-tech/coordinator/internal/types"
)

func nonIdendityAuthorizator(data interface{}, _ *gin.Context) bool {
    return data != nil
}

// LoginMiddleware jwt auth middleware
func LoginMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
func LoginMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
    jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
        PayloadFunc:     api.Auth.PayloadFunc,
        IdentityHandler: api.Auth.IdentityHandler,
        IdentityKey:     types.PublicKey,
        Key:             []byte(conf.Auth.Secret),
        Timeout:         time.Second * time.Duration(conf.Auth.LoginExpireDurationSec),
        Key:             []byte(auth.Secret),
        Timeout:         time.Second * time.Duration(auth.LoginExpireDurationSec),
        Authenticator:   api.Auth.Login,
        Authorizator:    nonIdendityAuthorizator,
        Unauthorized:    unauthorized,
        TokenLookup:     "header: Authorization, query: token, cookie: jwt",
        TokenHeadName:   "Bearer",
        TimeFunc:        time.Now,
        LoginResponse:   loginResponse,
    })

    if err != nil {
        log.Crit("new jwt middleware panic", "error", err)
    }

    if errInit := jwtMiddleware.MiddlewareInit(); errInit != nil {
        log.Crit("init jwt middleware panic", "error", errInit)
    }

    return jwtMiddleware
}

// ProxyLoginMiddleware jwt auth middleware for proxy login
func ProxyLoginMiddleware(auth *config.Auth) *jwt.GinJWTMiddleware {
    jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
        PayloadFunc:     proxy.Auth.PayloadFunc,
        IdentityHandler: proxy.Auth.IdentityHandler,
        IdentityKey:     types.PublicKey,
        Key:             []byte(auth.Secret),
        Timeout:         time.Second * time.Duration(auth.LoginExpireDurationSec),
        Authenticator:   proxy.Auth.Login,
        Authorizator:    nonIdendityAuthorizator,
        Unauthorized:    unauthorized,
        TokenLookup:     "header: Authorization, query: token, cookie: jwt",
        TokenHeadName:   "Bearer",
@@ -28,8 +28,8 @@ func TestMain(m *testing.M) {
    defer func() {
        if testApps != nil {
            testApps.Free()
            tearDownEnv(t)
        }
        tearDownEnv(t)
    }()
    m.Run()
}
@@ -8,6 +8,7 @@ import (

    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/controller/api"
    "scroll-tech/coordinator/internal/controller/proxy"
    "scroll-tech/coordinator/internal/middleware"
)

@@ -25,16 +26,45 @@ func Route(router *gin.Engine, cfg *config.Config, reg prometheus.Registerer) {
func v1(router *gin.RouterGroup, conf *config.Config) {
    r := router.Group("/v1")

    challengeMiddleware := middleware.ChallengeMiddleware(conf)
    challengeMiddleware := middleware.ChallengeMiddleware(conf.Auth)
    r.GET("/challenge", challengeMiddleware.LoginHandler)

    loginMiddleware := middleware.LoginMiddleware(conf)
    loginMiddleware := middleware.LoginMiddleware(conf.Auth)
    r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler)

    // need jwt token api
    r.Use(loginMiddleware.MiddlewareFunc())
    {
        r.POST("/proxy_login", loginMiddleware.LoginHandler)
        r.POST("/get_task", api.GetTask.GetTasks)
        r.POST("/submit_proof", api.SubmitProof.SubmitProof)
    }
}

// ProxyRoute register route for the coordinator proxy
func ProxyRoute(router *gin.Engine, cfg *config.ProxyConfig, reg prometheus.Registerer) {
    router.Use(gin.Recovery())

    observability.Use(router, "coordinator", reg)

    r := router.Group("coordinator")

    v1_proxy(r, cfg)
}

func v1_proxy(router *gin.RouterGroup, conf *config.ProxyConfig) {
    r := router.Group("/v1")

    challengeMiddleware := middleware.ChallengeMiddleware(conf.ProxyManager.Auth)
    r.GET("/challenge", challengeMiddleware.LoginHandler)

    loginMiddleware := middleware.ProxyLoginMiddleware(conf.ProxyManager.Auth)
    r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler)

    // need jwt token api
    r.Use(loginMiddleware.MiddlewareFunc())
    {
        r.POST("/get_task", proxy.GetTask.GetTasks)
        r.POST("/submit_proof", proxy.SubmitProof.SubmitProof)
    }
}
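From a client's perspective, the route table above implies a two-step handshake: GET /challenge yields a short-lived JWT, which is then presented as a Bearer token on POST /login. A rough sketch of that flow (base address, empty login payload, and token extraction are all placeholders; the real payload is types.LoginParameter, not shown in this diff):

package main

import (
    "fmt"
    "io"
    "net/http"
    "strings"
)

func main() {
    base := "http://localhost:8390/coordinator/v1" // hypothetical address

    resp, err := http.Get(base + "/challenge")
    if err != nil {
        panic(err)
    }
    body, _ := io.ReadAll(resp.Body)
    resp.Body.Close()
    fmt.Println("challenge response:", string(body)) // carries the challenge JWT

    challengeToken := "..." // extracted from the response above
    req, _ := http.NewRequest(http.MethodPost, base+"/login", strings.NewReader(`{}`))
    req.Header.Set("Authorization", "Bearer "+challengeToken)
    req.Header.Set("Content-Type", "application/json")
    loginResp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer loginResp.Body.Close()
    fmt.Println("login status:", loginResp.Status)
}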
@@ -64,6 +64,8 @@ func (r ProverProviderType) String() string {
        return "prover provider type internal"
    case ProverProviderTypeExternal:
        return "prover provider type external"
    case ProverProviderTypeProxy:
        return "prover provider type proxy"
    default:
        return fmt.Sprintf("prover provider type: %d", r)
    }
@@ -76,4 +78,6 @@ const (
    ProverProviderTypeInternal
    // ProverProviderTypeExternal is an external prover provider type
    ProverProviderTypeExternal
    // ProverProviderTypeProxy is a proxy prover provider type
    ProverProviderTypeProxy = 3
)
48
coordinator/internal/types/response_test.go
Normal file
@@ -0,0 +1,48 @@
package types

import (
    "encoding/json"
    "reflect"
    "testing"

    "scroll-tech/common/types"
)

func TestResponseDecodeData_GetTaskSchema(t *testing.T) {
    // Arrange: build a dummy payload and wrap it in Response
    in := GetTaskSchema{
        UUID:         "uuid-123",
        TaskID:       "task-abc",
        TaskType:     1,
        UseSnark:     true,
        TaskData:     "dummy-data",
        HardForkName: "cancun",
    }

    resp := types.Response{
        ErrCode: 0,
        ErrMsg:  "",
        Data:    in,
    }

    // Act: JSON round-trip the Response to simulate real HTTP encoding/decoding
    b, err := json.Marshal(resp)
    if err != nil {
        t.Fatalf("marshal response: %v", err)
    }

    var decoded types.Response
    if err := json.Unmarshal(b, &decoded); err != nil {
        t.Fatalf("unmarshal response: %v", err)
    }

    var out GetTaskSchema
    if err := decoded.DecodeData(&out); err != nil {
        t.Fatalf("DecodeData error: %v", err)
    }

    // Assert: structs match after decode
    if !reflect.DeepEqual(in, out) {
        t.Fatalf("decoded struct mismatch:\nwant: %+v\n got: %+v", in, out)
    }
}
@@ -31,6 +31,8 @@ func Version(hardForkName string, ValidiumMode bool) (uint8, error) {
        stfVersion = 8
    case "galileo":
        stfVersion = 9
    case "galileov2":
        stfVersion = 10
    default:
        return 0, errors.New("unknown fork name " + canonicalName)
    }
@@ -30,12 +30,14 @@ import (
    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/controller/api"
    "scroll-tech/coordinator/internal/controller/cron"
    "scroll-tech/coordinator/internal/controller/proxy"
    "scroll-tech/coordinator/internal/orm"
    "scroll-tech/coordinator/internal/route"
)

var (
    conf *config.Config
    conf      *config.Config
    proxyConf *config.ProxyConfig

    testApps *testcontainers.TestcontainerApps

@@ -51,6 +53,9 @@ var (
    chunk *encoding.Chunk
    batch *encoding.Batch
    tokenTimeout int

    envSet   bool
    portUsed map[int64]struct{}
)

func TestMain(m *testing.M) {
@@ -63,18 +68,44 @@ func TestMain(m *testing.M) {
}

func randomURL() string {
    id, _ := rand.Int(rand.Reader, big.NewInt(2000-1))
    return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
    return randmURLBatch(1)[0]
}

func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
    var err error
    db, err = testApps.GetGormDBClient()
// Generate a batch of random localhost URLs with different ports, similar to randomURL.
func randmURLBatch(n int) []string {
    if n <= 0 {
        return nil
    }
    urls := make([]string, 0, n)
    if portUsed == nil {
        portUsed = make(map[int64]struct{})
    }
    for len(urls) < n {
        id, _ := rand.Int(rand.Reader, big.NewInt(2000-1))
        port := 20000 + 2000 + id.Int64()
        if _, exist := portUsed[port]; exist {
            continue
        }
        portUsed[port] = struct{}{}
        urls = append(urls, fmt.Sprintf("localhost:%d", port))
    }
    return urls
}

    assert.NoError(t, err)
func setupCoordinatorDb(t *testing.T) {
    var err error
    assert.NotNil(t, db, "setEnv must be called before")
    // db, err = testApps.GetGormDBClient()

    // assert.NoError(t, err)
    sqlDB, err := db.DB()
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(sqlDB))
}

func launchCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
    assert.NotNil(t, db, "db must be set")

    tokenTimeout = 60
    conf = &config.Config{
@@ -114,6 +145,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
        EuclidV2Time: new(uint64),
    }, db, nil)
    route.Route(router, conf, nil)
    t.Log("coordinator server url", coordinatorURL)
    srv := &http.Server{
        Addr:    coordinatorURL,
        Handler: router,
@@ -129,7 +161,77 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
    return proofCollector, srv
}

func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
    setupCoordinatorDb(t)
    return launchCoordinator(t, proversPerSession, coordinatorURL)
}

func setupProxyDb(t *testing.T) {
    assert.NotNil(t, db, "setEnv must be called before")
    sqlDB, err := db.DB()
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetModuleDB(sqlDB, "proxy"))
}

func launchProxy(t *testing.T, proxyURL string, coordinatorURL []string, usePersistent bool) *http.Server {
    var err error
    assert.NoError(t, err)

    coordinators := make(map[string]*config.UpStream)
    for i, n := range coordinatorURL {
        coordinators[fmt.Sprintf("coordinator_%d", i)] = testProxyUpStreamCfg(n)
    }

    tokenTimeout = 60
    proxyConf = &config.ProxyConfig{
        ProxyName: "test_proxy",
        ProxyManager: &config.ProxyManager{
            Verifier: &config.VerifierConfig{
                MinProverVersion: "v4.4.89",
                Verifiers: []config.AssetConfig{{
                    AssetsPath: "",
                    ForkName:   "euclidV2",
                }},
            },
            Client: testProxyClientCfg(),
            Auth: &config.Auth{
                Secret:                     "proxy",
                ChallengeExpireDurationSec: tokenTimeout,
                LoginExpireDurationSec:     tokenTimeout,
            },
        },
        Coordinators: coordinators,
    }

    router := gin.New()
    if usePersistent {
        proxy.InitController(proxyConf, db, nil)
    } else {
        proxy.InitController(proxyConf, nil, nil)
    }
    route.ProxyRoute(router, proxyConf, nil)
    t.Log("proxy server url", proxyURL)
    srv := &http.Server{
        Addr:    proxyURL,
        Handler: router,
    }
    go func() {
        runErr := srv.ListenAndServe()
        if runErr != nil && !errors.Is(runErr, http.ErrServerClosed) {
            assert.NoError(t, runErr)
        }
    }()
    time.Sleep(time.Second * 2)

    return srv
}

func setEnv(t *testing.T) {
    if envSet {
        t.Log("setEnv is re-entered")
        return
    }

    var err error

    version.Version = "v4.5.45"
@@ -146,6 +248,7 @@ func setEnv(t *testing.T) {
    sqlDB, err := db.DB()
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(sqlDB))
    assert.NoError(t, migrate.MigrateModule(sqlDB, "proxy"))

    batchOrm = orm.NewBatch(db)
    chunkOrm = orm.NewChunk(db)
@@ -169,6 +272,7 @@ func setEnv(t *testing.T) {
    assert.NoError(t, err)
    batch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk}}

    envSet = true
}

func TestApis(t *testing.T) {
@@ -34,6 +34,8 @@ type mockProver struct {
    privKey        *ecdsa.PrivateKey
    proofType      message.ProofType
    coordinatorURL string
    token          string
    useCacheToken  bool
}

func newMockProver(t *testing.T, proverName string, coordinatorURL string, proofType message.ProofType, version string) *mockProver {
@@ -50,6 +52,14 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
    return prover
}

func (r *mockProver) resetConnection(coordinatorURL string) {
    r.coordinatorURL = coordinatorURL
}

func (r *mockProver) setUseCacheToken(enable bool) {
    r.useCacheToken = enable
}

// connectToCoordinator sets up a websocket client to connect to the prover manager.
func (r *mockProver) connectToCoordinator(t *testing.T, proverTypes []types.ProverType) (string, int, string) {
    challengeString := r.challenge(t)
@@ -115,6 +125,7 @@ func (r *mockProver) login(t *testing.T, challengeString string, proverTypes []t
    assert.NoError(t, err)
    assert.Equal(t, http.StatusOK, resp.StatusCode())
    assert.Empty(t, result.ErrMsg)
    r.token = loginData.Token
    return loginData.Token, 0, ""
}

@@ -144,11 +155,14 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {

func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*types.GetTaskSchema, int, string) {
    // get task from coordinator
    token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
    if errCode != 0 {
        return nil, errCode, errMsg
    if !r.useCacheToken || r.token == "" {
        token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
        if errCode != 0 {
            return nil, errCode, errMsg
        }
        assert.NotEmpty(t, token)
        assert.Equal(t, token, r.token)
    }
    assert.NotEmpty(t, token)

    type response struct {
        ErrCode int `json:"errcode"`
@@ -160,7 +174,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
    client := resty.New()
    resp, err := client.R().
        SetHeader("Content-Type", "application/json").
        SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
        SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
        SetBody(map[string]interface{}{"universal": true, "prover_height": 100, "task_types": []int{int(proofType)}}).
        SetResult(&result).
        Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
@@ -174,11 +188,14 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
//nolint:unparam
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) {
    // get task from coordinator
    token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
    if errCode != 0 {
        return errCode, errMsg
    if !r.useCacheToken || r.token == "" {
        token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)})
        if errCode != 0 {
            return errCode, errMsg
        }
        assert.NotEmpty(t, token)
        assert.Equal(t, token, r.token)
    }
    assert.NotEmpty(t, token)

    type response struct {
        ErrCode int `json:"errcode"`
@@ -190,8 +207,8 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
    client := resty.New()
    resp, err := client.R().
        SetHeader("Content-Type", "application/json").
        SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
        SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "universal": true}).
        SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
        SetBody(map[string]interface{}{"prover_height": 100, "task_types": []int{int(proofType)}, "universal": true}).
        SetResult(&result).
        Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
    assert.NoError(t, err)
@@ -249,10 +266,13 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
        Universal: true,
    }

    token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})
    assert.Equal(t, authErrCode, 0)
    assert.Equal(t, errMsg, "")
    assert.NotEmpty(t, token)
    if !r.useCacheToken || r.token == "" {
        token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})
        assert.Equal(t, authErrCode, 0)
        assert.Equal(t, errMsg, "")
        assert.NotEmpty(t, token)
        assert.Equal(t, token, r.token)
    }

    submitProofData, err := json.Marshal(submitProof)
    assert.NoError(t, err)
@@ -262,7 +282,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
    client := resty.New()
    resp, err := client.R().
        SetHeader("Content-Type", "application/json").
        SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
        SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.token)).
        SetBody(string(submitProofData)).
        SetResult(&result).
        Post("http://" + r.coordinatorURL + "/coordinator/v1/submit_proof")
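The token-caching change above repeats one guard in three places: authenticate only when caching is off or no token has been issued yet. A condensed sketch of that guard (names hypothetical):

package main

import "fmt"

type prover struct {
    token         string
    useCacheToken bool
}

// ensureToken mimics the guard added in the diff: re-authenticate only when
// caching is disabled or no token has been cached yet.
func (p *prover) ensureToken(login func() string) string {
    if !p.useCacheToken || p.token == "" {
        p.token = login()
    }
    return p.token
}

func main() {
    logins := 0
    login := func() string { logins++; return fmt.Sprintf("jwt-%d", logins) }

    p := &prover{useCacheToken: true}
    p.ensureToken(login)
    p.ensureToken(login) // cached; no second login
    fmt.Println(p.token, logins) // jwt-1 1
}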
297
coordinator/test/proxy_test.go
Normal file
@@ -0,0 +1,297 @@
package test

import (
    "context"
    "fmt"
    "net/http"
    "strings"
    "testing"
    "time"

    "github.com/scroll-tech/da-codec/encoding"
    "github.com/stretchr/testify/assert"

    "scroll-tech/common/types"
    "scroll-tech/common/types/message"
    "scroll-tech/common/version"

    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/controller/proxy"
)

func testProxyClientCfg() *config.ProxyClient {
    return &config.ProxyClient{
        Secret:       "test-secret-key",
        ProxyName:    "test-proxy",
        ProxyVersion: version.Version,
    }
}

var testCompatibileMode bool

func testProxyUpStreamCfg(coordinatorURL string) *config.UpStream {
    return &config.UpStream{
        BaseUrl:              fmt.Sprintf("http://%s", coordinatorURL),
        RetryWaitTime:        3,
        ConnectionTimeoutSec: 30,
        CompatibileMode:      testCompatibileMode,
    }
}

func testProxyClient(t *testing.T) {
    // Setup coordinator and http server.
    coordinatorURL := randomURL()
    proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
    defer func() {
        proofCollector.Stop()
        assert.NoError(t, httpHandler.Shutdown(context.Background()))
    }()

    cliCfg := testProxyClientCfg()
    upCfg := testProxyUpStreamCfg(coordinatorURL)

    clientManager, err := proxy.NewClientManager("test_coordinator", cliCfg, upCfg)
    assert.NoError(t, err)
    assert.NotNil(t, clientManager)

    // Create context with timeout
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    // Test Client method
    client := clientManager.ClientAsProxy(ctx)

    // The client should not be nil if login succeeds.
    // Note: it might be nil if the coordinator is not properly set up for proxy
    // authentication, but the test validates that the Client method completes without panic.
    assert.NotNil(t, client)
    token1 := client.Token()
    assert.NotEmpty(t, token1)
    t.Logf("Client token: %s (%v)", token1, client)

    if !upCfg.CompatibileMode {
        time.Sleep(time.Second * 2)
        client.Reset()
        client = clientManager.ClientAsProxy(ctx)
        assert.NotNil(t, client)
        token2 := client.Token()
        assert.NotEmpty(t, token2)
        t.Logf("Client token (sec): %s (%v)", token2, client)
        assert.NotEqual(t, token1, token2, "token should not be identical")
    }
}

func testProxyHandshake(t *testing.T) {
    // Setup proxy http server.
    proxyURL := randomURL()
    proxyHttpHandler := launchProxy(t, proxyURL, []string{}, false)
    defer func() {
        assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
    }()

    chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
    assert.True(t, chunkProver.healthCheckSuccess(t))
}

func testProxyGetTask(t *testing.T) {
    // Setup coordinator and http server.
    urls := randmURLBatch(2)
    coordinatorURL := urls[0]
    collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
    defer func() {
        collector.Stop()
        assert.NoError(t, httpHandler.Shutdown(context.Background()))
    }()

    proxyURL := urls[1]
    proxyHttpHandler := launchProxy(t, proxyURL, []string{coordinatorURL}, false)
    defer func() {
        assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
    }()

    chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
    chunkProver.setUseCacheToken(true)
    code, _ := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
    assert.Equal(t, int(types.ErrCoordinatorEmptyProofData), code)

    err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
    assert.NoError(t, err)
    dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
    assert.NoError(t, err)
    err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
    assert.NoError(t, err)

    task, code, msg := chunkProver.getProverTask(t, message.ProofTypeChunk)
    assert.Empty(t, code)
    if code == 0 {
        t.Log("get task id", task.TaskID)
    } else {
        t.Log("get task error msg", msg)
    }
}

func testProxyProof(t *testing.T) {
    urls := randmURLBatch(3)
    coordinatorURL0 := urls[0]
    setupCoordinatorDb(t)
    collector0, httpHandler0 := launchCoordinator(t, 3, coordinatorURL0)
    defer func() {
        collector0.Stop()
        httpHandler0.Shutdown(context.Background())
    }()
    coordinatorURL1 := urls[1]
    collector1, httpHandler1 := launchCoordinator(t, 3, coordinatorURL1)
    defer func() {
        collector1.Stop()
        httpHandler1.Shutdown(context.Background())
    }()
    coordinators := map[string]*http.Server{
        "coordinator_0": httpHandler0,
        "coordinator_1": httpHandler1,
    }

    proxyURL := urls[2]
    proxyHttpHandler := launchProxy(t, proxyURL, []string{coordinatorURL0, coordinatorURL1}, false)
    defer func() {
        assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
    }()

    err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
    assert.NoError(t, err)
    dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
    assert.NoError(t, err)
    err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
    assert.NoError(t, err)

    chunkProver := newMockProver(t, "prover_chunk_test", proxyURL, message.ProofTypeChunk, version.Version)
    chunkProver.setUseCacheToken(true)
    task, code, msg := chunkProver.getProverTask(t, message.ProofTypeChunk)
    assert.Empty(t, code)
    if code == 0 {
        t.Log("get task", task)
        parts, _, _ := strings.Cut(task.TaskID, ":")
        // Close the coordinator which did not dispatch the task first, so that a submit
        // to the wrong target has a chance to fail (against the closed coordinator).
        for n, srv := range coordinators {
            if n != parts {
                t.Log("close coordinator", n)
                assert.NoError(t, srv.Shutdown(context.Background()))
            }
        }
        exceptProofStatus := verifiedSuccess
        chunkProver.submitProof(t, task, exceptProofStatus, types.Success)
    } else {
        t.Log("get task error msg", msg)
    }

    // verify proof status
    var (
        tick     = time.Tick(1500 * time.Millisecond)
        tickStop = time.Tick(time.Minute)
    )

    var (
        chunkProofStatus    types.ProvingStatus
        chunkActiveAttempts int16
        chunkMaxAttempts    int16
    )

    for {
        select {
        case <-tick:
            chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
            assert.NoError(t, err)
            if chunkProofStatus == types.ProvingTaskVerified {
                return
            }

            chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
            assert.NoError(t, err)
            assert.Equal(t, 1, int(chunkMaxAttempts))
            assert.Equal(t, 0, int(chunkActiveAttempts))

        case <-tickStop:
            t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String())
            return
        }
    }
}

func testProxyPersistent(t *testing.T) {
    urls := randmURLBatch(4)
    coordinatorURL0 := urls[0]
    setupCoordinatorDb(t)
    collector0, httpHandler0 := launchCoordinator(t, 3, coordinatorURL0)
    defer func() {
        collector0.Stop()
        httpHandler0.Shutdown(context.Background())
    }()
    coordinatorURL1 := urls[1]
    collector1, httpHandler1 := launchCoordinator(t, 3, coordinatorURL1)
    defer func() {
        collector1.Stop()
        httpHandler1.Shutdown(context.Background())
    }()

    setupProxyDb(t)
    proxyURL1 := urls[2]
    proxyHttpHandler := launchProxy(t, proxyURL1, []string{coordinatorURL0, coordinatorURL1}, true)
    defer func() {
        assert.NoError(t, proxyHttpHandler.Shutdown(context.Background()))
    }()

    proxyURL2 := urls[3]
    proxyHttpHandler2 := launchProxy(t, proxyURL2, []string{coordinatorURL0, coordinatorURL1}, true)
    defer func() {
        assert.NoError(t, proxyHttpHandler2.Shutdown(context.Background()))
    }()

    err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
    assert.NoError(t, err)
    dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
    assert.NoError(t, err)
    err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
    assert.NoError(t, err)

    chunkProver := newMockProver(t, "prover_chunk_test", proxyURL1, message.ProofTypeChunk, version.Version)
    chunkProver.setUseCacheToken(true)
    task, _, _ := chunkProver.getProverTask(t, message.ProofTypeChunk)
    assert.NotNil(t, task)
    taskFrom, _, _ := strings.Cut(task.TaskID, ":")
    t.Log("get task from coordinator:", taskFrom)

    chunkProver.resetConnection(proxyURL2)
    task, _, _ = chunkProver.getProverTask(t, message.ProofTypeChunk)
    assert.NotNil(t, task)
    taskFrom2, _, _ := strings.Cut(task.TaskID, ":")
    assert.Equal(t, taskFrom, taskFrom2)
}

func TestProxyClient(t *testing.T) {
    testCompatibileMode = false
    // Set up the test environment.
    setEnv(t)
    t.Run("TestProxyClient", testProxyClient)
    t.Run("TestProxyHandshake", testProxyHandshake)
    t.Run("TestProxyGetTask", testProxyGetTask)
    t.Run("TestProxyValidProof", testProxyProof)
    t.Run("testProxyPersistent", testProxyPersistent)
}

func TestProxyClientCompatibleMode(t *testing.T) {
    testCompatibileMode = true
    // Set up the test environment.
    setEnv(t)
    t.Run("TestProxyClient", testProxyClient)
    t.Run("TestProxyHandshake", testProxyHandshake)
    t.Run("TestProxyGetTask", testProxyGetTask)
    t.Run("TestProxyValidProof", testProxyProof)
    t.Run("testProxyPersistent", testProxyPersistent)
}
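The persistence test relies on the proxy remembering which upstream issued a prover's current task, so that a follow-up request through a different proxy instance still lands on the same coordinator. A minimal sketch of such a pinning map (the real PriorityUpstreamManager is not shown in this diff and may differ, e.g. it can be DB-backed when persistence is enabled):

package main

import (
    "fmt"
    "sync"
)

// priorityUpstream pins each prover (by public key) to the upstream that
// issued its current task; the entry is cleared after a successful submit.
type priorityUpstream struct {
    mu   sync.RWMutex
    data map[string]string
}

func (p *priorityUpstream) Set(pubKey, upstream string) {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.data[pubKey] = upstream
}

func (p *priorityUpstream) Get(pubKey string) (string, bool) {
    p.mu.RLock()
    defer p.mu.RUnlock()
    up, ok := p.data[pubKey]
    return up, ok
}

func (p *priorityUpstream) Delete(pubKey string) {
    p.mu.Lock()
    defer p.mu.Unlock()
    delete(p.data, pubKey)
}

func main() {
    pm := &priorityUpstream{data: make(map[string]string)}
    pm.Set("prover-pk", "coordinator_0")
    up, _ := pm.Get("prover-pk")
    fmt.Println(up)        // coordinator_0
    pm.Delete("prover-pk") // cleared after a successful submit
}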
@@ -28,10 +28,10 @@ pub struct BatchHeaderValidiumWithHash {

/// Parse header types passed from golang side and adapt to the
/// definition in zkvm-prover's types
/// We distinguish the header type in golang side according to the codec
/// version, i.e. v7 - v9 (current), and validium
/// And adapt it to the corresponding header version used in zkvm-prover's witness
/// definition, i.e. v7 - v8 (current), and validium
/// We distinguish the header type in golang side according to the STF
/// version, i.e. v6, v7-v10 (current), and validium
/// And adapt it to the corresponding batch header type used in zkvm-prover's witness
/// definition, i.e. v6, v7 (current), and validium
#[derive(Clone, serde::Deserialize, serde::Serialize)]
#[serde(untagged)]
#[allow(non_camel_case_types)]
@@ -40,18 +40,18 @@ pub enum BatchHeaderV {
    Validium(BatchHeaderValidiumWithHash),
    /// Header for scroll's STF version v6.
    V6(BatchHeaderV6),
    /// Header for scroll's STF versions v7, v8, v9.
    /// Header for scroll's STF versions v7 - v10.
    ///
    /// Since the codec essentially is unchanged for the above STF versions, we do not define new
    /// variants, instead re-using the [`BatchHeaderV7`] variant.
    V7_V8_V9(BatchHeaderV7),
    V7_to_V10(BatchHeaderV7),
}

impl core::fmt::Display for BatchHeaderV {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            BatchHeaderV::V6(_) => write!(f, "V6"),
            BatchHeaderV::V7_V8_V9(_) => write!(f, "V7_V8_V9"),
            BatchHeaderV::V7_to_V10(_) => write!(f, "V7 - V10"),
            BatchHeaderV::Validium(_) => write!(f, "Validium"),
        }
    }
@@ -61,26 +61,29 @@ impl BatchHeaderV {
    pub fn batch_hash(&self) -> B256 {
        match self {
            BatchHeaderV::V6(h) => h.batch_hash(),
            BatchHeaderV::V7_V8_V9(h) => h.batch_hash(),
            BatchHeaderV::V7_to_V10(h) => h.batch_hash(),
            BatchHeaderV::Validium(h) => h.header.batch_hash(),
        }
    }

    pub fn must_v6_header(&self) -> &BatchHeaderV6 {
    pub fn to_zkvm_batch_header_v6(&self) -> &BatchHeaderV6 {
        match self {
            BatchHeaderV::V6(h) => h,
            _ => unreachable!("A header of {} is considered to be v6", self),
        }
    }

    pub fn must_v7_v8_v9_header(&self) -> &BatchHeaderV7 {
    pub fn to_zkvm_batch_header_v7_to_v10(&self) -> &BatchHeaderV7 {
        match self {
            BatchHeaderV::V7_V8_V9(h) => h,
            _ => unreachable!("A header of {} is considered to be in [v7, v8, v9]", self),
            BatchHeaderV::V7_to_V10(h) => h,
            _ => unreachable!(
                "A header of {} is considered to be in [v7, v8, v9, v10]",
                self
            ),
        }
    }

    pub fn must_validium_header(&self) -> &BatchHeaderValidium {
    pub fn to_zkvm_batch_header_validium(&self) -> &BatchHeaderValidium {
        match self {
            BatchHeaderV::Validium(h) => &h.header,
            _ => unreachable!("A header of {} is considered to be validium", self),
@@ -154,11 +157,11 @@ impl BatchProvingTask {
            version.fork,
            ForkName::EuclidV1,
        ),
        BatchHeaderV::V7_V8_V9(_) => assert!(
            matches!(version.fork, ForkName::EuclidV2 | ForkName::Feynman | ForkName::Galileo),
            "hardfork mismatch for da-codec@v7/8/9 header: found={}, expected={:?}",
        BatchHeaderV::V7_to_V10(_) => assert!(
            matches!(version.fork, ForkName::EuclidV2 | ForkName::Feynman | ForkName::Galileo | ForkName::GalileoV2),
            "hardfork mismatch for da-codec@v7/8/9/10 header: found={}, expected={:?}",
            version.fork,
            [ForkName::EuclidV2, ForkName::Feynman, ForkName::Galileo],
            [ForkName::EuclidV2, ForkName::Feynman, ForkName::Galileo, ForkName::GalileoV2],
        ),
    }

@@ -228,23 +231,25 @@ impl BatchProvingTask {

    let reference_header = match (version.domain, version.stf_version) {
        (Domain::Scroll, STFVersion::V6) => {
            ReferenceHeader::V6(*self.batch_header.must_v6_header())
            ReferenceHeader::V6(*self.batch_header.to_zkvm_batch_header_v6())
        }
        // The da-codec for STF versions v7, v8, v9 is identical. In zkvm-prover we do not
        // The da-codec for STF versions v7, v8, v9, v10 is identical. In zkvm-prover we do not
        // create additional variants to indicate the identical behaviour of codec. Instead we
        // add a separate variant for the STF version.
        //
        // We handle the different STF versions here however build the same batch header since
        // that type does not change. The batch header's version byte constructed in the
        // coordinator actually defines the STF version (v7, v8 or v9) and we can derive the
        // hard-fork (feynman or galileo) and the codec from the version byte.
        // coordinator actually defines the STF version (v7, v8, v9 or v10) and we can derive
        // the hard-fork (e.g. feynman or galileo) and the codec from the version
        // byte.
        //
        // Refer [`scroll_zkvm_types::public_inputs::Version`].
        (Domain::Scroll, STFVersion::V7 | STFVersion::V8 | STFVersion::V9) => {
            ReferenceHeader::V7_V8_V9(*self.batch_header.must_v7_v8_v9_header())
        }
        (
            Domain::Scroll,
            STFVersion::V7 | STFVersion::V8 | STFVersion::V9 | STFVersion::V10,
        ) => ReferenceHeader::V7_V8_V9(*self.batch_header.to_zkvm_batch_header_v7_to_v10()),
        (Domain::Validium, STFVersion::V1) => {
            ReferenceHeader::Validium(*self.batch_header.to_zkvm_batch_header_validium())
        }
        (domain, stf_version) => {
            unreachable!("unsupported domain={domain:?},stf-version={stf_version:?}")
@@ -144,7 +144,6 @@ impl LocalProverConfig {
|
||||
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct CircuitConfig {
|
||||
pub hard_fork_name: String,
|
||||
/// The path to save assets for a specified hard fork phase
|
||||
pub workspace_path: String,
|
||||
#[serde(flatten)]
|
||||
|
||||
@@ -8,7 +8,7 @@ require (
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/pressly/goose/v3 v3.16.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
)
|
||||
|
||||
@@ -121,8 +121,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
|
||||
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
|
||||
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
|
||||
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
|
||||
|
||||
@@ -9,13 +9,14 @@ import (
"github.com/pressly/goose/v3"
)

//go:embed migrations/*.sql
//go:embed migrations
var embedMigrations embed.FS

// MigrationsDir migration dir
const MigrationsDir string = "migrations"

func init() {
// note: goose ignores non-sql files by default so we do not need to specify *.sql
goose.SetBaseFS(embedMigrations)
goose.SetSequential(true)
goose.SetTableName("scroll_migrations")
@@ -24,6 +25,41 @@ func init() {
goose.SetVerbose(verbose)
}

// MigrateModule migrates the db used by another module, tracking it in a module-specific goose table.
// SQL files for that module must be placed in a sub-directory under `MigrationsDir`.
func MigrateModule(db *sql.DB, moduleName string) error {

goose.SetTableName(moduleName + "_migrations")
defer func() {
goose.SetTableName("scroll_migrations")
}()

return goose.Up(db, MigrationsDir+"/"+moduleName, goose.WithAllowMissing())
}

// RollbackModule rolls back the specified module to the given version
func RollbackModule(db *sql.DB, moduleName string, version *int64) error {

goose.SetTableName(moduleName + "_migrations")
defer func() {
goose.SetTableName("scroll_migrations")
}()
moduleDir := MigrationsDir + "/" + moduleName

if version != nil {
return goose.DownTo(db, moduleDir, *version)
}
return goose.Down(db, moduleDir)
}

// ResetModuleDB cleans and migrates the db for a module.
func ResetModuleDB(db *sql.DB, moduleName string) error {
if err := RollbackModule(db, moduleName, new(int64)); err != nil {
return err
}
return MigrateModule(db, moduleName)
}

// Migrate migrates the db
func Migrate(db *sql.DB) error {
//return goose.Up(db, MIGRATIONS_DIR, goose.WithAllowMissing())

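For orientation, a minimal usage sketch of the module helpers above; the import path and DSN are assumptions (the DSN mirrors the dev configs elsewhere in this diff), and only MigrateModule itself comes from the hunk above:

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // postgres driver, already present in go.mod above

	"scroll-tech/database/migrate" // assumed import path for the package shown above
)

func main() {
	db, err := sql.Open("postgres", "postgres://dev:dev@localhost:5432/scroll?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Applies migrations/proxy/*.sql and tracks them in the
	// "proxy_migrations" goose table, per MigrateModule above.
	if err := migrate.MigrateModule(db, "proxy"); err != nil {
		log.Fatal(err)
	}
}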
database/migrate/migrations/proxy/0001_running_tables.sql (new file, 30 lines)
@@ -0,0 +1,30 @@
-- +goose Up
-- +goose StatementBegin
create table prover_sessions
(
public_key TEXT NOT NULL,
upstream TEXT NOT NULL,
up_token TEXT NOT NULL,
expired TIMESTAMP(0) NOT NULL,
constraint uk_prover_sessions_public_key_upstream unique (public_key, upstream)
);

create index idx_prover_sessions_expired on prover_sessions (expired);

create table priority_upstream
(
public_key TEXT NOT NULL,
upstream TEXT NOT NULL,
update_time TIMESTAMP(0) NOT NULL DEFAULT now()
);

create unique index idx_priority_upstream_public_key on priority_upstream (public_key);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop index if exists idx_prover_sessions_expired;

drop table if exists prover_sessions;
drop table if exists priority_upstream;
-- +goose StatementEnd
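For orientation, a minimal sketch of how a proxy process might keep priority_upstream current under the unique index above; the query is illustrative only (one priority upstream per prover public key) and is not taken from the coordinator-proxy code:

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // postgres driver
)

func main() {
	db, err := sql.Open("postgres", "postgres://dev:dev@localhost:5432/scroll?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// idx_priority_upstream_public_key is unique, so an upsert keeps
	// exactly one priority upstream per prover public key.
	_, err = db.Exec(`
INSERT INTO priority_upstream (public_key, upstream, update_time)
VALUES ($1, $2, now())
ON CONFLICT (public_key)
DO UPDATE SET upstream = EXCLUDED.upstream, update_time = now()`,
		"0xabc... (hypothetical prover key)", "https://coordinator-1.example.org")
	if err != nil {
		log.Fatal(err)
	}
}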
@@ -1413,16 +1413,14 @@ github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148 h1:cyK1ifU2
github.com/scroll-tech/da-codec v0.1.3-0.20250609154559-8935de62c148/go.mod h1:gz5x3CsLy5htNTbv4PWRPBU9nSAujfx1U2XtFcXoFuk=
github.com/scroll-tech/da-codec v0.1.3-0.20250626091118-58b899494da6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/da-codec v0.1.3-0.20250825071838-cddc263e5ef6/go.mod h1:Z6kN5u2khPhiqHyk172kGB7o38bH/nj7Ilrb/46wZGg=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
github.com/scroll-tech/ecies-go/v2 v2.0.10-beta.1/go.mod h1:A+pHaITd+ogBm4Rk35xebF9OPiyMYlFlgqBOiY5PSjg=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250206083728-ea43834c198f/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250225152658-bcfdb48dd939/go.mod h1:AgU8JJxC7+nfs7R7ma35AU7dMAGW7wCw3dRZRefIKyQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e h1:7U1/JilCPIqJTYNgKzlfBEkRNORRwJ8+PCmdR8/XK6A=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117071111-47c22325665e/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b h1:pMQKnroJoS/FeL1aOWkz7/u1iBHUP8PWjZstNuzoUGE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=

@@ -66,17 +66,26 @@ func action(ctx *cli.Context) error {
registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)

l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
// Init L1 connection
l1RpcClient, err := rpc.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
log.Crit("failed to dial raw RPC client to L1 endpoint", "endpoint", cfg.L1Config.Endpoint, "error", err)
}
l1client := ethclient.NewClient(l1RpcClient)

// sanity check config
if cfg.L1Config.RelayerConfig.GasOracleConfig.L1BaseFeeLimit == 0 || cfg.L1Config.RelayerConfig.GasOracleConfig.L1BlobBaseFeeLimit == 0 {
log.Crit("gas-oracle `l1_base_fee_limit` and `l1_blob_base_fee_limit` configs must be set")
}

l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, db, registry)
// Init watcher and relayer
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1RpcClient, cfg.L1Config.StartHeight, db, registry)

l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, relayer.ServiceTypeL1GasOracle, registry)
if err != nil {
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
}

// Start l1 watcher process
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
// Fetch the latest block number to decrease the delay when fetching gas prices

@@ -21,7 +21,9 @@
"check_committed_batches_window_minutes": 5,
"l1_base_fee_default": 15000000000,
"l1_blob_base_fee_default": 1,
"l1_blob_base_fee_threshold": 0
"l1_blob_base_fee_threshold": 0,
"l1_base_fee_limit": 20000000000,
"l1_blob_base_fee_limit": 20000000000
},
"gas_oracle_sender_signer_config": {
"signer_type": "PrivateKey",
@@ -56,7 +58,8 @@
"min_batches": 1,
"max_batches": 6,
"timeout": 7200,
"backlog_max": 75
"backlog_max": 75,
"blob_fee_tolerance": 500000000
},
"gas_oracle_config": {
"min_gas_price": 0,

@@ -15,8 +15,8 @@ require (
github.com/holiman/uint256 v1.3.2
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/da-codec v0.9.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/smartystreets/goconvey v1.8.0
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.10.0
@@ -51,7 +51,7 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
github.com/fjl/memsize v0.0.2 // indirect

@@ -88,8 +88,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
@@ -287,10 +287,10 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

@@ -48,6 +48,10 @@ type BatchSubmission struct {
TimeoutSec int64 `json:"timeout"`
// The maximum number of pending batches to keep in the backlog.
BacklogMax int64 `json:"backlog_max"`
// BlobFeeTolerance is the absolute tolerance (in wei) added to the target blob fee.
// If the current fee is below target + tolerance, we proceed with submission.
// This prevents skipping submission when the price difference is negligible.
BlobFeeTolerance uint64 `json:"blob_fee_tolerance"`
}

// ChainMonitor this config is used to get batch status from chain_monitor API.
@@ -109,6 +113,10 @@ type GasOracleConfig struct {
L1BaseFeeDefault uint64 `json:"l1_base_fee_default"`
L1BlobBaseFeeDefault uint64 `json:"l1_blob_base_fee_default"`

// Upper limit values for gas oracle updates
L1BaseFeeLimit uint64 `json:"l1_base_fee_limit"`
L1BlobBaseFeeLimit uint64 `json:"l1_blob_base_fee_limit"`

// L1BlobBaseFeeThreshold the threshold of L1 blob base fee to enter the default gas price mode
L1BlobBaseFeeThreshold uint64 `json:"l1_blob_base_fee_threshold"`
}

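As a worked example of the tolerance semantics described above, a minimal sketch; values are illustrative, with 500000000 wei matching the blob_fee_tolerance in the sample config earlier in this diff:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	target := big.NewInt(1_000_000_000)              // 1 gwei target blob fee
	tolerance := new(big.Int).SetUint64(500_000_000) // 0.5 gwei absolute tolerance
	threshold := new(big.Int).Add(target, tolerance) // submit whenever current <= 1.5 gwei

	current := big.NewInt(1_200_000_000) // 1.2 gwei: above target but within tolerance
	fmt.Println("skip submission:", current.Cmp(threshold) > 0) // prints: skip submission: false
}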
@@ -167,7 +167,7 @@ func (b *BlobUploader) constructBlobCodec(dbBatch *orm.Batch) (*kzg4844.Blob, er
Chunks: chunks,
}

case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9:
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
encodingBatch = &encoding.Batch{
Index: dbBatch.Index,
ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash),

@@ -173,6 +173,18 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
} else if err != nil {
return
}
// Cap base fee update at the configured upper limit
if limit := r.cfg.GasOracleConfig.L1BaseFeeLimit; baseFee > limit {
log.Error("L1 base fee exceeds max limit, set to max limit", "baseFee", baseFee, "maxLimit", limit)
r.metrics.rollupL1RelayerGasPriceOracleFeeOverLimitTotal.Inc()
baseFee = limit
}
// Cap blob base fee update at the configured upper limit
if limit := r.cfg.GasOracleConfig.L1BlobBaseFeeLimit; blobBaseFee > limit {
log.Error("L1 blob base fee exceeds max limit, set to max limit", "blobBaseFee", blobBaseFee, "maxLimit", limit)
r.metrics.rollupL1RelayerGasPriceOracleFeeOverLimitTotal.Inc()
blobBaseFee = limit
}
data, err := r.l1GasOracleABI.Pack("setL1BaseFeeAndBlobBaseFee", new(big.Int).SetUint64(baseFee), new(big.Int).SetUint64(blobBaseFee))
if err != nil {
log.Error("Failed to pack setL1BaseFeeAndBlobBaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "err", err)

@@ -8,11 +8,12 @@ import (
)

type l1RelayerMetrics struct {
rollupL1RelayerGasPriceOraclerRunTotal prometheus.Counter
rollupL1RelayerLatestBaseFee prometheus.Gauge
rollupL1RelayerLatestBlobBaseFee prometheus.Gauge
rollupL1UpdateGasOracleConfirmedTotal prometheus.Counter
rollupL1UpdateGasOracleConfirmedFailedTotal prometheus.Counter
rollupL1RelayerGasPriceOraclerRunTotal prometheus.Counter
rollupL1RelayerLatestBaseFee prometheus.Gauge
rollupL1RelayerLatestBlobBaseFee prometheus.Gauge
rollupL1UpdateGasOracleConfirmedTotal prometheus.Counter
rollupL1UpdateGasOracleConfirmedFailedTotal prometheus.Counter
rollupL1RelayerGasPriceOracleFeeOverLimitTotal prometheus.Counter
}

var (
@@ -43,6 +44,10 @@ func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
Name: "rollup_layer1_update_gas_oracle_confirmed_failed_total",
Help: "The total number of updating layer1 gas oracle confirmed failed",
}),
rollupL1RelayerGasPriceOracleFeeOverLimitTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_gas_price_oracle_fee_over_limit_total",
Help: "The total number of times when a gas price oracle fee update went over the configured limit",
}),
}
})
return l1RelayerMetric

@@ -452,6 +452,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
// The next call of ProcessPendingBatches will then start with the batch with the different codec version.
batchesToSubmitLen := len(batchesToSubmit)
if batchesToSubmitLen > 0 && batchesToSubmit[batchesToSubmitLen-1].Batch.CodecVersion != dbBatch.CodecVersion {
forceSubmit = true
break
}

@@ -488,7 +489,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {

codecVersion := encoding.CodecVersion(firstBatch.CodecVersion)
switch codecVersion {
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9:
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
if r.cfg.ValidiumMode {
if len(batchesToSubmit) != 1 {
log.Error("validium mode only supports committing one batch at a time", "codecVersion", codecVersion, "start index", firstBatch.Index, "end index", lastBatch.Index, "batches count", len(batchesToSubmit))
@@ -747,7 +748,7 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error

var calldata []byte
switch encoding.CodecVersion(bundle.CodecVersion) {
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9:
case encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
if r.cfg.ValidiumMode {
calldata, err = r.constructFinalizeBundlePayloadValidium(dbBatch, endChunk, aggProof)
if err != nil {
@@ -1050,7 +1051,7 @@ func (r *Layer2Relayer) constructCommitBatchPayloadValidium(batch *dbBatchWithCh
commitment := common.HexToHash(lastChunk.EndBlockHash)

var version uint8
if encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV8 || encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV9 {
if encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV8 || encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV9 || encoding.CodecVersion(batch.Batch.CodecVersion) == encoding.CodecV10 {
// Validium version line starts with v1,
// but rollup-relayer behavior follows v8.
version = 1
@@ -1254,16 +1255,20 @@ func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time, metrics *l2RelayerMetr
target := calculateTargetPrice(windowSec, r.batchStrategy, oldest, hist)
current := hist[len(hist)-1]

// apply absolute tolerance offset to target
tolerance := new(big.Int).SetUint64(r.cfg.BatchSubmission.BlobFeeTolerance)
threshold := new(big.Int).Add(target, tolerance)

currentFloat, _ := current.Float64()
targetFloat, _ := target.Float64()
metrics.rollupL2RelayerCurrentBlobPrice.Set(currentFloat)
metrics.rollupL2RelayerTargetBlobPrice.Set(targetFloat)

// if current fee > target and still inside the timeout window, skip
if current.Cmp(target) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
// if current fee > threshold (target + tolerance) and still inside the timeout window, skip
if current.Cmp(threshold) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
return true, fmt.Errorf(
"blob-fee above target & window not yet passed; current=%s target=%s age=%s",
current.String(), target.String(), time.Since(oldest),
"blob-fee above threshold & window not yet passed; current=%s target=%s threshold=%s tolerance=%s age=%s",
current.String(), target.String(), threshold.String(), tolerance.String(), time.Since(oldest),
)
}

@@ -2,6 +2,7 @@ package sender

import (
"errors"
"fmt"
"math/big"

"github.com/scroll-tech/go-ethereum"
@@ -118,7 +119,7 @@ func (s *Sender) estimateGasLimit(to *common.Address, data []byte, sidecar *type

gasLimitWithoutAccessList, err := s.client.EstimateGas(s.ctx, msg)
if err != nil {
log.Error("estimateGasLimit EstimateGas failure without access list", "error", err)
log.Error("estimateGasLimit EstimateGas failure without access list", "error", err, "msg", fmt.Sprintf("%+v", msg))
return 0, nil, err
}

@@ -13,7 +13,7 @@ import (
"github.com/holiman/uint256"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/consensus/misc"
"github.com/scroll-tech/go-ethereum/common/hexutil"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/ethclient"
@@ -67,7 +67,8 @@ type FeeData struct {
// Sender Transaction sender to send transaction to l1/l2
type Sender struct {
config *config.SenderConfig
gethClient *gethclient.Client
rpcClient *rpc.Client // Raw RPC client
gethClient *gethclient.Client // Client to use for CreateAccessList
client *ethclient.Client // The client to retrieve on chain data (read-only)
writeClients []*ethclient.Client // The clients to send transactions to (write operations)
transactionSigner *TransactionSigner
@@ -141,6 +142,7 @@ func NewSender(ctx context.Context, config *config.SenderConfig, signerConfig *c
sender := &Sender{
ctx: ctx,
config: config,
rpcClient: rpcClient,
gethClient: gethclient.New(rpcClient),
client: client,
writeClients: writeClients,
@@ -841,8 +843,19 @@ func (s *Sender) getBlockNumberAndTimestampAndBaseFeeAndBlobFee(ctx context.Cont

var blobBaseFee uint64
if excess := header.ExcessBlobGas; excess != nil {
blobBaseFee = misc.CalcBlobFee(*excess).Uint64()
// Leave it up to the L1 node to compute the correct blob base fee.
// Previously we would compute it locally using `CalcBlobFee`, but
// that approach requires syncing any future L1 configuration changes.
// Note: The fetched blob base fee might not correspond to the block
// that we fetched in the previous step, but this is acceptable.
var blobBaseFeeHex hexutil.Big
if err := s.rpcClient.CallContext(ctx, &blobBaseFeeHex, "eth_blobBaseFee"); err != nil {
return 0, 0, 0, 0, fmt.Errorf("failed to call eth_blobBaseFee, err: %w", err)
}
// A correct L1 node cannot return a value that overflows uint64
blobBaseFee = blobBaseFeeHex.ToInt().Uint64()
}

// header.Number.Uint64() returns the pendingBlockNumber, so we subtract 1 to get the latestBlockNumber.
return header.Number.Uint64() - 1, header.Time, baseFee, blobBaseFee, nil
}

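A standalone sketch of the eth_blobBaseFee query used above, runnable against any post-Cancun L1 node; the endpoint URL is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("https://l1-endpoint.example.org") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// The node computes the blob base fee itself, so the caller does not
	// need to track future L1 fee-schedule changes (see the hunk above).
	var fee hexutil.Big
	if err := client.CallContext(context.Background(), &fee, "eth_blobBaseFee"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("blob base fee (wei):", fee.ToInt())
}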
@@ -21,6 +21,7 @@ import (
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
@@ -94,8 +95,9 @@ func setupEnv(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

l1Client, err := testApps.GetPoSL1Client()
l1RawClient, err := testApps.GetPoSL1Client()
assert.NoError(t, err)
l1Client := ethclient.NewClient(l1RawClient)

chainID, err := l1Client.ChainID(context.Background())
assert.NoError(t, err)

@@ -245,11 +245,13 @@ func (p *ChunkProposer) ProposeChunk() error {
// Ensure all blocks in the same chunk use the same hardfork name
// If a different hardfork name is found, truncate the blocks slice at that point
hardforkName := encoding.GetHardforkName(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time)
hardforkBoundary := false
for i := 1; i < len(blocks); i++ {
currentHardfork := encoding.GetHardforkName(p.chainCfg, blocks[i].Header.Number.Uint64(), blocks[i].Header.Time)
if currentHardfork != hardforkName {
blocks = blocks[:i]
// Truncate blocks at hardfork boundary
blocks = blocks[:i]
hardforkBoundary = true
break
}
}
@@ -321,6 +323,19 @@ func (p *ChunkProposer) ProposeChunk() error {
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
}

// No breaking condition met, but hardfork boundary reached
if hardforkBoundary {
log.Info("hardfork boundary reached, proposing chunk",
"block count", len(chunk.Blocks),
"codec version", codecVersion,
"start block number", chunk.Blocks[0].Header.Number,
"end block number", chunk.Blocks[len(chunk.Blocks)-1].Header.Number)

p.recordAllChunkMetrics(metrics)
return p.updateDBChunkInfo(&chunk, codecVersion, metrics)
}

// No breaking condition met, check for timeout
currentTimeSec := uint64(time.Now().Unix())
if metrics.FirstBlockTimestamp+p.cfg.ChunkTimeoutSec < currentTimeSec {
log.Info("first block timeout reached",

@@ -19,6 +19,8 @@ import (
"scroll-tech/rollup/internal/utils"
)

func newUint64(val uint64) *uint64 { return &val }

func testChunkProposerLimitsCodecV7(t *testing.T) {
tests := []struct {
name string
@@ -26,6 +28,7 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
chunkTimeoutSec uint64
expectedChunksLen int
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
GalileoTime *uint64
}{
{
name: "NoLimitReached",
@@ -62,6 +65,14 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "SingleBlockByForkBoundary",
maxL2Gas: 20_000_000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
GalileoTime: newUint64(1669364525), // timestamp of `block2`
},
}

for _, tt := range tests {
@@ -78,11 +89,26 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
_, err = chunkOrm.InsertChunk(context.Background(), &encoding.Chunk{Blocks: []*encoding.Block{{Header: &gethTypes.Header{Number: big.NewInt(0)}}}}, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)

// Initialize the chunk proposer.
chainConfig := &params.ChainConfig{
LondonBlock: big.NewInt(0),
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
FeynmanTime: new(uint64),
GalileoTime: tt.GalileoTime,
}

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxL2GasPerChunk: tt.maxL2Gas,
ChunkTimeoutSec: tt.chunkTimeoutSec,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV7, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}, db, nil)
}, encoding.CodecV7, chainConfig, db, nil)

// Run one round of chunk proposing.
cp.TryProposeChunk()

chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 1, 0)

@@ -3,13 +3,15 @@ package watcher
import (
"context"
"errors"
"fmt"
"math/big"

"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/consensus/misc"
"github.com/scroll-tech/go-ethereum/common/hexutil"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"

"scroll-tech/common/types"
@@ -20,7 +22,8 @@ import (
// L1WatcherClient will listen for smart contract events from Eth L1.
type L1WatcherClient struct {
ctx context.Context
client *ethclient.Client
rpcClient *rpc.Client // Raw RPC client
client *ethclient.Client // Go SDK RPC client
l1BlockOrm *orm.L1Block

// The height of the block that the watcher has retrieved header rlp
@@ -30,7 +33,7 @@ type L1WatcherClient struct {
}

// NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
func NewL1WatcherClient(ctx context.Context, rpcClient *rpc.Client, startHeight uint64, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
l1BlockOrm := orm.NewL1Block(db)
savedL1BlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(ctx)
if err != nil {
@@ -43,7 +46,8 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig

return &L1WatcherClient{
ctx: ctx,
client: client,
rpcClient: rpcClient,
client: ethclient.NewClient(rpcClient),
l1BlockOrm: l1BlockOrm,

processedBlockHeight: savedL1BlockHeight,
@@ -80,7 +84,17 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {

var blobBaseFee uint64
if excess := block.ExcessBlobGas; excess != nil {
blobBaseFee = misc.CalcBlobFee(*excess).Uint64()
// Leave it up to the L1 node to compute the correct blob base fee.
// Previously we would compute it locally using `CalcBlobFee`, but
// that approach requires syncing any future L1 configuration changes.
// Note: The fetched blob base fee might not correspond to the block
// that we fetched in the previous step, but this is acceptable.
var blobBaseFeeHex hexutil.Big
if err := w.rpcClient.CallContext(w.ctx, &blobBaseFeeHex, "eth_blobBaseFee"); err != nil {
return fmt.Errorf("failed to call eth_blobBaseFee, err: %w", err)
}
// A correct L1 node cannot return a value that overflows uint64
blobBaseFee = blobBaseFeeHex.ToInt().Uint64()
}

l1Block := orm.L1Block{

@@ -21,10 +21,10 @@ import (

func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
db := setupDB(t)
client, err := testApps.GetPoSL1Client()
l1RawClient, err := testApps.GetPoSL1Client()
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, db, nil)
watcher := NewL1WatcherClient(context.Background(), l1RawClient, l1Cfg.StartHeight, db, nil)
return watcher, db
}

@@ -186,7 +186,7 @@ func encodeBatchHeaderValidium(b *encoding.Batch, codecVersion encoding.CodecVer
)

var version uint8
if codecVersion == encoding.CodecV8 || codecVersion == encoding.CodecV9 {
if codecVersion == encoding.CodecV8 || codecVersion == encoding.CodecV9 || codecVersion == encoding.CodecV10 {
// Validium version line starts with v1,
// but rollup-relayer behavior follows v8.
version = 1

@@ -19,6 +19,7 @@ import (
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"

@@ -37,8 +38,9 @@ var (
rollupApp *bcmd.MockApp

// clients
l1Client *ethclient.Client
l2Client *ethclient.Client
l1RawClient *rpc.Client
l1Client *ethclient.Client
l2Client *ethclient.Client

l1Auth *bind.TransactOpts
l2Auth *bind.TransactOpts
@@ -91,8 +93,9 @@ func setupEnv(t *testing.T) {
assert.NoError(t, testApps.StartPoSL1Container())
rollupApp = bcmd.NewRollupApp(testApps, "../conf/config.json")

l1Client, err = testApps.GetPoSL1Client()
l1RawClient, err = testApps.GetPoSL1Client()
assert.NoError(t, err)
l1Client = ethclient.NewClient(l1RawClient)
l2Client, err = testApps.GetL2GethClient()
assert.NoError(t, err)
l1GethChainID, err = l1Client.ChainID(context.Background())

@@ -36,7 +36,7 @@ func testImportL1GasPrice(t *testing.T) {
// Create L1Watcher
startHeight, err := l1Client.BlockNumber(context.Background())
assert.NoError(t, err)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, db, nil)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1RawClient, startHeight-1, db, nil)

// fetch new blocks
number, err := l1Client.BlockNumber(context.Background())
@@ -110,7 +110,7 @@ func testImportDefaultL1GasPriceDueToL1GasPriceSpike(t *testing.T) {
// Create L1Watcher
startHeight, err := l1Client.BlockNumber(context.Background())
assert.NoError(t, err)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-2, db, nil)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1RawClient, startHeight-2, db, nil)

// fetch new blocks
number, err := l1Client.BlockNumber(context.Background())

rollup/tests/integration_tool/block_fetching.go (new file, 84 lines)
@@ -0,0 +1,84 @@
package main

import (
"context"
"fmt"
"math/big"

"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
)

func fetchAndStoreBlocks(ctx context.Context, from, to uint64) ([]*encoding.Block, error) {
validiumMode := cfg.ValidiumMode
cfg := cfg.FetchConfig
client, err := rpc.Dial(cfg.Endpoint)
if err != nil {
return nil, fmt.Errorf("failed to connect l2 geth, endpoint %s, err %v", cfg.Endpoint, err)
}
defer client.Close()

ethCli := ethclient.NewClient(client)
var blocks []*encoding.Block
for number := from; number <= to; number++ {
log.Debug("retrieving block", "height", number)
block, err := ethCli.BlockByNumber(ctx, new(big.Int).SetUint64(number))
if err != nil {
return nil, fmt.Errorf("failed to BlockByNumber: %v. number: %v", err, number)
}

blockTxs := block.Transactions()

var count int
for _, tx := range blockTxs {
if tx.IsL1MessageTx() {
count++
}
}
log.Info("retrieved block", "height", block.Header().Number, "hash", block.Header().Hash().String(), "L1 message count", count)

// use original (encrypted) L1 message txs in validium mode
if validiumMode {
var txs []*types.Transaction

if count > 0 {
log.Info("Fetching encrypted messages in validium mode")
err = client.CallContext(ctx, &txs, "scroll_getL1MessagesInBlock", block.Hash(), "synced")
if err != nil {
return nil, fmt.Errorf("failed to get L1 messages: %v, block hash: %v", err, block.Hash().Hex())
}
}

// sanity check
if len(txs) != count {
return nil, fmt.Errorf("L1 message count mismatch: expected %d, got %d", count, len(txs))
}

for ii := 0; ii < count; ii++ {
// sanity check
if blockTxs[ii].AsL1MessageTx().QueueIndex != txs[ii].AsL1MessageTx().QueueIndex {
return nil, fmt.Errorf("L1 message queue index mismatch at index %d: expected %d, got %d", ii, blockTxs[ii].AsL1MessageTx().QueueIndex, txs[ii].AsL1MessageTx().QueueIndex)
}

log.Info("Replacing L1 message tx in validium mode", "index", ii, "queueIndex", txs[ii].AsL1MessageTx().QueueIndex, "decryptedTxHash", blockTxs[ii].Hash().Hex(), "originalTxHash", txs[ii].Hash().Hex())
blockTxs[ii] = txs[ii]
}
}

withdrawRoot, err3 := ethCli.StorageAt(ctx, cfg.L2MessageQueueAddress, cfg.WithdrawTrieRootSlot, big.NewInt(int64(number)))
if err3 != nil {
return nil, fmt.Errorf("failed to get withdrawRoot: %v. number: %v", err3, number)
}
blocks = append(blocks, &encoding.Block{
Header: block.Header(),
Transactions: encoding.TxsToTxsData(blockTxs),
WithdrawRoot: common.BytesToHash(withdrawRoot),
})
}

return blocks, nil
}
@@ -42,13 +42,21 @@ func randomPickKfromN(n, k int, rng *rand.Rand) []int {
return ret
}

func importData(ctx context.Context, beginBlk, endBlk uint64, chkNum, batchNum, bundleNum int, seed int64) (*importRecord, error) {
func importData(ctx context.Context, beginBlk, endBlk uint64, blocks []*encoding.Block, chkNum, batchNum, bundleNum int, seed int64) (*importRecord, error) {

db, err := database.InitDB(cfg.DBConfig)
if err != nil {
return nil, err
}

if len(blocks) > 0 {
log.Info("import block")
blockOrm := orm.NewL2Block(db)
if err := blockOrm.InsertL2Blocks(ctx, blocks); err != nil {
return nil, err
}
}

ret := &importRecord{}
// Create a new random source with the provided seed
source := rand.NewSource(seed)

@@ -10,6 +10,7 @@ import (
"strings"

"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"

@@ -40,12 +41,6 @@ var seedFlag = cli.Int64Flag{
Value: 0,
}

var codecFlag = cli.IntFlag{
Name: "codec",
Usage: "codec version, valid from 6, default(auto) is 0",
Value: 0,
}

func parseThreeIntegers(value string) (int, int, int, error) {
// Split the input string by comma
parts := strings.Split(value, ",")
@@ -84,10 +79,21 @@ func parseThreeIntegers(value string) (int, int, int, error) {
return values[0], values[1], values[2], nil
}

type fetchConfig struct {
// node url.
Endpoint string `json:"endpoint"`
// The L2MessageQueue contract address deployed on layer 2 chain.
L2MessageQueueAddress common.Address `json:"l2_message_queue_address"`
// The WithdrawTrieRootSlot in L2MessageQueue contract.
WithdrawTrieRootSlot common.Hash `json:"withdraw_trie_root_slot,omitempty"`
}

// load a compatible type of config for rollup
type config struct {
DBConfig *database.Config `json:"db_config"`
FetchConfig *fetchConfig `json:"fetch_config,omitempty"`
ValidiumMode bool `json:"validium_mode"`
CodecVersion int `json:"codec_version"`
}

func init() {
@@ -97,7 +103,7 @@ func init() {
app.Name = "integration-test-tool"
app.Usage = "The Scroll L2 Integration Test Tool"
app.Version = version.Version
app.Flags = append(app.Flags, &codecFlag, &seedFlag, &outputNumFlag, &outputPathFlag)
app.Flags = append(app.Flags, &seedFlag, &outputNumFlag, &outputPathFlag)
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Before = func(ctx *cli.Context) error {
if err := utils.LogSetup(ctx); err != nil {
@@ -120,13 +126,13 @@ func newConfig(file string) (*config, error) {
return nil, err
}

cfg := &config{}
err = json.Unmarshal(buf, cfg)
loadCfg := &config{}
err = json.Unmarshal(buf, loadCfg)
if err != nil {
return nil, err
}

return cfg, nil
return loadCfg, nil
}

func action(ctx *cli.Context) error {
@@ -135,9 +141,8 @@ func action(ctx *cli.Context) error {
return fmt.Errorf("specify begin and end block number")
}

codecFl := ctx.Int(codecFlag.Name)
if codecFl != 0 {
switch codecFl {
if cfg.CodecVersion != 0 {
switch cfg.CodecVersion {
case 6:
codecCfg = encoding.CodecV6
case 7:
@@ -146,8 +151,10 @@ func action(ctx *cli.Context) error {
codecCfg = encoding.CodecV8
case 9:
codecCfg = encoding.CodecV9
case 10:
codecCfg = encoding.CodecV10
default:
return fmt.Errorf("invalid codec version %d", codecFl)
return fmt.Errorf("invalid codec version %d", cfg.CodecVersion)
}
log.Info("set codec", "version", codecCfg)
}
@@ -161,6 +168,14 @@ func action(ctx *cli.Context) error {
return fmt.Errorf("invalid begin block number: %w", err)
}

var import_blocks []*encoding.Block
if cfg.FetchConfig != nil {
import_blocks, err = fetchAndStoreBlocks(ctx.Context, beginBlk, endBlk)
if err != nil {
return err
}
}

chkNum, batchNum, bundleNum, err := parseThreeIntegers(ctx.String(outputNumFlag.Name))
if err != nil {
return err
@@ -174,7 +189,7 @@ func action(ctx *cli.Context) error {

outputPath := ctx.String(outputPathFlag.Name)
log.Info("output", "Seed", seed, "file", outputPath)
ret, err := importData(ctx.Context, beginBlk, endBlk, chkNum, batchNum, bundleNum, seed)
ret, err := importData(ctx.Context, beginBlk, endBlk, import_blocks, chkNum, batchNum, bundleNum, seed)
if err != nil {
return err
}

@@ -5,8 +5,8 @@ go 1.22
toolchain go1.22.2

require (
github.com/scroll-tech/da-codec v0.9.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975
github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)

@@ -93,10 +93,10 @@ github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeC
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/scroll-tech/da-codec v0.9.0 h1:UvHNdSJuVhi/j9pOH7SXkRck52/zeE8T35Fx6qTPW70=
github.com/scroll-tech/da-codec v0.9.0/go.mod h1:w+vwIvNiWdiNHVE9yIY2Klx6G4s+SQhEJVSmVG/IsEQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975 h1:KluZffkRRJ4K9UyvH/r2g8Lp16/NSK8j26MR33hHmoQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251117065849-b5c3dd1aa975/go.mod h1:6BVek7YliYh+YeHOSjguPw9GT9BhVBfThArxzVlpqdQ=
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

@@ -9,6 +9,9 @@ ifndef END_BLOCK
$(error END_BLOCK is not set. Define it in .make.env or pass END_BLOCK=<end_block>)
endif

BLOCK_PRE_MIGRATIONS := $(wildcard conf/*.sql)
.OPTIONAL: $(BLOCK_PRE_MIGRATIONS)

all: setup_db test_tool import_data

clean:
@@ -25,6 +28,11 @@ check_vars: | conf
exit 1; \
fi

migration_blocks: $(BLOCK_PRE_MIGRATIONS)
ifneq ($(strip $(BLOCK_PRE_MIGRATIONS)),)
GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} up-to 100
endif

setup_db: clean
docker compose up --detach
@echo "Waiting for PostgreSQL to be ready..."
@@ -42,30 +50,18 @@ setup_db: clean
fi; \
done
${GOOSE_CMD} up
GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} up-to 100

reset_db:
GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} down
${GOOSE_CMD} down-to 0
${GOOSE_CMD} up
GOOSE_MIGRATION_DIR=conf ${GOOSE_CMD} up-to 100

test_tool:
go build -o $(PWD)/build/bin/e2e_tool ../../rollup/tests/integration_tool

build/bin/e2e_tool: test_tool

import_data_euclid: build/bin/e2e_tool check_vars
build/bin/e2e_tool --config conf/config.json --codec 7 ${BEGIN_BLOCK} ${END_BLOCK}

import_data_feynman: build/bin/e2e_tool check_vars
build/bin/e2e_tool --config conf/config.json --codec 8 ${BEGIN_BLOCK} ${END_BLOCK}

import_data_galileo: build/bin/e2e_tool check_vars
build/bin/e2e_tool --config conf/config.json --codec 9 ${BEGIN_BLOCK} ${END_BLOCK}

import_data: build/bin/e2e_tool check_vars
build/bin/e2e_tool --config conf/config.json --codec ${CODEC_VERSION} ${BEGIN_BLOCK} ${END_BLOCK}
import_data: build/bin/e2e_tool check_vars migration_blocks
build/bin/e2e_tool --config conf/config.json ${BEGIN_BLOCK} ${END_BLOCK}

reimport_data: reset_db import_data

@@ -1,4 +1,3 @@
BEGIN_BLOCK?=35
END_BLOCK?=49
CODEC_VERSION?=8
SCROLL_FORK_NAME=feynman
@@ -5,5 +5,6 @@
"maxOpenNum": 5,
"maxIdleNum": 1
},
"validium_mode": true
"validium_mode": true,
"codec_version": 8
}
File diff suppressed because one or more lines are too long
@@ -25,7 +25,7 @@ SELECT 'INSERT INTO l2_block (number, hash, parent_hash, header, withdraw_root,
quote_literal(transactions) ||
');'
FROM l2_block
WHERE number >= 20278000 and number <= 20278050
WHERE number >= 15206780 and number <= 15206809
ORDER BY number ASC;

-- Write footer

@@ -1,4 +1,3 @@
BEGIN_BLOCK?=10973711
END_BLOCK?=10973721
CODEC_VERSION?=8
SCROLL_FORK_NAME=feynman
@@ -5,5 +5,6 @@
"maxOpenNum": 5,
"maxIdleNum": 1
},
"validium_mode": false
"validium_mode": false,
"codec_version": 8
}
@@ -1,4 +1,3 @@
BEGIN_BLOCK?=20278022
END_BLOCK?=20278025
CODEC_VERSION?=9
BEGIN_BLOCK?=15206785
END_BLOCK?=15206794
SCROLL_FORK_NAME=galileo
File diff suppressed because one or more lines are too long
@@ -5,5 +5,6 @@
"maxOpenNum": 5,
"maxIdleNum": 1
},
"validium_mode": false
"validium_mode": false,
"codec_version": 9
}
tests/prover-e2e/sepolia-galileoV2/.make.env (new file, 3 lines)
@@ -0,0 +1,3 @@
BEGIN_BLOCK?=20239245
END_BLOCK?=20239250
SCROLL_FORK_NAME=galileoV2
tests/prover-e2e/sepolia-galileoV2/config.json (new file, 15 lines)
@@ -0,0 +1,15 @@
{
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://dev:dev@localhost:5432/scroll?sslmode=disable",
"maxOpenNum": 5,
"maxIdleNum": 1
},
"fetch_config": {
"endpoint": "http://l2-sequencer-galileo-6.devnet.scroll.tech:8545",
"l2_message_queue_address": "0x5300000000000000000000000000000000000000"
},
"validium_mode": false,
"codec_version": 10

}
tests/prover-e2e/sepolia-galileoV2/config.template.json (new file, 40 lines)
@@ -0,0 +1,40 @@
{
"prover_manager": {
"provers_per_session": 1,
"session_attempts": 5,
"external_prover_threshold": 32,
"bundle_collection_time_sec": 180,
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"min_prover_version": "v4.4.33",
"verifiers": [
{
"assets_path": "assets",
"fork_name": "galileoV2"
}
]
}
},
"db": {
"driver_name": "postgres",
"dsn": "postgres://dev:dev@localhost/scroll?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"l2": {
"validium_mode": false,
"chain_id": 534351,
"l2geth": {
"endpoint": "<search for a public rpc endpoint like alchemy>"
}
},
"auth": {
"secret": "prover secret key",
"challenge_expire_duration_sec": 3600,
"login_expire_duration_sec": 3600
},
"sequencer": {
"decryption_key": "not needed"
}
}
tests/prover-e2e/sepolia-galileoV2/genesis.json (new file, 111 lines)
File diff suppressed because one or more lines are too long
Some files were not shown because too many files have changed in this diff.