Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-11 23:18:07 -05:00
Compare commits: v4.5.11...coordinato (136 commits)
Commit SHA1s:
14e2633ba3, 7de388ef1a, 21326c25e6, 27dd62eac3, 22479a7952, 690bc01c41, e75d6c16a9, 752e4e1117, 2ecc42e2f5, de72e2dccb,
edb51236e2, 15a23478d1, 9100a0bd4a, 0ede0cd41f, 9c2bc02f64, 9dceae1ca2, 9e5579c4cb, ac4a72003c, 19447984bd, d66d705456,
c938d6c25e, 235ba874c6, 6bee33036f, cf9e3680c0, e9470ff7a5, 51b1e79b31, c22d9ecad1, e7551650b2, 20fde41be8, 4df1dd8acd,
1985e54ab3, 6696aac16a, 4b79e63c9b, ac0396db3c, 17e6c5b7ac, b6e33456fa, 7572bf8923, 5d41788b07, 8f8a537fba, bfc0fdd7ce,
b1c3a4ecc0, d9a29cddce, c992157eb4, 426c57a5fa, 404c664e10, 8a15836d20, 4365aafa9a, 6ee026fa16, c79ad57fb7, fa5b113248,
b7fdf48c30, ad0c918944, 884b050866, 1098876183, 9e520e7769, 1d9fa41535, b7f23c6734, 057e22072c, de7f6e56a9, c7b83a0784,
92ca7a6b76, 256c90af6f, 50f3e1a97c, 2721503657, 3b323198dc, c11e0283e8, a04b64df03, 78dbe6cde1, 9df6429d98, e6be62f633,
c72ee5d679, a5a7844646, 4725d8a73c, 322766f54f, 7ff5b190ec, 5614ec3b86, b297edd28d, 5a07a1652b, 47c85d4983, 1552e98b79,
64ef0f4ec0, 321dd43af8, 624a7a29b8, 4f878d9231, 7b3a65b35b, 0d238d77a6, 76ecdf064a, 5c6c225f76, 3adb2e0a1b, 412ad56a64,
9796d16f6c, 1f2b857671, 5dbb5c5fb7, a65b3066a3, 1f2b397bbd, ae791a0714, c012f7132d, 6897cc54bd, d21fa36803, fc75299eb3,
4bfcd35d0c, 6d62f8e5fa, 392ae07736, db80b47820, daa1387208, 67b05558e2, 1e447b0fef, f7c6ecadf4, 9d94f943e5, de17ad43ff,
4233ad928c, 3050ccb40f, 12e89201a1, a0ee508bbd, b8909d3795, b7a172a519, 80807dbb75, a776ca7c82, ea38ae7e96, 9dc57c6126,
9367565a31, d2f7663d26, b0943b1035, 5d6b5a89f4, 4ee459a602, 276385fd0a, 82fb15de3b, 5204ad50e0, f824fb0efc, a55c7bdc77,
47b1a037a9, ae34020c34, fa9fab6e98, c4f869a33a, 0cee9a51e6, 97de988228
18  .github/workflows/common.yml  vendored
@@ -29,7 +29,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-12-06
toolchain: nightly-2025-08-18
override: true
components: rustfmt, clippy
- name: Install Go
@@ -41,16 +41,12 @@ jobs:
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
- name: Setup SSH for private repos
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
- name: Lint
working-directory: 'common'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
workspaces: ". -> target"
# - name: Lint
# working-directory: 'common'
# run: |
# rm -rf $HOME/.cache/golangci-lint
# make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
4  .github/workflows/coordinator.yml  vendored
@@ -33,7 +33,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-12-03
toolchain: nightly-2025-08-18
override: true
components: rustfmt, clippy
- name: Install Go
@@ -112,7 +112,7 @@ jobs:
- name: Test coordinator packages
working-directory: 'coordinator'
run: |
# go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=coverage.txt -covermode=atomic ./...
make libzkp
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic -tags mock_verifier ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
134  .github/workflows/docker.yml  vendored
@@ -10,7 +10,8 @@ env:
jobs:
gas_oracle:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -50,12 +51,11 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

rollup_relayer:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -95,12 +95,55 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

blob_uploader:
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: blob-uploader
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: blob-uploader
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/blob_uploader.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}

rollup-db-cli:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -140,12 +183,11 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

bridgehistoryapi-fetcher:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -185,12 +227,11 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

bridgehistoryapi-api:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -230,12 +271,11 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

bridgehistoryapi-db-cli:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -275,12 +315,11 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

coordinator-api:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -307,13 +346,6 @@ jobs:
REPOSITORY: coordinator-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Setup SSH for private repos
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
- name: Run custom script
run: |
./build/dockerfiles/coordinator-api/init-openvm.sh
- name: Build and push
uses: docker/build-push-action@v3
env:
@@ -326,12 +358,54 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

coordinator-proxy:
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: coordinator-proxy
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: coordinator-proxy
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/coordinator-proxy.Dockerfile
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}

coordinator-cron:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -371,6 +445,4 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
1  .github/workflows/integration.yml  vendored
@@ -38,6 +38,7 @@ jobs:
make dev_docker
make -C rollup mock_abi
make -C common/bytecode all
make -C coordinator/internal/logic/libzkp build
- name: Run integration tests
run: |
go test -v -tags="mock_prover mock_verifier" -p 1 -coverprofile=coverage.txt scroll-tech/integration-test/...
11  .github/workflows/intermediate-docker.yml  vendored
@@ -22,10 +22,9 @@ on:
required: true
type: choice
options:
- nightly-2023-12-03
- nightly-2022-12-10
- 1.86.0
default: "nightly-2023-12-03"
- nightly-2025-08-18
default: "nightly-2025-08-18"
PYTHON_VERSION:
description: "Python version"
required: false
@@ -40,7 +39,8 @@ on:
options:
- "11.7.1"
- "12.2.2"
default: "11.7.1"
- "12.9.1"
default: "12.9.1"
CARGO_CHEF_TAG:
description: "Cargo chef version"
required: true
@@ -69,7 +69,8 @@ defaults:
jobs:
build-and-publish-intermediate:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
1  .gitignore  vendored
@@ -24,3 +24,4 @@ sftp-config.json
*~

target
zkvm-prover/config.json
5173  zkvm-prover/Cargo.lock → Cargo.lock  generated (file diff suppressed because it is too large)
67  Cargo.toml  Normal file
@@ -0,0 +1,67 @@
[workspace]
members = [
"crates/libzkp",
"crates/l2geth",
"crates/libzkp_c",
"crates/prover-bin",
]

resolver = "2"

[workspace.package]
authors = ["Scroll developers"]
edition = "2021"
homepage = "https://scroll.io"
readme = "README.md"
repository = "https://github.com/scroll-tech/scroll"
version = "4.7.1"

[workspace.dependencies]
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }

sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll", "rkyv"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2" }
sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll"] }

metrics = "0.23.0"
metrics-util = "0.17"
metrics-tracing-context = "0.16.0"

anyhow = "1.0"
alloy = { version = "1", default-features = false }
alloy-primitives = { version = "1.4.1", default-features = false, features = ["tiny-keccak"] }
# also use this to trigger "serde" feature for primitives
alloy-serde = { version = "1", default-features = false }

serde = { version = "1", default-features = false, features = ["derive"] }
serde_json = { version = "1.0" }
serde_derive = "1.0"
serde_with = "3"
itertools = "0.14"
tiny-keccak = "2.0"
tracing = "0.1"
eyre = "0.6"
once_cell = "1.20"
base64 = "0.22"

[patch.crates-io]
revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-bytecode = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-context = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-context-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-database = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-database-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-handler = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-inspector = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-interpreter = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-precompile = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-primitives = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-state = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }

[profile.maxperf]
inherits = "release"
lto = "fat"
codegen-units = 1
2  Makefile
@@ -1,6 +1,6 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update

L2GETH_TAG=scroll-v5.8.23
L2GETH_TAG=scroll-v5.9.17

help: ## Display this help message
@grep -h \
@@ -28,7 +28,7 @@ We welcome community contributions to this repository. Before you submit any iss

## Prerequisites
+ Go 1.21
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ Rust (for version, see [rust-toolchain](./rust-toolchain))
+ Hardhat / Foundry
+ Docker
@@ -10,15 +10,18 @@ require (
github.com/go-redis/redis/v8 v8.11.5
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950
github.com/stretchr/testify v1.9.0
github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.11.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)

replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
// Hotfix for header hash incompatibility issue.
// PR: https://github.com/scroll-tech/go-ethereum/pull/1133/
// CAUTION: Requires careful handling. When upgrading go-ethereum, ensure this fix remains up-to-date in this branch.
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b

require (
dario.cat/mergo v1.0.0 // indirect
@@ -30,10 +33,10 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.13.0 // indirect
github.com/consensys/bavard v0.1.27 // indirect
github.com/consensys/gnark-crypto v0.16.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
@@ -41,7 +44,7 @@ require (
github.com/docker/docker v26.1.0+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
@@ -98,7 +101,7 @@ require (
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.13 // indirect
github.com/supranational/blst v0.3.15 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
@@ -110,7 +113,7 @@ require (
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.21.0 // indirect
@@ -53,16 +53,16 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
|
||||
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
|
||||
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
|
||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
||||
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
||||
github.com/consensys/gnark-crypto v0.13.0 h1:VPULb/v6bbYELAPTDFINEVaMTTybV5GLxDdcjnS+4oc=
|
||||
github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
|
||||
github.com/consensys/bavard v0.1.27 h1:j6hKUrGAy/H+gpNrpLU3I26n1yc+VMGmd6ID5+gAhOs=
|
||||
github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
|
||||
github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
|
||||
github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
|
||||
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
|
||||
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
|
||||
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
|
||||
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
@@ -88,8 +88,8 @@ github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T
|
||||
github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
|
||||
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
|
||||
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
|
||||
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
|
||||
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
|
||||
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
@@ -214,8 +214,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
|
||||
github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
@@ -309,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54 h1:qVpsVu1J91opTn6HYeuzWcBRVhQmPR8g05i+PlOjlI4=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 h1:qfOaRflvH1vtnFWloB7BveKlP/VqYgMqLJ6e9TlBJ/8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
|
||||
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
|
||||
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b h1:pMQKnroJoS/FeL1aOWkz7/u1iBHUP8PWjZstNuzoUGE=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
|
||||
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
|
||||
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
|
||||
@@ -341,10 +341,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
|
||||
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
|
||||
github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
|
||||
@@ -387,8 +387,8 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
|
||||
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
||||
@@ -38,6 +38,7 @@ type FetcherConfig struct {
BeaconNodeAPIEndpoint string `json:"BeaconNodeAPIEndpoint"`
BlobScanAPIEndpoint string `json:"BlobScanAPIEndpoint"`
BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
AwsS3Endpoint string `json:"AwsS3Endpoint"`
}

// RedisConfig redis config
@@ -39,6 +39,9 @@ type L1MessageFetcher struct {
// NewL1MessageFetcher creates a new L1MessageFetcher instance.
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) (*L1MessageFetcher, error) {
blobClient := blob_client.NewBlobClients()
if cfg.AwsS3Endpoint != "" {
blobClient.AddBlobClient(blob_client.NewAwsS3Client(cfg.AwsS3Endpoint))
}
if cfg.BeaconNodeAPIEndpoint != "" {
beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.BeaconNodeAPIEndpoint)
if err != nil {
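The constructor now also registers an AWS S3 blob source when `AwsS3Endpoint` is configured, ahead of the beacon-node and other API clients. A minimal stand-alone sketch of that optional-registration pattern, using a hypothetical `BlobSource` interface rather than the project's actual `blob_client` package:

```go
package main

import "fmt"

// BlobSource is a stand-in for the project's blob client interface (assumption).
type BlobSource interface {
	Name() string
}

type s3Client struct{ endpoint string }

func (c s3Client) Name() string { return "s3:" + c.endpoint }

type beaconClient struct{ endpoint string }

func (c beaconClient) Name() string { return "beacon:" + c.endpoint }

// buildBlobSources adds each client only when its endpoint is configured,
// keeping the registration (and therefore fallback) order deterministic.
func buildBlobSources(awsS3Endpoint, beaconNodeAPIEndpoint string) []BlobSource {
	var sources []BlobSource
	if awsS3Endpoint != "" {
		sources = append(sources, s3Client{endpoint: awsS3Endpoint})
	}
	if beaconNodeAPIEndpoint != "" {
		sources = append(sources, beaconClient{endpoint: beaconNodeAPIEndpoint})
	}
	return sources
}

func main() {
	for _, s := range buildBlobSources("https://blobs.example-s3.invalid", "") {
		fmt.Println("registered", s.Name())
	}
}
```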
@@ -361,7 +361,6 @@ func getTxHistoryInfoFromBridgeBatchDepositMessage(message *orm.BridgeBatchDepos
func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pageNum, pageSize uint64) ([]*types.TxHistoryInfo, uint64, bool, error) {
start := int64((pageNum - 1) * pageSize)
end := start + int64(pageSize) - 1

total, err := h.redis.ZCard(ctx, cacheKey).Result()
if err != nil {
log.Error("failed to get zcard result", "error", err)
@@ -372,6 +371,10 @@ func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pa
return nil, 0, false, nil
}

if start >= total {
return nil, 0, false, nil
}

values, err := h.redis.ZRevRange(ctx, cacheKey, start, end).Result()
if err != nil {
log.Error("failed to get zrange result", "error", err)
@@ -450,5 +453,6 @@ func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKe
log.Error("cache miss after write, expect hit", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
return nil, 0, err
}

return pagedTxs, total, nil
}
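The new `start >= total` guard skips the Redis ZRevRange call when the requested page lies entirely beyond the cached sorted set. A self-contained sketch of the same bounds arithmetic, assuming 1-indexed pages as in the handler above:

```go
package main

import "fmt"

// pageBounds converts a 1-indexed page into ZREVRANGE-style start/end offsets
// and reports whether the page overlaps the `total` cached members at all.
func pageBounds(pageNum, pageSize uint64, total int64) (start, end int64, ok bool) {
	start = int64((pageNum - 1) * pageSize)
	end = start + int64(pageSize) - 1
	if total == 0 || start >= total {
		return 0, 0, false // page is past the end; caller falls back to the database
	}
	return start, end, true
}

func main() {
	// 53 cached entries, page 6 of size 10 -> offsets 50..59 still overlap the cache.
	fmt.Println(pageBounds(6, 10, 53)) // 50 59 true
	// Page 7 would start at offset 60, past the end -> skip the ZRevRange call.
	fmt.Println(pageBounds(7, 10, 53)) // 0 0 false
}
```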
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"math/big"
"time"

"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
@@ -252,6 +253,11 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
// Key: commit transaction hash
// Value: parent batch hashes (in order) for each processed CommitBatch event in the transaction
txBlobIndexMap := make(map[common.Hash][]common.Hash)

// Cache for the previous transaction to avoid duplicate fetches
var lastTxHash common.Hash
var lastTx *types.Transaction

var l1BatchEvents []*orm.BatchEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
@@ -261,11 +267,28 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
log.Error("Failed to unpack CommitBatch event", "err", err)
return nil, err
}
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
if err != nil || isPending {
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
return nil, err

// Get transaction, reuse if it's the same as previous
var commitTx *types.Transaction
if lastTxHash == vlog.TxHash && lastTx != nil {
commitTx = lastTx
} else {
log.Debug("Fetching commit batch transaction", "txHash", vlog.TxHash.String())

// Create 10-second timeout context for transaction fetch
txCtx, txCancel := context.WithTimeout(ctx, 10*time.Second)
fetchedTx, isPending, err := client.TransactionByHash(txCtx, vlog.TxHash)
txCancel()

if err != nil || isPending {
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
return nil, err
}
commitTx = fetchedTx
lastTxHash = vlog.TxHash
lastTx = commitTx
}

version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
if err != nil {
log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
@@ -305,7 +328,13 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
return nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex, event.BatchHash.String(), calculatedBatch.Hash().String())
}

blocks, err := e.getBatchBlockRangeFromBlob(ctx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
log.Debug("Processing blob data", "blobVersionedHash", blobVersionedHash.String(), "batchIndex", event.BatchIndex.Uint64(), "currentIndex", currentIndex)

// Create 20-second timeout context for blob processing
blobCtx, blobCancel := context.WithTimeout(ctx, 20*time.Second)
blocks, err := e.getBatchBlockRangeFromBlob(blobCtx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
blobCancel()

if err != nil {
return nil, fmt.Errorf("failed to process versioned blob, blobVersionedHash: %s, block number: %d, blob index: %d, err: %w",
blobVersionedHash.String(), vlog.BlockNumber, currentIndex, err)
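Two patterns recur in this hunk: reuse the previously fetched commit transaction when consecutive CommitBatch events share a transaction hash, and wrap each remote fetch in its own timeout context. A generic sketch of that combination (the fetch function below is a placeholder, not the project's ethclient wrapper):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// fetchFunc stands in for an RPC call such as fetching a transaction by hash (assumption).
type fetchFunc func(ctx context.Context, hash string) (string, error)

// cachedFetcher remembers the last (hash, value) pair so repeated events from
// the same transaction do not trigger duplicate RPC round-trips.
type cachedFetcher struct {
	fetch    fetchFunc
	lastHash string
	lastVal  string
}

func (c *cachedFetcher) get(ctx context.Context, hash string, timeout time.Duration) (string, error) {
	if hash == c.lastHash && c.lastVal != "" {
		return c.lastVal, nil // cache hit: same tx as the previous event
	}
	callCtx, cancel := context.WithTimeout(ctx, timeout) // per-call deadline
	defer cancel()
	val, err := c.fetch(callCtx, hash)
	if err != nil {
		return "", err
	}
	c.lastHash, c.lastVal = hash, val
	return val, nil
}

func main() {
	calls := 0
	f := &cachedFetcher{fetch: func(ctx context.Context, hash string) (string, error) {
		calls++
		return "tx-data-for-" + hash, nil
	}}
	ctx := context.Background()
	f.get(ctx, "0xabc", 10*time.Second)
	f.get(ctx, "0xabc", 10*time.Second) // served from the one-entry cache
	fmt.Println("rpc calls:", calls)    // prints: rpc calls: 1
}
```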
@@ -154,10 +154,10 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("tx_status = ?", types.TxStatusTypeSent)
db = db.Where("tx_status in (?)", []types.TxStatusType{types.TxStatusTypeSent, types.TxStatusTypeFailedRelayed, types.TxStatusTypeRelayTxReverted})
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
db = db.Limit(10000)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get L2 claimable withdrawal messages by sender address, sender: %v, error: %w", sender, err)
}
30  build/dockerfiles/blob_uploader.Dockerfile  Normal file
@@ -0,0 +1,30 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

# Build blob_uploader
FROM base as builder

RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/blob_uploader/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/blob_uploader

# Pull blob_uploader into a second stage deploy ubuntu container
FROM ubuntu:20.04

RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y

ENV CGO_LDFLAGS="-ldl"

COPY --from=builder /bin/blob_uploader /bin/
WORKDIR /app
ENTRYPOINT ["blob_uploader"]
5  build/dockerfiles/blob_uploader.Dockerfile.dockerignore  Normal file
@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*
@@ -1,26 +1,25 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as chef
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as chef
WORKDIR app

FROM chef as planner
COPY ./common/libzkp/impl/ .
COPY ./crates/ ./crates/
COPY ./Cargo.* ./
COPY ./rust-toolchain ./
RUN cargo chef prepare --recipe-path recipe.json

FROM chef as zkp-builder
COPY ./common/libzkp/impl/rust-toolchain ./
COPY ./rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
# run ./build/dockerfiles/coordinator-api/init-openvm.sh to get openvm-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
RUN cargo chef cook --release --recipe-path recipe.json

COPY ./common/libzkp/impl .
RUN cargo build --release
COPY ./crates/ ./crates/
COPY ./Cargo.* ./
COPY .git .git
RUN cargo build --release -p libzkp-c

# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
@@ -35,9 +34,9 @@ RUN go mod download -x
# Build coordinator
FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/libzkp/lib/
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api && mv ./build/bin/coordinator_api /bin/coordinator_api
RUN mv coordinator/internal/logic/libzkp/lib /bin/

# Pull coordinator into a second stage deploy ubuntu container
FROM ubuntu:20.04
@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*

permissionless-batches/conf/
@@ -1,24 +0,0 @@
# openvm
# same order and features as zkvm-prover/Cargo.toml.gpu
[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics"] }
openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }
@@ -1,2 +0,0 @@
[url "https://github.com/"]
insteadOf = ssh://git@github.com/
@@ -1,12 +0,0 @@
#!/bin/bash
set -uex

OPENVM_GPU_COMMIT=dfa10b4

DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)

# checkout openvm-gpu
if [ ! -d $DIR/openvm-gpu ]; then
git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
fi
cd $DIR/openvm-gpu && git fetch && git checkout ${OPENVM_GPU_COMMIT}
@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*

permissionless-batches/conf/
26  build/dockerfiles/coordinator-proxy.Dockerfile  Normal file
@@ -0,0 +1,26 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

# Build coordinator proxy
FROM base as builder
COPY . .
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_proxy && mv ./build/bin/coordinator_proxy /bin/coordinator_proxy

# Pull coordinator proxy into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install vim netcat-openbsd net-tools curl jq -y
COPY --from=builder /bin/coordinator_proxy /bin/
RUN /bin/coordinator_proxy --version
WORKDIR /app
ENTRYPOINT ["/bin/coordinator_proxy"]
@@ -0,0 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*

permissionless-batches/conf/
@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*

permissionless-batches/conf/
@@ -1,5 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*

permissionless-batches/conf/
@@ -18,6 +18,6 @@ RUN cd /src/zkvm-prover && make prover

FROM ubuntu:24.04 AS runtime

COPY --from=builder /src/zkvm-prover/target/release/prover /usr/local/bin/
COPY --from=builder /src/target/release/prover /usr/local/bin/

ENTRYPOINT ["prover"]
30  build/dockerfiles/recovery_permissionless_batches.Dockerfile  Normal file
@@ -0,0 +1,30 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base

WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

# Build rollup_relayer
FROM base as builder

RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/permissionless_batches/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer

# Pull rollup_relayer into a second stage deploy ubuntu container
FROM ubuntu:20.04

RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y

ENV CGO_LDFLAGS="-ldl"

COPY --from=builder /bin/rollup_relayer /bin/
WORKDIR /app
ENTRYPOINT ["rollup_relayer"]
@@ -0,0 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*

permissionless-batches/conf/
@@ -1,5 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*

permissionless-batches/conf/
3  common/.gitignore  vendored
@@ -1,4 +1,3 @@
/build/bin
.idea
libzkp/impl/target
libzkp/interface/*.a
libzkp
@@ -4,5 +4,4 @@ test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...

lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
cd libzkp/impl && cargo fmt --all -- --check && cargo clippy --release -- -D warnings
GOBIN=$(PWD)/build/bin go run ../build/lint.go
@@ -41,7 +41,7 @@ func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
elapsed := time.Since(begin)
sql, rowsAffected := fc()
g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
}

// InitDB init the db handler
@@ -12,10 +12,11 @@ require (
github.com/gin-gonic/gin v1.9.1
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20
github.com/mitchellh/mapstructure v1.5.0
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0
github.com/testcontainers/testcontainers-go v0.30.0
github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
@@ -64,7 +65,7 @@ require (
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/cpuguy83/dockercfg v0.3.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/distribution/reference v0.5.0 // indirect
@@ -79,7 +80,7 @@ require (
github.com/docker/go-units v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsevents v0.1.1 // indirect
@@ -147,7 +148,6 @@ require (
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
@@ -184,7 +184,7 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 // indirect
github.com/scroll-tech/da-codec v0.10.0 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
@@ -198,7 +198,7 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.4.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.13 // indirect
github.com/supranational/blst v0.3.15 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect
@@ -155,8 +155,8 @@ github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoY
|
||||
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
|
||||
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
|
||||
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
@@ -214,8 +214,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
|
||||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
|
||||
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
|
||||
@@ -636,10 +636,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
|
||||
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
|
||||
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
|
||||
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
|
||||
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
|
||||
@@ -707,8 +707,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
|
||||
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
|
||||
github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=
|
||||
|
||||
common/libzkp/impl/Cargo.lock (generated, 7197 lines): diff suppressed because it is too large
@@ -1,34 +0,0 @@
|
||||
[package]
|
||||
name = "zkp"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
[lib]
|
||||
crate-type = ["cdylib"]
|
||||
|
||||
[patch.crates-io]
|
||||
# patched add rkyv support & MSRV 1.77
|
||||
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.21" }
|
||||
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
|
||||
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }
|
||||
|
||||
[dependencies]
|
||||
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.3.0", package = "scroll-zkvm-prover" }
|
||||
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.3.0", package = "scroll-zkvm-verifier" }
|
||||
|
||||
base64 = "0.13.0"
|
||||
env_logger = "0.9.0"
|
||||
libc = "0.2"
|
||||
log = "0.4"
|
||||
once_cell = "1.19"
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0.66"
|
||||
anyhow = "1.0.86"
|
||||
|
||||
[profile.test]
|
||||
opt-level = 3
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
@@ -1,11 +0,0 @@
|
||||
.PHONY: help fmt clippy test test-ci test-all
|
||||
|
||||
build:
|
||||
@cargo build --release
|
||||
|
||||
fmt:
|
||||
@cargo fmt --all -- --check
|
||||
|
||||
clippy:
|
||||
@cargo check --all-features
|
||||
@cargo clippy --release -- -D warnings
|
||||
@@ -1 +0,0 @@
|
||||
nightly-2024-12-06
|
||||
@@ -1,76 +0,0 @@
|
||||
mod utils;
|
||||
mod verifier;
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use crate::utils::{c_char_to_str, c_char_to_vec};
|
||||
use libc::c_char;
|
||||
use verifier::{TaskType, VerifierConfig};
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn init(config: *const c_char) {
|
||||
let config_str = c_char_to_str(config);
|
||||
let verifier_config = serde_json::from_str::<VerifierConfig>(config_str).unwrap();
|
||||
verifier::init(verifier_config);
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn verify_chunk_proof(
|
||||
proof: *const c_char,
|
||||
fork_name: *const c_char,
|
||||
) -> c_char {
|
||||
verify_proof(proof, fork_name, TaskType::Chunk)
|
||||
}
|
||||
|
||||
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
|
||||
let fork_name_str = c_char_to_str(fork_name);
|
||||
let proof = c_char_to_vec(proof);
|
||||
let verifier = verifier::get_verifier(fork_name_str);
|
||||
|
||||
if let Err(e) = verifier {
|
||||
log::warn!("failed to get verifier, error: {:#}", e);
|
||||
return 0 as c_char;
|
||||
}
|
||||
match verifier.unwrap().verify(task_type, proof) {
|
||||
Err(e) => {
|
||||
log::error!("{:?} verify failed, error: {:#}", task_type, e);
|
||||
false as c_char
|
||||
}
|
||||
Ok(result) => result as c_char,
|
||||
}
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn verify_batch_proof(
|
||||
proof: *const c_char,
|
||||
fork_name: *const c_char,
|
||||
) -> c_char {
|
||||
verify_proof(proof, fork_name, TaskType::Batch)
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn verify_bundle_proof(
|
||||
proof: *const c_char,
|
||||
fork_name: *const c_char,
|
||||
) -> c_char {
|
||||
verify_proof(proof, fork_name, TaskType::Bundle)
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
|
||||
_dump_vk(fork_name, file);
|
||||
}
|
||||
|
||||
fn _dump_vk(fork_name: *const c_char, file: *const c_char) {
|
||||
let fork_name_str = c_char_to_str(fork_name);
|
||||
let verifier = verifier::get_verifier(fork_name_str);
|
||||
|
||||
if let Ok(verifier) = verifier {
|
||||
verifier.as_ref().dump_vk(Path::new(c_char_to_str(file)));
|
||||
}
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
use std::{
|
||||
ffi::CStr,
|
||||
os::raw::c_char,
|
||||
panic::{catch_unwind, AssertUnwindSafe},
|
||||
};
|
||||
|
||||
pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str {
|
||||
let cstr = unsafe { CStr::from_ptr(c) };
|
||||
cstr.to_str().unwrap()
|
||||
}
|
||||
|
||||
pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
|
||||
let cstr = unsafe { CStr::from_ptr(c) };
|
||||
cstr.to_bytes().to_vec()
|
||||
}
|
||||
|
||||
pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
|
||||
catch_unwind(AssertUnwindSafe(f)).map_err(|err| {
|
||||
if let Some(s) = err.downcast_ref::<String>() {
|
||||
s.to_string()
|
||||
} else if let Some(s) = err.downcast_ref::<&str>() {
|
||||
s.to_string()
|
||||
} else {
|
||||
format!("unable to get panic info {err:?}")
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,88 +0,0 @@
|
||||
#![allow(static_mut_refs)]
|
||||
|
||||
mod euclid;
|
||||
mod euclidv2;
|
||||
|
||||
use anyhow::{bail, Result};
|
||||
use euclid::EuclidVerifier;
|
||||
use euclidv2::EuclidV2Verifier;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{cell::OnceCell, path::Path, rc::Rc};
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub enum TaskType {
|
||||
Chunk,
|
||||
Batch,
|
||||
Bundle,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct VKDump {
|
||||
pub chunk_vk: String,
|
||||
pub batch_vk: String,
|
||||
pub bundle_vk: String,
|
||||
}
|
||||
|
||||
pub trait ProofVerifier {
|
||||
fn verify(&self, task_type: TaskType, proof: Vec<u8>) -> Result<bool>;
|
||||
fn dump_vk(&self, file: &Path);
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct CircuitConfig {
|
||||
pub fork_name: String,
|
||||
pub params_path: String,
|
||||
pub assets_path: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct VerifierConfig {
|
||||
pub low_version_circuit: CircuitConfig,
|
||||
pub high_version_circuit: CircuitConfig,
|
||||
}
|
||||
|
||||
type HardForkName = String;
|
||||
|
||||
struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);
|
||||
|
||||
static mut VERIFIER_LOW: OnceCell<VerifierPair> = OnceCell::new();
|
||||
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
|
||||
|
||||
pub fn init(config: VerifierConfig) {
|
||||
let verifier = EuclidVerifier::new(&config.high_version_circuit.assets_path);
|
||||
unsafe {
|
||||
VERIFIER_LOW
|
||||
.set(VerifierPair(
|
||||
"euclid".to_string(),
|
||||
Rc::new(Box::new(verifier)),
|
||||
))
|
||||
.unwrap_unchecked();
|
||||
}
|
||||
|
||||
let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
|
||||
unsafe {
|
||||
VERIFIER_HIGH
|
||||
.set(VerifierPair(
|
||||
"euclidV2".to_string(),
|
||||
Rc::new(Box::new(verifier)),
|
||||
))
|
||||
.unwrap_unchecked();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
|
||||
unsafe {
|
||||
if let Some(verifier) = VERIFIER_LOW.get() {
|
||||
if verifier.0 == fork_name {
|
||||
return Ok(verifier.1.clone());
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(verifier) = VERIFIER_HIGH.get() {
|
||||
if verifier.0 == fork_name {
|
||||
return Ok(verifier.1.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
bail!("failed to get verifier, key not found, {}", fork_name)
|
||||
}
|
||||
@@ -1,65 +0,0 @@
|
||||
use super::{ProofVerifier, TaskType, VKDump};
|
||||
|
||||
use anyhow::Result;
|
||||
|
||||
use crate::utils::panic_catch;
|
||||
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
|
||||
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV1, ChunkVerifier};
|
||||
use std::{fs::File, path::Path};
|
||||
|
||||
pub struct EuclidVerifier {
|
||||
chunk_verifier: ChunkVerifier,
|
||||
batch_verifier: BatchVerifier,
|
||||
bundle_verifier: BundleVerifierEuclidV1,
|
||||
}
|
||||
|
||||
impl EuclidVerifier {
|
||||
pub fn new(assets_dir: &str) -> Self {
|
||||
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
|
||||
let config = Path::new(assets_dir).join("root-verifier-vm-config");
|
||||
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
|
||||
|
||||
Self {
|
||||
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
|
||||
.expect("Setting up chunk verifier"),
|
||||
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
|
||||
.expect("Setting up batch verifier"),
|
||||
bundle_verifier: BundleVerifierEuclidV1::setup(&config, &exe, &verifier_bin)
|
||||
.expect("Setting up bundle verifier"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ProofVerifier for EuclidVerifier {
|
||||
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
|
||||
panic_catch(|| match task_type {
|
||||
TaskType::Chunk => {
|
||||
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
|
||||
self.chunk_verifier
|
||||
.verify_proof(proof.proof.as_root_proof().unwrap())
|
||||
}
|
||||
TaskType::Batch => {
|
||||
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
|
||||
self.batch_verifier
|
||||
.verify_proof(proof.proof.as_root_proof().unwrap())
|
||||
}
|
||||
TaskType::Bundle => {
|
||||
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
|
||||
self.bundle_verifier
|
||||
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
|
||||
}
|
||||
})
|
||||
.map_err(|err_str: String| anyhow::anyhow!(err_str))
|
||||
}
|
||||
|
||||
fn dump_vk(&self, file: &Path) {
|
||||
let f = File::create(file).expect("Failed to open file to dump VK");
|
||||
|
||||
let dump = VKDump {
|
||||
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
|
||||
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
|
||||
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
|
||||
};
|
||||
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
|
||||
}
|
||||
}
|
||||
@@ -1,65 +0,0 @@
|
||||
use super::{ProofVerifier, TaskType, VKDump};
|
||||
|
||||
use anyhow::Result;
|
||||
|
||||
use crate::utils::panic_catch;
|
||||
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
|
||||
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
|
||||
use std::{fs::File, path::Path};
|
||||
|
||||
pub struct EuclidV2Verifier {
|
||||
chunk_verifier: ChunkVerifier,
|
||||
batch_verifier: BatchVerifier,
|
||||
bundle_verifier: BundleVerifierEuclidV2,
|
||||
}
|
||||
|
||||
impl EuclidV2Verifier {
|
||||
pub fn new(assets_dir: &str) -> Self {
|
||||
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
|
||||
let config = Path::new(assets_dir).join("root-verifier-vm-config");
|
||||
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
|
||||
|
||||
Self {
|
||||
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
|
||||
.expect("Setting up chunk verifier"),
|
||||
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
|
||||
.expect("Setting up batch verifier"),
|
||||
bundle_verifier: BundleVerifierEuclidV2::setup(&config, &exe, &verifier_bin)
|
||||
.expect("Setting up bundle verifier"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ProofVerifier for EuclidV2Verifier {
|
||||
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
|
||||
panic_catch(|| match task_type {
|
||||
TaskType::Chunk => {
|
||||
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
|
||||
self.chunk_verifier
|
||||
.verify_proof(proof.proof.as_root_proof().unwrap())
|
||||
}
|
||||
TaskType::Batch => {
|
||||
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
|
||||
self.batch_verifier
|
||||
.verify_proof(proof.proof.as_root_proof().unwrap())
|
||||
}
|
||||
TaskType::Bundle => {
|
||||
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
|
||||
self.bundle_verifier
|
||||
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
|
||||
}
|
||||
})
|
||||
.map_err(|err_str: String| anyhow::anyhow!(err_str))
|
||||
}
|
||||
|
||||
fn dump_vk(&self, file: &Path) {
|
||||
let f = File::create(file).expect("Failed to open file to dump VK");
|
||||
|
||||
let dump = VKDump {
|
||||
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
|
||||
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
|
||||
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
|
||||
};
|
||||
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
|
||||
}
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
// BatchVerifier is used to:
|
||||
// - Verify a batch proof
|
||||
// - Verify a bundle proof
|
||||
void init(char* config);
|
||||
|
||||
char verify_batch_proof(char* proof, char* fork_name);
|
||||
|
||||
char verify_bundle_proof(char* proof, char* fork_name);
|
||||
|
||||
char verify_chunk_proof(char* proof, char* fork_name);
|
||||
|
||||
void dump_vk(char* fork_name, char* file);
|
||||
@@ -34,7 +34,7 @@ services:
|
||||
|
||||
# Sets up the genesis configuration for the go-ethereum client from a JSON file.
|
||||
geth-genesis:
|
||||
image: "ethereum/client-go:v1.13.14"
|
||||
image: "ethereum/client-go:v1.14.0"
|
||||
command: --datadir=/data/execution init /data/execution/genesis.json
|
||||
volumes:
|
||||
- data:/data
|
||||
@@ -80,7 +80,7 @@ services:
|
||||
# Runs the go-ethereum execution client with the specified, unlocked account and necessary
|
||||
# APIs to allow for proof-of-stake consensus via Prysm.
|
||||
geth:
|
||||
image: "ethereum/client-go:v1.13.14"
|
||||
image: "ethereum/client-go:v1.14.0"
|
||||
command:
|
||||
- --http
|
||||
- --http.api=eth,net,web3
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM ethereum/client-go:v1.13.14
|
||||
FROM ethereum/client-go:v1.14.0
|
||||
|
||||
COPY password /l1geth/
|
||||
COPY genesis.json /l1geth/
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/testcontainers/testcontainers-go"
|
||||
"github.com/testcontainers/testcontainers-go/modules/compose"
|
||||
"github.com/testcontainers/testcontainers-go/modules/postgres"
|
||||
@@ -166,13 +167,13 @@ func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) {
|
||||
return contrainer.PortEndpoint(context.Background(), "8545/tcp", "http")
|
||||
}
|
||||
|
||||
// GetPoSL1Client returns a ethclient by dialing running PoS L1 client
|
||||
func (t *TestcontainerApps) GetPoSL1Client() (*ethclient.Client, error) {
|
||||
// GetPoSL1Client returns a raw rpc client by dialing the L1 node
|
||||
func (t *TestcontainerApps) GetPoSL1Client() (*rpc.Client, error) {
|
||||
endpoint, err := t.GetPoSL1EndPoint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ethclient.Dial(endpoint)
|
||||
return rpc.Dial(endpoint)
|
||||
}
|
||||
|
||||
// GetDBEndPoint returns the endpoint of the running postgres container
|
||||
@@ -220,11 +221,20 @@ func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
|
||||
|
||||
// GetL2GethClient returns a ethclient by dialing running L2Geth
|
||||
func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
|
||||
rpcCli, err := t.GetL2Client()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ethclient.NewClient(rpcCli), nil
|
||||
}
|
||||
|
||||
// GetL2Client returns a rpc client by dialing the running L2Geth
|
||||
func (t *TestcontainerApps) GetL2Client() (*rpc.Client, error) {
|
||||
endpoint, err := t.GetL2GethEndPoint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client, err := ethclient.Dial(endpoint)
|
||||
client, err := rpc.Dial(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package testcontainers
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
@@ -14,7 +13,6 @@ func TestNewTestcontainerApps(t *testing.T) {
|
||||
err error
|
||||
endpoint string
|
||||
gormDBclient *gorm.DB
|
||||
ethclient *ethclient.Client
|
||||
)
|
||||
|
||||
testApps := NewTestcontainerApps()
|
||||
@@ -32,17 +30,17 @@ func TestNewTestcontainerApps(t *testing.T) {
|
||||
endpoint, err = testApps.GetL2GethEndPoint()
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, endpoint)
|
||||
ethclient, err = testApps.GetL2GethClient()
|
||||
l2RawClient, err := testApps.GetL2Client()
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, ethclient)
|
||||
assert.NotNil(t, l2RawClient)
|
||||
|
||||
assert.NoError(t, testApps.StartPoSL1Container())
|
||||
endpoint, err = testApps.GetPoSL1EndPoint()
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, endpoint)
|
||||
ethclient, err = testApps.GetPoSL1Client()
|
||||
l1RawClient, err := testApps.GetPoSL1Client()
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, ethclient)
|
||||
assert.NotNil(t, l1RawClient)
|
||||
|
||||
assert.NoError(t, testApps.StartWeb3SignerContainer(1))
|
||||
endpoint, err = testApps.GetWeb3SignerEndpoint()
|
||||
|
||||
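The testcontainers helpers above now hand back a raw *rpc.Client and let callers wrap it with ethclient only where typed helpers are needed. A minimal sketch of that pattern outside the test harness (the endpoint and the extra RPC method are placeholders, not taken from this repo):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rpc"
)

func main() {
	// Dial the node once and keep the raw rpc.Client around; it can serve
	// both raw JSON-RPC calls and a typed ethclient wrapper.
	rawCli, err := rpc.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}

	// Wrap the raw client only where typed helpers are needed.
	ethCli := ethclient.NewClient(rawCli)
	blockNum, err := ethCli.BlockNumber(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest block:", blockNum)

	// The raw client stays available for endpoints ethclient does not expose.
	var clientVersion string
	if err := rawCli.CallContext(context.Background(), &clientVersion, "web3_clientVersion"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("client:", clientVersion)
}
```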
common/testdata/blobdata.json (new vendored file, 4 lines): diff suppressed because one or more lines are too long
@@ -326,3 +326,53 @@ func (s TxStatus) String() string {
|
||||
return fmt.Sprintf("Unknown TxStatus (%d)", int32(s))
|
||||
}
|
||||
}
|
||||
|
||||
// BlobUploadStatus represents the status of a blob upload
|
||||
type BlobUploadStatus int
|
||||
|
||||
const (
|
||||
// BlobUploadStatusUndefined indicates an undefined status
|
||||
BlobUploadStatusUndefined BlobUploadStatus = iota
|
||||
// BlobUploadStatusPending indicates a pending upload status
|
||||
BlobUploadStatusPending
|
||||
// BlobUploadStatusUploaded indicates a successful upload status
|
||||
BlobUploadStatusUploaded
|
||||
// BlobUploadStatusFailed indicates a failed upload status
|
||||
BlobUploadStatusFailed
|
||||
)
|
||||
|
||||
func (s BlobUploadStatus) String() string {
|
||||
switch s {
|
||||
case BlobUploadStatusPending:
|
||||
return "BlobUploadStatusPending"
|
||||
case BlobUploadStatusUploaded:
|
||||
return "BlobUploadStatusUploaded"
|
||||
case BlobUploadStatusFailed:
|
||||
return "BlobUploadStatusFailed"
|
||||
default:
|
||||
return fmt.Sprintf("Unknown BlobUploadStatus (%d)", int32(s))
|
||||
}
|
||||
}
|
||||
|
||||
// BlobStoragePlatform represents the platform a blob is uploaded to
|
||||
type BlobStoragePlatform int
|
||||
|
||||
const (
|
||||
// BlobStoragePlatformUndefined indicates an undefined platform
|
||||
BlobStoragePlatformUndefined BlobStoragePlatform = iota
|
||||
// BlobStoragePlatformS3 represents AWS S3
|
||||
BlobStoragePlatformS3
|
||||
// BlobStoragePlatformArweave represents storage blockchain Arweave
|
||||
BlobStoragePlatformArweave
|
||||
)
|
||||
|
||||
func (s BlobStoragePlatform) String() string {
|
||||
switch s {
|
||||
case BlobStoragePlatformS3:
|
||||
return "BlobStoragePlatformS3"
|
||||
case BlobStoragePlatformArweave:
|
||||
return "BlobStoragePlatformArweave"
|
||||
default:
|
||||
return fmt.Sprintf("Unknown BlobStoragePlatform (%d)", int32(s))
|
||||
}
|
||||
}
|
||||
|
||||
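The two enums above follow the package's existing iota-plus-String convention, so values print as readable names in logs and audit columns instead of bare integers. A small self-contained sketch of that behavior (the type is re-declared here only for illustration):

```go
package main

import "fmt"

// BlobUploadStatus re-declares the enum above with the same
// iota-plus-String pattern, purely for demonstration.
type BlobUploadStatus int

const (
	BlobUploadStatusUndefined BlobUploadStatus = iota
	BlobUploadStatusPending
	BlobUploadStatusUploaded
	BlobUploadStatusFailed
)

func (s BlobUploadStatus) String() string {
	switch s {
	case BlobUploadStatusPending:
		return "BlobUploadStatusPending"
	case BlobUploadStatusUploaded:
		return "BlobUploadStatusUploaded"
	case BlobUploadStatusFailed:
		return "BlobUploadStatusFailed"
	default:
		return fmt.Sprintf("Unknown BlobUploadStatus (%d)", int32(s))
	}
}

func main() {
	status := BlobUploadStatusPending
	// %s / %v pick up the String method automatically.
	fmt.Printf("blob upload is %s\n", status)

	status = BlobUploadStatusUploaded
	fmt.Println("new state:", status)
}
```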
@@ -10,13 +10,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
const (
|
||||
EuclidFork = "euclid"
|
||||
EuclidV2Fork = "euclidV2"
|
||||
|
||||
EuclidV2ForkNameForProver = "euclidv2"
|
||||
)
|
||||
|
||||
// ProofType represents the type of task.
|
||||
type ProofType uint8
|
||||
|
||||
@@ -46,10 +39,12 @@ const (
|
||||
|
||||
// ChunkTaskDetail is a type containing ChunkTask detail for chunk task.
|
||||
type ChunkTaskDetail struct {
|
||||
// use one of the string of EuclidFork / EuclidV2Fork
|
||||
Version uint8 `json:"version"`
|
||||
// use one of the string of "euclidv1" / "euclidv2"
|
||||
ForkName string `json:"fork_name"`
|
||||
BlockHashes []common.Hash `json:"block_hashes"`
|
||||
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
|
||||
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
|
||||
}
|
||||
|
||||
// it is a hex-encoded big integer with a fixed length of 48 bytes
|
||||
@@ -97,40 +92,59 @@ func (e *Byte48) UnmarshalJSON(input []byte) error {
|
||||
|
||||
// BatchTaskDetail is a type containing BatchTask detail.
|
||||
type BatchTaskDetail struct {
|
||||
// use one of the string of EuclidFork / EuclidV2Fork
|
||||
ForkName string `json:"fork_name"`
|
||||
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
|
||||
ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
|
||||
BatchHeader interface{} `json:"batch_header"`
|
||||
BlobBytes []byte `json:"blob_bytes"`
|
||||
KzgProof Byte48 `json:"kzg_proof,omitempty"`
|
||||
KzgCommitment Byte48 `json:"kzg_commitment,omitempty"`
|
||||
ChallengeDigest common.Hash `json:"challenge_digest,omitempty"`
|
||||
Version uint8 `json:"version"`
|
||||
// use one of the string of "euclidv1" / "euclidv2"
|
||||
ForkName string `json:"fork_name"`
|
||||
ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
|
||||
BatchHeader interface{} `json:"batch_header"`
|
||||
BlobBytes []byte `json:"blob_bytes"`
|
||||
KzgProof *Byte48 `json:"kzg_proof,omitempty"`
|
||||
KzgCommitment *Byte48 `json:"kzg_commitment,omitempty"`
|
||||
// ChallengeDigest should be a common.Hash type if it is not nil
|
||||
ChallengeDigest interface{} `json:"challenge_digest,omitempty"`
|
||||
}
|
||||
|
||||
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
|
||||
type BundleTaskDetail struct {
|
||||
// use one of the string of EuclidFork / EuclidV2Fork
|
||||
Version uint8 `json:"version"`
|
||||
// use one of the string of "euclidv1" / "euclidv2"
|
||||
ForkName string `json:"fork_name"`
|
||||
BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
|
||||
BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`
|
||||
}
|
||||
|
||||
type RawBytes []byte
|
||||
|
||||
func (r RawBytes) MarshalJSON() ([]byte, error) {
|
||||
if r == nil {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
// Marshal the []byte as a JSON array of numbers
|
||||
rn := make([]uint16, len(r))
|
||||
for i := range r {
|
||||
rn[i] = uint16(r[i])
|
||||
}
|
||||
return json.Marshal(rn)
|
||||
}
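RawBytes overrides the default base64 encoding of []byte so the field is emitted as a JSON array of numbers, matching how the prover-side deserializer appears to expect byte vectors. A short sketch of the resulting wire format (the surrounding payload struct is hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// RawBytes marshals a byte slice as a JSON array of numbers instead of the
// default base64-encoded string (mirrors the type added in this hunk).
type RawBytes []byte

func (r RawBytes) MarshalJSON() ([]byte, error) {
	if r == nil {
		return []byte("null"), nil
	}
	rn := make([]uint16, len(r))
	for i := range r {
		rn[i] = uint16(r[i])
	}
	return json.Marshal(rn)
}

func main() {
	// Hypothetical payload: only EncryptionKey uses the custom marshaler.
	payload := struct {
		Name          string   `json:"name"`
		EncryptionKey RawBytes `json:"encryption_key"`
	}{Name: "example", EncryptionKey: RawBytes{0x01, 0x02, 0xff}}

	out, err := json.Marshal(payload)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"name":"example","encryption_key":[1,2,255]}
}
```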
|
||||
|
||||
// ChunkInfo is for calculating pi_hash for chunk
|
||||
type ChunkInfo struct {
|
||||
ChainID uint64 `json:"chain_id"`
|
||||
PrevStateRoot common.Hash `json:"prev_state_root"`
|
||||
PostStateRoot common.Hash `json:"post_state_root"`
|
||||
WithdrawRoot common.Hash `json:"withdraw_root"`
|
||||
DataHash common.Hash `json:"data_hash"`
|
||||
IsPadding bool `json:"is_padding"`
|
||||
TxBytes []byte `json:"tx_bytes"`
|
||||
ChainID uint64 `json:"chain_id"`
|
||||
PrevStateRoot common.Hash `json:"prev_state_root"`
|
||||
PostStateRoot common.Hash `json:"post_state_root"`
|
||||
WithdrawRoot common.Hash `json:"withdraw_root"`
|
||||
DataHash common.Hash `json:"data_hash"`
|
||||
IsPadding bool `json:"is_padding"`
|
||||
// TxBytes []byte `json:"tx_bytes"`
|
||||
TxBytesHash common.Hash `json:"tx_data_digest"`
|
||||
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
|
||||
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
|
||||
TxDataLength uint64 `json:"tx_data_length"`
|
||||
InitialBlockNumber uint64 `json:"initial_block_number"`
|
||||
BlockCtxs []BlockContextV2 `json:"block_ctxs"`
|
||||
PrevBlockhash common.Hash `json:"prev_blockhash"`
|
||||
PostBlockhash common.Hash `json:"post_blockhash"`
|
||||
EncryptionKey RawBytes `json:"encryption_key"`
|
||||
}
|
||||
|
||||
// BlockContextV2 is the block context for euclid v2
|
||||
@@ -142,10 +156,18 @@ type BlockContextV2 struct {
|
||||
NumL1Msgs uint16 `json:"num_l1_msgs"`
|
||||
}
|
||||
|
||||
// Metric data carried with OpenVMProof
|
||||
type OpenVMProofStat struct {
|
||||
TotalCycle uint64 `json:"total_cycles"`
|
||||
ExecutionTimeMills uint64 `json:"execution_time_mills"`
|
||||
ProvingTimeMills uint64 `json:"proving_time_mills"`
|
||||
}
|
||||
|
||||
// Proof for flatten VM proof
|
||||
type OpenVMProof struct {
|
||||
Proof []byte `json:"proofs"`
|
||||
PublicValues []byte `json:"public_values"`
|
||||
Proof []byte `json:"proofs"`
|
||||
PublicValues []byte `json:"public_values"`
|
||||
Stat *OpenVMProofStat `json:"stat,omitempty"`
|
||||
}
|
||||
|
||||
// Proof for flatten EVM proof
|
||||
@@ -157,7 +179,8 @@ type OpenVMEvmProof struct {
|
||||
// OpenVMChunkProof includes the proof info that are required for chunk verification and rollup.
|
||||
type OpenVMChunkProof struct {
|
||||
MetaData struct {
|
||||
ChunkInfo *ChunkInfo `json:"chunk_info"`
|
||||
ChunkInfo *ChunkInfo `json:"chunk_info"`
|
||||
TotalGasUsed uint64 `json:"chunk_total_gas"`
|
||||
} `json:"metadata"`
|
||||
|
||||
VmProof *OpenVMProof `json:"proof"`
|
||||
@@ -184,6 +207,7 @@ type OpenVMBatchInfo struct {
|
||||
ChainID uint64 `json:"chain_id"`
|
||||
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
|
||||
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
|
||||
EncryptionKey RawBytes `json:"encryption_key"`
|
||||
}
|
||||
|
||||
// BatchProof includes the proof info that are required for batch verification and rollup.
|
||||
@@ -244,6 +268,7 @@ type OpenVMBundleInfo struct {
|
||||
PrevBatchHash common.Hash `json:"prev_batch_hash"`
|
||||
BatchHash common.Hash `json:"batch_hash"`
|
||||
MsgQueueHash common.Hash `json:"msg_queue_hash"`
|
||||
EncryptionKey RawBytes `json:"encryption_key"`
|
||||
}
|
||||
|
||||
// OpenVMBundleProof includes the proof info that are required for verification of a bundle of batch proofs.
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
// Response the response schema
|
||||
@@ -13,6 +14,19 @@ type Response struct {
|
||||
Data interface{} `json:"data"`
|
||||
}
|
||||
|
||||
func (resp *Response) DecodeData(out interface{}) error {
|
||||
// Decode generically unmarshaled JSON (map[string]any, []any) into a typed struct
|
||||
// honoring `json` tags and allowing weak type conversions.
|
||||
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
|
||||
TagName: "json",
|
||||
Result: out,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dec.Decode(resp.Data)
|
||||
}
|
||||
|
||||
// RenderJSON renders response with json
|
||||
func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
|
||||
var errMsg string
|
||||
|
||||
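DecodeData lets API clients re-shape the generically unmarshaled Data field into a typed struct by running mapstructure with the json tag name. A hedged usage sketch (the Response fields beyond Data and the loginData type are illustrative, not the coordinator's actual schema):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Response is a simplified stand-in with only the fields needed here.
type Response struct {
	ErrCode int         `json:"errcode"`
	ErrMsg  string      `json:"errmsg"`
	Data    interface{} `json:"data"`
}

// DecodeData projects the generically decoded Data onto a typed struct,
// honoring `json` tags, as in the hunk above.
func (resp *Response) DecodeData(out interface{}) error {
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		TagName: "json",
		Result:  out,
	})
	if err != nil {
		return err
	}
	return dec.Decode(resp.Data)
}

// loginData is a hypothetical payload shape used only for illustration.
type loginData struct {
	Token string `json:"token"`
	Time  string `json:"time"`
}

func main() {
	raw := []byte(`{"errcode":0,"errmsg":"","data":{"token":"abc123","time":"2024-01-01T00:00:00Z"}}`)

	var resp Response
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}

	// resp.Data is a map[string]interface{} at this point; DecodeData
	// turns it into the typed struct using the json tags.
	var login loginData
	if err := resp.DecodeData(&login); err != nil {
		panic(err)
	}
	fmt.Println(login.Token, login.Time)
}
```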
common/utils/blob.go (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
)
|
||||
|
||||
// CalculateVersionedBlobHash calculates the kzg4844 versioned blob hash from a blob
|
||||
func CalculateVersionedBlobHash(blob kzg4844.Blob) ([32]byte, error) {
|
||||
// calculate kzg4844 commitment from blob
|
||||
commit, err := kzg4844.BlobToCommitment(&blob)
|
||||
if err != nil {
|
||||
return [32]byte{}, fmt.Errorf("failed to get blob commitment, err: %w", err)
|
||||
}
|
||||
|
||||
// calculate kzg4844 versioned blob hash from blob commitment
|
||||
hasher := sha256.New()
|
||||
vh := kzg4844.CalcBlobHashV1(hasher, &commit)
|
||||
|
||||
return vh, nil
|
||||
}
|
||||
common/utils/blob_test.go (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
)
|
||||
|
||||
type BlobData struct {
|
||||
VersionedBlobHash string `json:"versionedBlobHash"`
|
||||
BlobData string `json:"blobData"`
|
||||
}
|
||||
|
||||
// TestCalculateVersionedBlobHash tests the CalculateVersionedBlobHash function
|
||||
func TestCalculateVersionedBlobHash(t *testing.T) {
|
||||
// Read the test data
|
||||
data, err := os.ReadFile("../testdata/blobdata.json")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read blobdata.json: %v", err)
|
||||
}
|
||||
|
||||
var blobData BlobData
|
||||
if err := json.Unmarshal(data, &blobData); err != nil {
|
||||
t.Fatalf("Failed to parse blobdata.json: %v", err)
|
||||
}
|
||||
|
||||
blobBytes, err := hex.DecodeString(blobData.BlobData)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to decode blob data: %v", err)
|
||||
}
|
||||
|
||||
// Convert []byte to kzg4844.Blob
|
||||
var blob kzg4844.Blob
|
||||
copy(blob[:], blobBytes)
|
||||
|
||||
// Calculate the hash
|
||||
calculatedHashBytes, err := CalculateVersionedBlobHash(blob)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate versioned blob hash: %v", err)
|
||||
}
|
||||
|
||||
calculatedHash := hex.EncodeToString(calculatedHashBytes[:])
|
||||
|
||||
if calculatedHash != blobData.VersionedBlobHash {
|
||||
t.Fatalf("Hash mismatch: got %s, want %s", calculatedHash, blobData.VersionedBlobHash)
|
||||
}
|
||||
|
||||
}
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
var tag = "v4.5.11"
|
||||
var tag = "v4.7.10"
|
||||
|
||||
var commit = func() string {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
@@ -23,7 +23,7 @@ var commit = func() string {
|
||||
return "000000"
|
||||
}()
|
||||
|
||||
// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-prover and halo2, contacted by a "-"
|
||||
// ZkVersion is the commit-id of cargo.lock/zkvm-prover and openvm, concatenated with a "-"
|
||||
// The default `000000-000000` is set for integration test, and will be overwritten by coordinator's & prover's actual compilations (see their Makefiles).
|
||||
var ZkVersion = "000000-000000"
|
||||
|
||||
|
||||
coordinator/.gitignore (vendored, 1 line changed)
@@ -1,3 +1,4 @@
|
||||
/build/bin
|
||||
.idea
|
||||
internal/logic/verifier/lib
|
||||
internal/libzkp/lib/libzkp.so
|
||||
|
||||
@@ -1,26 +1,31 @@
|
||||
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator
|
||||
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator libzkp
|
||||
|
||||
IMAGE_VERSION=latest
|
||||
REPO_ROOT_DIR=./..
|
||||
LIBZKP_PATH=./internal/logic/libzkp/lib/libzkp.so
|
||||
|
||||
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
|
||||
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
|
||||
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
|
||||
ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
|
||||
OPENVM_VERSION=$(shell grep -m 1 "openvm.git" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
|
||||
else
|
||||
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
|
||||
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
|
||||
ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "\#" -f2 | cut -c-7)
|
||||
OPENVM_VERSION=$(shell grep -m 1 "openvm.git" ../Cargo.lock | cut -d "\#" -f2 | cut -c-7)
|
||||
endif
|
||||
|
||||
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
|
||||
ZK_VERSION=${ZKVM_VERSION}-${OPENVM_VERSION}
|
||||
|
||||
test:
|
||||
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
|
||||
|
||||
libzkp:
|
||||
cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
|
||||
rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
|
||||
$(LIBZKP_PATH):
|
||||
$(MAKE) -C ./internal/logic/libzkp build
|
||||
|
||||
coordinator_api: libzkp ## Builds the Coordinator api instance.
|
||||
clean_libzkp:
|
||||
$(MAKE) -C ./internal/logic/libzkp clean
|
||||
|
||||
libzkp: clean_libzkp $(LIBZKP_PATH)
|
||||
|
||||
coordinator_api: $(LIBZKP_PATH) ## Builds the Coordinator api instance.
|
||||
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
|
||||
|
||||
coordinator_cron:
|
||||
@@ -29,8 +34,25 @@ coordinator_cron:
|
||||
coordinator_tool:
|
||||
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool
|
||||
|
||||
coordinator_api_skip_libzkp:
|
||||
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
|
||||
coordinator_proxy:
|
||||
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_proxy ./cmd/proxy
|
||||
|
||||
|
||||
localsetup: coordinator_api ## Local setup: build coordinator_api, copy config, and setup releases
|
||||
mkdir -p build/bin/conf
|
||||
@echo "Copying configuration files..."
|
||||
@if [ -f "$(PWD)/conf/config.template.json" ]; then \
|
||||
SRC="$(PWD)/conf/config.template.json"; \
|
||||
else \
|
||||
SRC="$(CURDIR)/conf/config.json"; \
|
||||
fi; \
|
||||
cp -fL "$$SRC" "$(CURDIR)/build/bin/conf/config.template.json"
|
||||
@echo "Setting up releases..."
|
||||
cd $(CURDIR)/build && bash setup_releases.sh
|
||||
|
||||
|
||||
#coordinator_api_skip_libzkp:
|
||||
# go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
|
||||
|
||||
mock_coordinator_api: ## Builds the mocked Coordinator instance.
|
||||
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_api ./cmd/api
|
||||
@@ -38,15 +60,15 @@ mock_coordinator_api: ## Builds the mocked Coordinator instance.
|
||||
mock_coordinator_cron: ## Builds the mocked Coordinator instance.
|
||||
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron
|
||||
|
||||
test-verifier: libzkp
|
||||
test-verifier: $(LIBZKP_PATH)
|
||||
go test -tags ffi -timeout 0 -v ./internal/logic/verifier
|
||||
|
||||
test-gpu-verifier: libzkp
|
||||
test-gpu-verifier: $(LIBZKP_PATH)
|
||||
go test -tags="gpu ffi" -timeout 0 -v ./internal/logic/verifier
|
||||
|
||||
lint: ## Lint the files - used for CI
|
||||
cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
|
||||
GOBIN=$(PWD)/build/bin go run ../build/lint.go
|
||||
cd ../ && cargo fmt --all -- --check && cargo clippy --release -- -D warnings
|
||||
|
||||
clean: ## Empty out the bin folder
|
||||
@rm -rf build/bin
|
||||
|
||||
coordinator/build/setup_releases.sh (new file, 72 lines)
@@ -0,0 +1,72 @@
|
||||
#!/bin/bash
|
||||
|
||||
# release version
|
||||
if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
|
||||
echo "SCROLL_ZKVM_VERSION not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# default fork name from env, falling back to "galileov2"
|
||||
SCROLL_FORK_NAME="${SCROLL_FORK_NAME:-galileov2}"
|
||||
|
||||
# set ASSET_DIR by reading from config.json
|
||||
CONFIG_FILE="bin/conf/config.template.json"
|
||||
if [ ! -f "$CONFIG_FILE" ]; then
|
||||
echo "Config file $CONFIG_FILE not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# get the number of verifiers in the array
|
||||
VERIFIER_COUNT=$(jq -r '.prover_manager.verifier.verifiers | length' "$CONFIG_FILE")
|
||||
|
||||
if [ "$VERIFIER_COUNT" = "null" ] || [ "$VERIFIER_COUNT" -eq 0 ]; then
|
||||
echo "No verifiers found in config file"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Found $VERIFIER_COUNT verifier(s) in config"
|
||||
|
||||
# iterate through each verifier entry
|
||||
for ((i=0; i<$VERIFIER_COUNT; i++)); do
|
||||
# extract assets_path for current verifier
|
||||
ASSETS_PATH=$(jq -r ".prover_manager.verifier.verifiers[$i].assets_path" "$CONFIG_FILE")
|
||||
FORK_NAME=$(jq -r ".prover_manager.verifier.verifiers[$i].fork_name" "$CONFIG_FILE")
|
||||
|
||||
# skip if this verifier's fork doesn't match the target fork
|
||||
if [ "$FORK_NAME" != "$SCROLL_FORK_NAME" ]; then
|
||||
echo "Expect $SCROLL_FORK_NAME, skip current fork ($FORK_NAME)"
|
||||
continue
|
||||
fi
|
||||
|
||||
if [ "$ASSETS_PATH" = "null" ]; then
|
||||
echo "Warning: Could not find assets_path for verifier $i, skipping..."
|
||||
continue
|
||||
fi
|
||||
|
||||
echo "Processing verifier $i ($FORK_NAME): assets_path=$ASSETS_PATH"
|
||||
|
||||
# check if it's an absolute path (starts with /)
|
||||
if [[ "$ASSETS_PATH" = /* ]]; then
|
||||
# absolute path, use as is
|
||||
ASSET_DIR="$ASSETS_PATH"
|
||||
else
|
||||
# relative path, prefix with "bin/"
|
||||
ASSET_DIR="bin/$ASSETS_PATH"
|
||||
fi
|
||||
|
||||
echo "Using ASSET_DIR: $ASSET_DIR"
|
||||
|
||||
# create directory if it doesn't exist
|
||||
mkdir -p "$ASSET_DIR"
|
||||
|
||||
# assets for verifier-only mode
|
||||
echo "Downloading assets for $FORK_NAME to $ASSET_DIR..."
|
||||
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/verifier.bin -O ${ASSET_DIR}/verifier.bin
|
||||
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/root_verifier_vk -O ${ASSET_DIR}/root_verifier_vk
|
||||
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/openVmVk.json -O ${ASSET_DIR}/openVmVk.json
|
||||
|
||||
echo "Completed downloading assets for $FORK_NAME"
|
||||
echo "---"
|
||||
done
|
||||
|
||||
echo "All verifier assets downloaded successfully"
|
||||
@@ -90,10 +90,12 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
|
||||
cfg.ProverManager = &coordinatorConfig.ProverManager{
|
||||
ProversPerSession: 1,
|
||||
Verifier: &coordinatorConfig.VerifierConfig{
|
||||
HighVersionCircuit: &coordinatorConfig.CircuitConfig{
|
||||
AssetsPath: "",
|
||||
ForkName: "euclidV2",
|
||||
MinProverVersion: "v4.4.89",
|
||||
MinProverVersion: "v4.4.89",
|
||||
Verifiers: []coordinatorConfig.AssetConfig{
|
||||
{
|
||||
AssetsPath: "",
|
||||
ForkName: "galileo",
|
||||
},
|
||||
},
|
||||
},
|
||||
BatchCollectionTimeSec: 60,
|
||||
|
||||
coordinator/cmd/proxy/app/app.go (new file, 122 lines)
@@ -0,0 +1,122 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/urfave/cli/v2"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/observability"
|
||||
"scroll-tech/common/utils"
|
||||
"scroll-tech/common/version"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/controller/proxy"
|
||||
"scroll-tech/coordinator/internal/route"
|
||||
)
|
||||
|
||||
var app *cli.App
|
||||
|
||||
func init() {
|
||||
// Set up coordinator app info.
|
||||
app = cli.NewApp()
|
||||
app.Action = action
|
||||
app.Name = "coordinator proxy"
|
||||
app.Usage = "Proxy for multiple Scroll L2 Coordinators"
|
||||
app.Version = version.Version
|
||||
app.Flags = append(app.Flags, utils.CommonFlags...)
|
||||
app.Flags = append(app.Flags, apiFlags...)
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
return utils.LogSetup(ctx)
|
||||
}
|
||||
// Register `coordinator-test` app for integration-test.
|
||||
utils.RegisterSimulation(app, utils.CoordinatorAPIApp)
|
||||
}
|
||||
|
||||
func action(ctx *cli.Context) error {
|
||||
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
|
||||
cfg, err := config.NewProxyConfig(cfgFile)
|
||||
if err != nil {
|
||||
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
var db *gorm.DB
|
||||
if dbCfg := cfg.ProxyManager.DB; dbCfg != nil {
|
||||
log.Info("Apply persistent storage", "via", cfg.ProxyManager.DB.DSN)
|
||||
db, err = database.InitDB(cfg.ProxyManager.DB)
|
||||
if err != nil {
|
||||
log.Crit("failed to init db connection", "err", err)
|
||||
}
|
||||
defer func() {
|
||||
if err = database.CloseDB(db); err != nil {
|
||||
log.Error("can not close db connection", "error", err)
|
||||
}
|
||||
}()
|
||||
observability.Server(ctx, db)
|
||||
}
|
||||
registry := prometheus.DefaultRegisterer
|
||||
|
||||
apiSrv := server(ctx, cfg, db, registry)
|
||||
|
||||
log.Info(
|
||||
"Start coordinator api successfully.",
|
||||
"version", version.Version,
|
||||
)
|
||||
|
||||
// Catch CTRL-C to ensure a graceful shutdown.
|
||||
interrupt := make(chan os.Signal, 1)
|
||||
signal.Notify(interrupt, os.Interrupt)
|
||||
|
||||
// Wait until the interrupt signal is received from an OS signal.
|
||||
<-interrupt
|
||||
log.Info("start shutdown coordinator proxy server ...")
|
||||
|
||||
closeCtx, cancelExit := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancelExit()
|
||||
if err = apiSrv.Shutdown(closeCtx); err != nil {
|
||||
log.Warn("shutdown coordinator proxy server failure", "error", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
<-closeCtx.Done()
|
||||
log.Info("coordinator proxy server exiting success")
|
||||
return nil
|
||||
}
|
||||
|
||||
func server(ctx *cli.Context, cfg *config.ProxyConfig, db *gorm.DB, reg prometheus.Registerer) *http.Server {
|
||||
router := gin.New()
|
||||
proxy.InitController(cfg, db, reg)
|
||||
route.ProxyRoute(router, cfg, reg)
|
||||
port := ctx.String(httpPortFlag.Name)
|
||||
srv := &http.Server{
|
||||
Addr: fmt.Sprintf(":%s", port),
|
||||
Handler: router,
|
||||
ReadHeaderTimeout: time.Minute,
|
||||
}
|
||||
|
||||
go func() {
|
||||
if runServerErr := srv.ListenAndServe(); runServerErr != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
|
||||
log.Crit("run coordinator proxy http server failure", "error", runServerErr)
|
||||
}
|
||||
}()
|
||||
return srv
|
||||
}
|
||||
|
||||
// Run coordinator.
|
||||
func Run() {
|
||||
// RunApp the coordinator.
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
_, _ = fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
coordinator/cmd/proxy/app/flags.go (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
package app
|
||||
|
||||
import "github.com/urfave/cli/v2"
|
||||
|
||||
var (
|
||||
apiFlags = []cli.Flag{
|
||||
// http flags
|
||||
&httpEnabledFlag,
|
||||
&httpListenAddrFlag,
|
||||
&httpPortFlag,
|
||||
}
|
||||
// httpEnabledFlag enable rpc server.
|
||||
httpEnabledFlag = cli.BoolFlag{
|
||||
Name: "http",
|
||||
Usage: "Enable the HTTP-RPC server",
|
||||
Value: false,
|
||||
}
|
||||
// httpListenAddrFlag set the http address.
|
||||
httpListenAddrFlag = cli.StringFlag{
|
||||
Name: "http.addr",
|
||||
Usage: "HTTP-RPC server listening interface",
|
||||
Value: "localhost",
|
||||
}
|
||||
// httpPortFlag set http.port.
|
||||
httpPortFlag = cli.IntFlag{
|
||||
Name: "http.port",
|
||||
Usage: "HTTP-RPC server listening port",
|
||||
Value: 8590,
|
||||
}
|
||||
)
|
||||
coordinator/cmd/proxy/main.go (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
package main
|
||||
|
||||
import "scroll-tech/coordinator/cmd/proxy/app"
|
||||
|
||||
func main() {
|
||||
app.Run()
|
||||
}
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
)
|
||||
|
||||
var app *cli.App
|
||||
var cfg *config.Config
|
||||
|
||||
func init() {
|
||||
// Set up coordinator app info.
|
||||
@@ -29,16 +30,29 @@ func init() {
|
||||
app.Version = version.Version
|
||||
app.Flags = append(app.Flags, utils.CommonFlags...)
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
return utils.LogSetup(ctx)
|
||||
if err := utils.LogSetup(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
|
||||
var err error
|
||||
cfg, err = config.NewConfig(cfgFile)
|
||||
if err != nil {
|
||||
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// sub commands
|
||||
app.Commands = []*cli.Command{
|
||||
{
|
||||
Name: "verify",
|
||||
Usage: "verify an proof, specified by [forkname] <type> <proof path>",
|
||||
Action: verify,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func action(ctx *cli.Context) error {
|
||||
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
|
||||
cfg, err := config.NewConfig(cfgFile)
|
||||
if err != nil {
|
||||
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
|
||||
}
|
||||
db, err := database.InitDB(cfg.DB)
|
||||
if err != nil {
|
||||
log.Crit("failed to init db connection", "err", err)
|
||||
|
||||
coordinator/cmd/tool/verify.go (new file, 109 lines)
@@ -0,0 +1,109 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"scroll-tech/coordinator/internal/logic/verifier"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func verify(cCtx *cli.Context) error {
|
||||
var forkName, proofType, proofPath string
|
||||
if cCtx.Args().Len() <= 2 {
|
||||
forkName = cfg.ProverManager.Verifier.Verifiers[0].ForkName
|
||||
proofType = cCtx.Args().First()
|
||||
proofPath = cCtx.Args().Get(1)
|
||||
} else {
|
||||
forkName = cCtx.Args().First()
|
||||
proofType = cCtx.Args().Get(1)
|
||||
proofPath = cCtx.Args().Get(2)
|
||||
}
|
||||
log.Info("verify proof", "in", proofPath, "type", proofType, "forkName", forkName)
|
||||
|
||||
// Load the content of the proof file
|
||||
data, err := os.ReadFile(filepath.Clean(proofPath))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading file: %w", err)
|
||||
}
|
||||
|
||||
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier, cfg.L2.ValidiumMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ret bool
|
||||
switch strings.ToLower(proofType) {
|
||||
case "chunk":
|
||||
proof := &message.OpenVMChunkProof{}
|
||||
if err := json.Unmarshal(data, proof); err != nil {
|
||||
return err
|
||||
}
|
||||
vk, ok := vf.ChunkVk[forkName]
|
||||
if !ok {
|
||||
return fmt.Errorf("no vk loaded for fork %s", forkName)
|
||||
}
|
||||
if len(proof.Vk) != 0 {
|
||||
if !bytes.Equal(proof.Vk, vk) {
|
||||
return fmt.Errorf("unmatch vk with expected: expected %s, get %s",
|
||||
base64.StdEncoding.EncodeToString(vk),
|
||||
base64.StdEncoding.EncodeToString(proof.Vk),
|
||||
)
|
||||
}
|
||||
} else {
|
||||
proof.Vk = vk
|
||||
}
|
||||
|
||||
ret, err = vf.VerifyChunkProof(proof, forkName)
|
||||
case "batch":
|
||||
proof := &message.OpenVMBatchProof{}
|
||||
if err := json.Unmarshal(data, proof); err != nil {
|
||||
return err
|
||||
}
|
||||
vk, ok := vf.BatchVk[forkName]
|
||||
if !ok {
|
||||
return fmt.Errorf("no vk loaded for fork %s", forkName)
|
||||
}
|
||||
if len(proof.Vk) != 0 {
|
||||
if !bytes.Equal(proof.Vk, vk) {
|
||||
return fmt.Errorf("unmatch vk with expected: expected %s, get %s",
|
||||
base64.StdEncoding.EncodeToString(vk),
|
||||
base64.StdEncoding.EncodeToString(proof.Vk),
|
||||
)
|
||||
}
|
||||
} else {
|
||||
proof.Vk = vk
|
||||
}
|
||||
|
||||
ret, err = vf.VerifyBatchProof(proof, forkName)
|
||||
case "bundle":
|
||||
proof := &message.OpenVMBundleProof{}
|
||||
if err := json.Unmarshal(data, proof); err != nil {
|
||||
return err
|
||||
}
|
||||
vk, ok := vf.BundleVk[forkName]
|
||||
if !ok {
|
||||
return fmt.Errorf("no vk loaded for fork %s", forkName)
|
||||
}
|
||||
proof.Vk = vk
|
||||
|
||||
ret, err = vf.VerifyBundleProof(proof, forkName)
|
||||
default:
|
||||
return fmt.Errorf("unsupport proof type %s", proofType)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info("verified:", "ret", ret)
|
||||
return nil
|
||||
}
|
||||
@@ -7,11 +7,22 @@
|
||||
"batch_collection_time_sec": 180,
|
||||
"chunk_collection_time_sec": 180,
|
||||
"verifier": {
|
||||
"high_version_circuit": {
|
||||
"assets_path": "assets",
|
||||
"fork_name": "euclidV2",
|
||||
"min_prover_version": "v4.4.45"
|
||||
}
|
||||
"min_prover_version": "v4.4.45",
|
||||
"verifiers": [
|
||||
{
|
||||
"features": "legacy_witness:openvm_13",
|
||||
"assets_path": "assets_feynman",
|
||||
"fork_name": "feynman"
|
||||
},
|
||||
{
|
||||
"assets_path": "assets",
|
||||
"fork_name": "galileo"
|
||||
},
|
||||
{
|
||||
"assets_path": "assets_v2",
|
||||
"fork_name": "galileoV2"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"db": {
|
||||
@@ -21,7 +32,10 @@
|
||||
"maxIdleNum": 20
|
||||
},
|
||||
"l2": {
|
||||
"chain_id": 111
|
||||
"chain_id": 111,
|
||||
"l2geth": {
|
||||
"endpoint": "not need to specified for mocking"
|
||||
}
|
||||
},
|
||||
"auth": {
|
||||
"secret": "prover secret key",
|
||||
|
||||
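The config.json change above replaces the single high_version_circuit block with a shared min_prover_version plus a verifiers array, one entry per fork. A sketch of how such a section could be decoded in Go (struct and field names are inferred from the MockConfig hunk earlier, not copied from the coordinator source):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// AssetConfig describes one verifier release: which fork it serves and where
// its assets live (names inferred from the diff, not the actual source).
type AssetConfig struct {
	Features   string `json:"features,omitempty"`
	AssetsPath string `json:"assets_path"`
	ForkName   string `json:"fork_name"`
}

// VerifierConfig carries a shared minimum prover version plus one entry
// per supported fork.
type VerifierConfig struct {
	MinProverVersion string        `json:"min_prover_version"`
	Verifiers        []AssetConfig `json:"verifiers"`
}

func main() {
	raw := []byte(`{
	  "min_prover_version": "v4.4.45",
	  "verifiers": [
	    {"features": "legacy_witness:openvm_13", "assets_path": "assets_feynman", "fork_name": "feynman"},
	    {"assets_path": "assets", "fork_name": "galileo"},
	    {"assets_path": "assets_v2", "fork_name": "galileoV2"}
	  ]
	}`)

	var cfg VerifierConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	for _, v := range cfg.Verifiers {
		fmt.Printf("fork %s -> %s\n", v.ForkName, v.AssetsPath)
	}
}
```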
coordinator/conf/config_proxy.json (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
{
|
||||
"proxy_manager": {
|
||||
"proxy_cli": {
|
||||
"proxy_name": "proxy_name",
|
||||
"secret": "client private key"
|
||||
},
|
||||
"auth": {
|
||||
"secret": "proxy secret key",
|
||||
"challenge_expire_duration_sec": 3600,
|
||||
"login_expire_duration_sec": 3600
|
||||
},
|
||||
"verifier": {
|
||||
"min_prover_version": "v4.4.45",
|
||||
"verifiers": []
|
||||
},
|
||||
"db": {
|
||||
"driver_name": "postgres",
|
||||
"dsn": "postgres://localhost/scroll?sslmode=disable",
|
||||
"maxOpenNum": 200,
|
||||
"maxIdleNum": 20
|
||||
}
|
||||
},
|
||||
"coordinators": {
|
||||
"sepolia": {
|
||||
"base_url": "http://localhost:8555",
|
||||
"retry_count": 10,
|
||||
"retry_wait_time_sec": 10,
|
||||
"connection_timeout_sec": 30
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -9,8 +9,8 @@ require (
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/mitchellh/mapstructure v1.5.0
|
||||
github.com/prometheus/client_golang v1.19.0
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
|
||||
github.com/scroll-tech/da-codec v0.10.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
|
||||
github.com/shopspring/decimal v1.3.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
@@ -46,6 +46,7 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.20.0 // indirect
|
||||
github.com/btcsuite/btcd v0.20.1-beta // indirect
|
||||
@@ -53,30 +54,59 @@ require (
|
||||
github.com/consensys/bavard v0.1.29 // indirect
|
||||
github.com/consensys/gnark-crypto v0.16.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
|
||||
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
|
||||
github.com/edsrzf/mmap-go v1.0.0 // indirect
|
||||
github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
|
||||
github.com/fjl/memsize v0.0.2 // indirect
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
|
||||
github.com/gorilla/websocket v1.4.2 // indirect
|
||||
github.com/hashicorp/go-bexpr v0.1.10 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
|
||||
github.com/holiman/uint256 v1.3.2 // indirect
|
||||
github.com/huin/goupnp v1.0.2 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/mattn/go-colorable v0.1.8 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
github.com/mitchellh/pointerstructure v1.2.0 // indirect
|
||||
github.com/mmcloughlin/addchain v0.4.0 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
github.com/prometheus/common v0.48.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/prometheus/tsdb v0.7.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.8.4 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/supranational/blst v0.3.13 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
|
||||
github.com/supranational/blst v0.3.15 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.14 // indirect
|
||||
github.com/tklauser/numcpus v0.9.0 // indirect
|
||||
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
golang.org/x/crypto v0.32.0 // indirect
|
||||
golang.org/x/sync v0.11.0 // indirect
|
||||
golang.org/x/sys v0.30.0 // indirect
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
rsc.io/tmplfunc v0.0.3 // indirect
|
||||
)
|
||||
|
||||
@@ -1,12 +1,18 @@
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/appleboy/gin-jwt/v2 v2.9.1 h1:l29et8iLW6omcHltsOP6LLk4s3v4g2FbFs0koxGWVZs=
|
||||
github.com/appleboy/gin-jwt/v2 v2.9.1/go.mod h1:jwcPZJ92uoC9nOUTOKWoN/f6JZOgMSKlFSHw5/FrRUk=
|
||||
github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4=
|
||||
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
|
||||
@@ -24,6 +30,9 @@ github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1
|
||||
github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM=
|
||||
github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc=
|
||||
github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
|
||||
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
|
||||
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
||||
@@ -38,23 +47,39 @@ github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw
|
||||
github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
|
||||
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
|
||||
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
|
||||
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
|
||||
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
|
||||
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
|
||||
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
|
||||
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
|
||||
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
@@ -73,19 +98,31 @@ github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7N
|
||||
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
|
||||
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
|
||||
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
@@ -93,13 +130,24 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
|
||||
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
|
||||
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI=
|
||||
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
|
||||
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
|
||||
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
|
||||
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
||||
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||
@@ -115,6 +163,7 @@ github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
@@ -129,14 +178,22 @@ github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
|
||||
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
|
||||
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
|
||||
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
|
||||
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
|
||||
@@ -145,51 +202,76 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
|
||||
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
|
||||
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
|
||||
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
|
||||
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
|
||||
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
|
||||
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
|
||||
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
|
||||
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
@@ -200,8 +282,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
|
||||
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
|
||||
github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
|
||||
@@ -216,6 +298,8 @@ github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPD
|
||||
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
|
||||
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
|
||||
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
|
||||
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
|
||||
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
|
||||
@@ -227,11 +311,16 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
||||
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
|
||||
golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
@@ -240,7 +329,10 @@ golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
|
||||
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
@@ -250,13 +342,25 @@ golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
|
||||
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
|
||||
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -269,36 +373,56 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
@@ -28,10 +28,17 @@ type ProverManager struct {
    BundleCollectionTimeSec int `json:"bundle_collection_time_sec"`
}

// l2geth client configuration items
type L2Endpoint struct {
    Url string `json:"endpoint"`
}

// L2 loads l2geth configuration items.
type L2 struct {
    // l2geth chain_id.
    ChainID uint64 `json:"chain_id"`
    ChainID uint64 `json:"chain_id"`
    Endpoint *L2Endpoint `json:"l2geth"`
    ValidiumMode bool `json:"validium_mode"`
}

// Auth provides the auth coordinator
@@ -41,24 +48,34 @@ type Auth struct {
    LoginExpireDurationSec int `json:"login_expire_duration_sec"`
}

// The sequencer controlled data
type Sequencer struct {
    DecryptionKey string `json:"decryption_key"`
}

// Config load configuration items.
type Config struct {
    ProverManager *ProverManager `json:"prover_manager"`
    DB *database.Config `json:"db"`
    L2 *L2 `json:"l2"`
    Auth *Auth `json:"auth"`
    Sequencer *Sequencer `json:"sequencer"`
}

// CircuitConfig circuit items.
type CircuitConfig struct {
// AssetConfig contain assets configurated for each fork, the defaul vkfile name is "OpenVmVk.json".
type AssetConfig struct {
    AssetsPath string `json:"assets_path"`
    Version uint8 `json:"version,omitempty"`
    ForkName string `json:"fork_name"`
    MinProverVersion string `json:"min_prover_version"`
    Vkfile string `json:"vk_file,omitempty"`
    MinProverVersion string `json:"min_prover_version,omitempty"`
    Features string `json:"features,omitempty"`
}

// VerifierConfig load zk verifier config.
type VerifierConfig struct {
    HighVersionCircuit *CircuitConfig `json:"high_version_circuit"`
    MinProverVersion string `json:"min_prover_version"`
    Verifiers []AssetConfig `json:"verifiers"`
}

// NewConfig returns a new instance of Config.

@@ -20,11 +20,11 @@ func TestConfig(t *testing.T) {
    "batch_collection_time_sec": 180,
    "chunk_collection_time_sec": 180,
    "verifier": {
        "high_version_circuit": {
        "min_prover_version": "v4.4.45",
        "verifiers": [{
            "assets_path": "assets",
            "fork_name": "euclidV2",
            "min_prover_version": "v4.4.45"
        }
            "fork_name": "galileo"
        }]
    },
    "max_verifier_workers": 4
},
@@ -35,13 +35,17 @@ func TestConfig(t *testing.T) {
    "maxIdleNum": 20
},
"l2": {
    "chain_id": 111
    "chain_id": 111,
    "validium_mode": false
},
"auth": {
    "secret": "prover secret key",
    "challenge_expire_duration_sec": 3600,
    "login_expire_duration_sec": 3600
}
},
"sequencer": {
    "decryption_key": "sequencer decryption key"
}
}`

t.Run("Success Case", func(t *testing.T) {

coordinator/internal/config/proxy_config.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package config

import (
    "encoding/json"
    "os"
    "path/filepath"

    "scroll-tech/common/database"
    "scroll-tech/common/utils"
)

// Proxy loads proxy configuration items.
type ProxyManager struct {
    // Zk verifier config help to confine the connected prover.
    Verifier *VerifierConfig  `json:"verifier"`
    Client   *ProxyClient     `json:"proxy_cli"`
    Auth     *Auth            `json:"auth"`
    DB       *database.Config `json:"db,omitempty"`
}

func (m *ProxyManager) Normalize() {
    if m.Client.Secret == "" {
        m.Client.Secret = m.Auth.Secret
    }

    if m.Client.ProxyVersion == "" {
        m.Client.ProxyVersion = m.Verifier.MinProverVersion
    }
}

// Proxy client configuration for connect to upstream as a client
type ProxyClient struct {
    ProxyName    string `json:"proxy_name"`
    ProxyVersion string `json:"proxy_version,omitempty"`
    Secret       string `json:"secret,omitempty"`
}

// Coordinator configuration
type UpStream struct {
    BaseUrl              string `json:"base_url"`
    RetryCount           uint   `json:"retry_count"`
    RetryWaitTime        uint   `json:"retry_wait_time_sec"`
    ConnectionTimeoutSec uint   `json:"connection_timeout_sec"`
    CompatibileMode      bool   `json:"compatible_mode,omitempty"`
}

// Config load configuration items.
type ProxyConfig struct {
    ProxyManager *ProxyManager        `json:"proxy_manager"`
    ProxyName    string               `json:"proxy_name"`
    Coordinators map[string]*UpStream `json:"coordinators"`
}

// NewConfig returns a new instance of Config.
func NewProxyConfig(file string) (*ProxyConfig, error) {
    buf, err := os.ReadFile(filepath.Clean(file))
    if err != nil {
        return nil, err
    }

    cfg := &ProxyConfig{}
    err = json.Unmarshal(buf, cfg)
    if err != nil {
        return nil, err
    }

    // Override config with environment variables
    err = utils.OverrideConfigWithEnv(cfg, "SCROLL_COORDINATOR_PROXY")
    if err != nil {
        return nil, err
    }

    return cfg, nil
}

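A minimal sketch of a proxy configuration consumed by the file above. The JSON keys come from the struct tags in ProxyConfig, ProxyManager and UpStream; every value is a placeholder, and the snippet would have to live inside the coordinator module since the config package is internal.

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "scroll-tech/coordinator/internal/config"
)

const sampleProxyConfig = `{
  "proxy_name": "proxy-eu-1",
  "proxy_manager": {
    "auth": {"secret": "proxy secret", "challenge_expire_duration_sec": 3600, "login_expire_duration_sec": 3600},
    "verifier": {"min_prover_version": "v4.4.45", "verifiers": []},
    "proxy_cli": {"proxy_name": "proxy-eu-1"}
  },
  "coordinators": {
    "mainnet": {"base_url": "https://coordinator.example.com", "retry_count": 3, "retry_wait_time_sec": 5, "connection_timeout_sec": 30}
  }
}`

func main() {
    cfg := &config.ProxyConfig{}
    if err := json.Unmarshal([]byte(sampleProxyConfig), cfg); err != nil {
        log.Fatal(err)
    }
    // copy the auth secret / min prover version into the upstream client settings
    cfg.ProxyManager.Normalize()
    fmt.Println(cfg.ProxyManager.Client.Secret, cfg.Coordinators["mainnet"].BaseUrl)
}

Normalize lets a deployment omit proxy_cli.secret and proxy_version and fall back to the auth secret and the verifier's min_prover_version.
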
@@ -19,28 +19,56 @@ type AuthController struct {
    loginLogic *auth.LoginLogic
}

// NewAuthController returns an LoginController instance
func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController {
func NewAuthControllerWithLogic(loginLogic *auth.LoginLogic) *AuthController {
    return &AuthController{
        loginLogic: auth.NewLoginLogic(db, cfg, vf),
        loginLogic: loginLogic,
    }
}

// Login the api controller for login
// NewAuthController returns an LoginController instance
func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController {
    return &AuthController{
        loginLogic: auth.NewLoginLogic(db, cfg.ProverManager.Verifier, vf),
    }
}

// Login the api controller for login, used as the Authenticator in JWT
// It can work in two mode: full process for normal login, or if login request
// is posted from proxy, run a simpler process to login a client
func (a *AuthController) Login(c *gin.Context) (interface{}, error) {

    // check if the login is post by proxy
    var viaProxy bool
    if proverType, proverTypeExist := c.Get(types.ProverProviderTypeKey); proverTypeExist {
        proverType := uint8(proverType.(float64))
        viaProxy = proverType == types.ProverProviderTypeProxy
    }

    var login types.LoginParameter
    if err := c.ShouldBind(&login); err != nil {
        return "", fmt.Errorf("missing the public_key, err:%w", err)
    }

    // check login parameter's token is equal to bearer token, the Authorization must be existed
    // if not exist, the jwt token will intercept it
    brearToken := c.GetHeader("Authorization")
    if brearToken != "Bearer "+login.Message.Challenge {
        return "", errors.New("check challenge failure for the not equal challenge string")
    // if not, process with normal login
    if !viaProxy {
        // check login parameter's token is equal to bearer token, the Authorization must be existed
        // if not exist, the jwt token will intercept it
        brearToken := c.GetHeader("Authorization")
        if brearToken != "Bearer "+login.Message.Challenge {
            return "", errors.New("check challenge failure for the not equal challenge string")
        }

        if err := auth.VerifyMsg(&login); err != nil {
            return "", err
        }

        // check the challenge is used, if used, return failure
        if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
            return "", fmt.Errorf("login insert challenge string failure:%w", err)
        }
    }

    if err := a.loginLogic.Check(&login); err != nil {
    if err := a.loginLogic.CompatiblityCheck(&login); err != nil {
        return "", fmt.Errorf("check the login parameter failure: %w", err)
    }

@@ -49,11 +77,6 @@ func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
        return "", fmt.Errorf("prover hard fork name failure:%w", err)
    }

    // check the challenge is used, if used, return failure
    if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
        return "", fmt.Errorf("login insert challenge string failure:%w", err)
    }

    returnData := types.LoginParameterWithHardForkName{
        HardForkName: hardForkNames,
        LoginParameter: login,
@@ -85,10 +108,6 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
        c.Set(types.ProverName, proverName)
    }

    if publicKey, ok := claims[types.PublicKey]; ok {
        c.Set(types.PublicKey, publicKey)
    }

    if proverVersion, ok := claims[types.ProverVersion]; ok {
        c.Set(types.ProverVersion, proverVersion)
    }
@@ -101,5 +120,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
        c.Set(types.ProverProviderTypeKey, providerType)
    }

    if publicKey, ok := claims[types.PublicKey]; ok {
        return publicKey
    }

    return nil
}

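The new comment states that this Login is used as the JWT Authenticator. For orientation, here is a hedged wiring sketch (not the repository's actual setup code) of how an Authenticator/PayloadFunc/IdentityHandler trio like the one above plugs into gin-jwt; the key, timeout and interface are placeholders.

package proxywiring

import (
    "time"

    jwt "github.com/appleboy/gin-jwt/v2"
    "github.com/gin-gonic/gin"
)

// newLoginMiddleware wires any controller exposing the three handlers into gin-jwt.
func newLoginMiddleware(authCtl interface {
    Login(*gin.Context) (interface{}, error)
    PayloadFunc(interface{}) jwt.MapClaims
    IdentityHandler(*gin.Context) interface{}
}) (*jwt.GinJWTMiddleware, error) {
    return jwt.New(&jwt.GinJWTMiddleware{
        Key:             []byte("placeholder-secret"),
        Timeout:         time.Hour,
        Authenticator:   authCtl.Login,           // runs the full or proxy-aware login above
        PayloadFunc:     authCtl.PayloadFunc,     // claims written into the issued token
        IdentityHandler: authCtl.IdentityHandler, // claims restored into the gin context per request
    })
}
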
@@ -1,12 +1,15 @@
package api

import (
    "encoding/json"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/scroll-tech/go-ethereum/params"
    "gorm.io/gorm"

    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/logic/libzkp"
    "scroll-tech/coordinator/internal/logic/verifier"
)

@@ -21,14 +24,27 @@ var (

// InitController inits Controller with database
func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) {
    vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
    validiumMode := cfg.L2.ValidiumMode

    vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier, validiumMode)
    if err != nil {
        panic("proof receiver new verifier failure")
    }

    log.Info("verifier created", "openVmVerifier", vf.OpenVMVkMap)

    // TODO: enable this when the libzkp has been updated
    l2cfg := cfg.L2.Endpoint
    if l2cfg == nil {
        panic("l2geth is not specified")
    }
    l2cfgBytes, err := json.Marshal(l2cfg)
    if err != nil {
        panic(err)
    }
    libzkp.InitL2geth(string(l2cfgBytes))

    Auth = NewAuthController(db, cfg, vf)
    GetTask = NewGetTaskController(cfg, chainCfg, db, reg)
    GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
    SubmitProof = NewSubmitProofController(cfg, chainCfg, db, vf, reg)
}

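InitController now serialises the l2geth endpoint config and hands it to libzkp.InitL2geth as JSON. Given the L2Endpoint struct above (tag `json:"endpoint"`), the payload is a one-field JSON object; an illustration with a placeholder URL:

package main

import (
    "encoding/json"
    "fmt"
)

// mirrors the L2Endpoint struct from the config hunk above
type L2Endpoint struct {
    Url string `json:"endpoint"`
}

func main() {
    b, _ := json.Marshal(&L2Endpoint{Url: "http://l2geth:8545"}) // placeholder URL
    fmt.Println(string(b))                                       // {"endpoint":"http://l2geth:8545"}
}
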
@@ -17,6 +17,7 @@ import (

    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/logic/provertask"
    "scroll-tech/coordinator/internal/logic/verifier"
    coordinatorType "scroll-tech/coordinator/internal/types"
)

@@ -25,13 +26,15 @@ type GetTaskController struct {
    proverTasks map[message.ProofType]provertask.ProverTask

    getTaskAccessCounter *prometheus.CounterVec

    l2syncer *l2Syncer
}

// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *GetTaskController {
    chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, reg)
    batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, reg)
    bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, reg)
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, verifier *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
    chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, verifier.ChunkVk, reg)
    batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, verifier.BatchVk, reg)
    bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, verifier.BundleVk, reg)

    ptc := &GetTaskController{
        proverTasks: make(map[message.ProofType]provertask.ProverTask),
@@ -44,6 +47,13 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
    ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
    ptc.proverTasks[message.ProofTypeBatch] = batchProverTask
    ptc.proverTasks[message.ProofTypeBundle] = bundleProverTask

    if syncer, err := createL2Syncer(cfg); err != nil {
        log.Crit("can not init l2 syncer", "err", err)
    } else {
        ptc.l2syncer = syncer
    }

    return ptc
}

@@ -78,6 +88,17 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
        return
    }

    if getTaskParameter.ProverHeight == 0 {
        // help update the prover height with internal l2geth
        if blk, err := ptc.l2syncer.getLatestBlockNumber(ctx); err == nil {
            getTaskParameter.ProverHeight = blk
        } else {
            nerr := fmt.Errorf("inner l2geth failure, err:%w", err)
            types.RenderFailure(ctx, types.InternalServerError, nerr)
            return
        }
    }

    proofType := ptc.proofType(&getTaskParameter)
    proverTask, isExist := ptc.proverTasks[proofType]
    if !isExist {

coordinator/internal/controller/api/l2_syncer.go (new file, 71 lines)
@@ -0,0 +1,71 @@
//go:build !mock_verifier

package api

import (
    "errors"
    "fmt"
    "sync"
    "time"

    "github.com/gin-gonic/gin"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"

    "scroll-tech/coordinator/internal/config"
)

type l2Syncer struct {
    l2gethClient    *ethclient.Client
    lastBlockNumber struct {
        sync.RWMutex
        data uint64
        t    time.Time
    }
}

func createL2Syncer(cfg *config.Config) (*l2Syncer, error) {

    if cfg.L2 == nil || cfg.L2.Endpoint == nil {
        return nil, fmt.Errorf("l2 endpoint is not set in config")
    } else {
        l2gethClient, err := ethclient.Dial(cfg.L2.Endpoint.Url)
        if err != nil {
            return nil, fmt.Errorf("dial l2geth endpoint fail, err: %s", err)
        }
        return &l2Syncer{
            l2gethClient: l2gethClient,
        }, nil
    }
}

// getLatestBlockNumber gets the latest block number, using cache if available and not expired
func (syncer *l2Syncer) getLatestBlockNumber(ctx *gin.Context) (uint64, error) {
    // First check if we have a cached value that's still valid
    syncer.lastBlockNumber.RLock()
    if !syncer.lastBlockNumber.t.IsZero() && time.Since(syncer.lastBlockNumber.t) < time.Second*10 {
        blockNumber := syncer.lastBlockNumber.data
        syncer.lastBlockNumber.RUnlock()
        return blockNumber, nil
    }
    syncer.lastBlockNumber.RUnlock()

    // If not cached or expired, fetch from the client
    if syncer.l2gethClient == nil {
        return 0, errors.New("L2 geth client not initialized")
    }

    blockNumber, err := syncer.l2gethClient.BlockNumber(ctx)
    if err != nil {
        return 0, fmt.Errorf("failed to get latest block number: %w", err)
    }

    // Update the cache
    syncer.lastBlockNumber.Lock()
    syncer.lastBlockNumber.data = blockNumber
    syncer.lastBlockNumber.t = time.Now()
    syncer.lastBlockNumber.Unlock()

    log.Debug("updated block height reference", "height", blockNumber)
    return blockNumber, nil
}

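getLatestBlockNumber is a read-through cache: many concurrent GetTasks calls share one l2geth query per roughly ten seconds. The same pattern in isolation, with a placeholder fetch function standing in for ethclient:

package main

import (
    "fmt"
    "sync"
    "time"
)

type cachedUint64 struct {
    mu    sync.RWMutex
    value uint64
    at    time.Time
    ttl   time.Duration
    fetch func() (uint64, error)
}

func (c *cachedUint64) Get() (uint64, error) {
    // fast path: serve the cached value while it is fresh
    c.mu.RLock()
    if !c.at.IsZero() && time.Since(c.at) < c.ttl {
        v := c.value
        c.mu.RUnlock()
        return v, nil
    }
    c.mu.RUnlock()

    // stale or empty: refetch (ethclient.BlockNumber in the real syncer) and refresh
    v, err := c.fetch()
    if err != nil {
        return 0, err
    }
    c.mu.Lock()
    c.value, c.at = v, time.Now()
    c.mu.Unlock()
    return v, nil
}

func main() {
    c := &cachedUint64{ttl: 10 * time.Second, fetch: func() (uint64, error) { return 42, nil }}
    v, _ := c.Get()
    fmt.Println(v)
}
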
coordinator/internal/controller/api/mock_syncer.go (new file, 20 lines)
@@ -0,0 +1,20 @@
//go:build mock_verifier

package api

import (
    "scroll-tech/coordinator/internal/config"

    "github.com/gin-gonic/gin"
)

type l2Syncer struct{}

func createL2Syncer(_ *config.Config) (*l2Syncer, error) {
    return &l2Syncer{}, nil
}

// getLatestBlockNumber gets the latest block number, using cache if available and not expired
func (syncer *l2Syncer) getLatestBlockNumber(_ *gin.Context) (uint64, error) {
    return 99999994, nil
}

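The mock_verifier build tag selects this stub instead of the real l2geth-backed syncer, so builds and tests that set the tag never dial an endpoint, for example:

go test -tags mock_verifier ./coordinator/...
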
150
coordinator/internal/controller/proxy/auth.go
Normal file
150
coordinator/internal/controller/proxy/auth.go
Normal file
@@ -0,0 +1,150 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
jwt "github.com/appleboy/gin-jwt/v2"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/controller/api"
|
||||
"scroll-tech/coordinator/internal/logic/auth"
|
||||
"scroll-tech/coordinator/internal/logic/verifier"
|
||||
"scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
// AuthController is login API
|
||||
type AuthController struct {
|
||||
apiLogin *api.AuthController
|
||||
clients Clients
|
||||
proverMgr *ProverManager
|
||||
}
|
||||
|
||||
const upstreamConnTimeout = time.Second * 5
|
||||
const LoginParamCache = "login_param"
|
||||
const ProverTypesKey = "prover_types"
|
||||
const SignatureKey = "prover_signature"
|
||||
|
||||
// NewAuthController returns an LoginController instance
|
||||
func NewAuthController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager) *AuthController {
|
||||
|
||||
// use a dummy Verifier to create login logic (we do not use any information in verifier)
|
||||
dummyVf := verifier.Verifier{
|
||||
OpenVMVkMap: make(map[string]struct{}),
|
||||
}
|
||||
loginLogic := auth.NewLoginLogicWithSimpleDeduplicator(cfg.ProxyManager.Verifier, &dummyVf)
|
||||
|
||||
authController := &AuthController{
|
||||
apiLogin: api.NewAuthControllerWithLogic(loginLogic),
|
||||
clients: clients,
|
||||
proverMgr: proverMgr,
|
||||
}
|
||||
|
||||
return authController
|
||||
}
|
||||
|
||||
// Login extended the Login hander in api controller
|
||||
func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
|
||||
|
||||
loginRes, err := a.apiLogin.Login(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
loginParam := loginRes.(types.LoginParameterWithHardForkName)
|
||||
|
||||
if loginParam.LoginParameter.Message.ProverProviderType == types.ProverProviderTypeProxy {
|
||||
return nil, fmt.Errorf("proxy do not support recursive login")
|
||||
}
|
||||
|
||||
session := a.proverMgr.GetOrCreate(loginParam.PublicKey)
|
||||
log.Debug("start handling login", "cli", loginParam.Message.ProverName)
|
||||
|
||||
loginCtx, cf := context.WithTimeout(context.Background(), upstreamConnTimeout)
|
||||
var wg sync.WaitGroup
|
||||
for _, cli := range a.clients {
|
||||
wg.Add(1)
|
||||
go func(cli Client) {
|
||||
defer wg.Done()
|
||||
if err := session.ProxyLogin(loginCtx, cli, &loginParam.LoginParameter); err != nil {
|
||||
log.Error("proxy login failed during token cache update",
|
||||
"userKey", loginParam.PublicKey,
|
||||
"upstream", cli.Name(),
|
||||
"error", err)
|
||||
}
|
||||
}(cli)
|
||||
}
|
||||
go func(cliName string) {
|
||||
wg.Wait()
|
||||
cf()
|
||||
log.Debug("first login attempt has completed", "cli", cliName)
|
||||
}(loginParam.Message.ProverName)
|
||||
|
||||
return loginParam.LoginParameter, nil
|
||||
}
|
||||
|
||||
// PayloadFunc returns jwt.MapClaims with {public key, prover name}.
|
||||
func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
|
||||
v, ok := data.(types.LoginParameter)
|
||||
if !ok {
|
||||
log.Error("PayloadFunc received unexpected type", "type", fmt.Sprintf("%T", data))
|
||||
return jwt.MapClaims{}
|
||||
}
|
||||
|
||||
return jwt.MapClaims{
|
||||
types.PublicKey: v.PublicKey,
|
||||
types.ProverName: v.Message.ProverName,
|
||||
types.ProverVersion: v.Message.ProverVersion,
|
||||
types.ProverProviderTypeKey: v.Message.ProverProviderType,
|
||||
SignatureKey: v.Signature,
|
||||
ProverTypesKey: v.Message.ProverTypes,
|
||||
}
|
||||
}
|
||||
|
||||
// IdentityHandler replies to client for /login
|
||||
func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
|
||||
claims := jwt.ExtractClaims(c)
|
||||
loginParam := &types.LoginParameter{}
|
||||
|
||||
if proverName, ok := claims[types.ProverName]; ok {
|
||||
loginParam.Message.ProverName, _ = proverName.(string)
|
||||
}
|
||||
|
||||
if proverVersion, ok := claims[types.ProverVersion]; ok {
|
||||
loginParam.Message.ProverVersion, _ = proverVersion.(string)
|
||||
}
|
||||
|
||||
if providerType, ok := claims[types.ProverProviderTypeKey]; ok {
|
||||
num, _ := providerType.(float64)
|
||||
loginParam.Message.ProverProviderType = types.ProverProviderType(num)
|
||||
}
|
||||
|
||||
if signature, ok := claims[SignatureKey]; ok {
|
||||
loginParam.Signature, _ = signature.(string)
|
||||
}
|
||||
|
||||
if proverTypes, ok := claims[ProverTypesKey]; ok {
|
||||
arr, _ := proverTypes.([]any)
|
||||
for _, elm := range arr {
|
||||
num, _ := elm.(float64)
|
||||
loginParam.Message.ProverTypes = append(loginParam.Message.ProverTypes, types.ProverType(num))
|
||||
}
|
||||
}
|
||||
|
||||
if publicKey, ok := claims[types.PublicKey]; ok {
|
||||
loginParam.PublicKey, _ = publicKey.(string)
|
||||
}
|
||||
|
||||
if loginParam.PublicKey != "" {
|
||||
|
||||
c.Set(LoginParamCache, loginParam)
|
||||
c.Set(types.ProverName, loginParam.Message.ProverName)
|
||||
// public key will also be set since we have specified public_key as the identity key
|
||||
return loginParam.PublicKey
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
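For orientation, here is a minimal sketch (not part of this diff) of how these callbacks could be wired into the gin-jwt middleware that fronts the proxy's login route; the secret, timeout and variable names are illustrative assumptions.

// Hypothetical wiring of the proxy AuthController into gin-jwt; values are assumptions.
auth := NewAuthController(cfg, clients, proverMgr)
authMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
	Key:             []byte("example-secret"), // assumption: the real secret comes from config
	Timeout:         time.Hour,                // assumed token lifetime
	IdentityKey:     types.PublicKey,
	PayloadFunc:     auth.PayloadFunc,
	IdentityHandler: auth.IdentityHandler,
	Authenticator:   auth.Login,
})
if err != nil {
	log.Crit("failed to build jwt middleware", "error", err)
}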
246
coordinator/internal/controller/proxy/client.go
Normal file
@@ -0,0 +1,246 @@
|
||||
//nolint:errcheck,bodyclose // body is closed in the following handleHttpResp call
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
|
||||
ctypes "scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
type ProxyCli interface {
|
||||
Login(ctx context.Context, genLogin func(string) (*types.LoginParameter, error)) (*ctypes.Response, error)
|
||||
ProxyLogin(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error)
|
||||
Token() string
|
||||
Reset()
|
||||
}
|
||||
|
||||
type ProverCli interface {
|
||||
GetTask(ctx context.Context, param *types.GetTaskParameter) (*ctypes.Response, error)
|
||||
SubmitProof(ctx context.Context, param *types.SubmitProofParameter) (*ctypes.Response, error)
|
||||
}
|
||||
|
||||
// Client wraps an http client with a preset host for coordinator API calls
|
||||
type upClient struct {
|
||||
httpClient *http.Client
|
||||
baseURL string
|
||||
loginToken string
|
||||
compatibileMode bool
|
||||
resetFromMgr func()
|
||||
}
|
||||
|
||||
// NewClient creates a new Client with the specified host
|
||||
func newUpClient(cfg *config.UpStream) *upClient {
|
||||
return &upClient{
|
||||
httpClient: &http.Client{
|
||||
Timeout: time.Duration(cfg.ConnectionTimeoutSec) * time.Second,
|
||||
},
|
||||
baseURL: cfg.BaseUrl,
|
||||
compatibileMode: cfg.CompatibileMode,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *upClient) Reset() {
|
||||
if c.resetFromMgr != nil {
|
||||
c.resetFromMgr()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *upClient) Token() string {
|
||||
return c.loginToken
|
||||
}
|
||||
|
||||
// need a parsable schema definition
|
||||
type loginSchema struct {
|
||||
Time string `json:"time"`
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
// Login performs the complete login process: get challenge then login
|
||||
func (c *upClient) Login(ctx context.Context, genLogin func(string) (*types.LoginParameter, error)) (*ctypes.Response, error) {
|
||||
// Step 1: Get challenge
|
||||
url := fmt.Sprintf("%s/coordinator/v1/challenge", c.baseURL)
|
||||
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create challenge request: %w", err)
|
||||
}
|
||||
|
||||
challengeResp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get challenge: %w", err)
|
||||
}
|
||||
|
||||
parsedResp, err := handleHttpResp(challengeResp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if parsedResp.ErrCode != 0 {
|
||||
return nil, fmt.Errorf("challenge failed: %d (%s)", parsedResp.ErrCode, parsedResp.ErrMsg)
|
||||
}
|
||||
|
||||
// Step 2: Parse challenge response
|
||||
var challengeSchema loginSchema
|
||||
if err := parsedResp.DecodeData(&challengeSchema); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse challenge response: %w", err)
|
||||
}
|
||||
|
||||
// Step 3: Use the token from challenge as Bearer token for login
|
||||
url = fmt.Sprintf("%s/coordinator/v1/login", c.baseURL)
|
||||
|
||||
param, err := genLogin(challengeSchema.Token)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to setup login parameter: %w", err)
|
||||
}
|
||||
|
||||
jsonData, err := json.Marshal(param)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal login parameter: %w", err)
|
||||
}
|
||||
|
||||
req, err = http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create login request: %w", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "Bearer "+challengeSchema.Token)
|
||||
|
||||
loginResp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to perform login request: %w", err)
|
||||
}
|
||||
return handleHttpResp(loginResp)
|
||||
}
|
||||
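As a rough usage sketch (assuming a caller that already holds its own key pair), the challenge token from step 1 is fed back through the genLogin closure so the caller can sign it before the actual login request:

// Hedged sketch: myPrivKey / myPubKeyHex are placeholders for the caller's identity.
resp, err := cli.Login(ctx, func(challenge string) (*types.LoginParameter, error) {
	p := &types.LoginParameter{PublicKey: myPubKeyHex}
	p.Message.Challenge = challenge
	p.Message.ProverName = "example-prover" // illustrative value
	if signErr := p.SignWithKey(myPrivKey); signErr != nil {
		return nil, signErr
	}
	return p, nil
})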
|
||||
func handleHttpResp(resp *http.Response) (*ctypes.Response, error) {
|
||||
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusUnauthorized {
|
||||
defer resp.Body.Close()
|
||||
var respWithData ctypes.Response
|
||||
// Note: Body is consumed after decoding, caller should not read it again
|
||||
if err := json.NewDecoder(resp.Body).Decode(&respWithData); err == nil {
|
||||
return &respWithData, nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("login parsing expected response failed: %v", err)
|
||||
}
|
||||
|
||||
}
|
||||
return nil, fmt.Errorf("login request failed with status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
func (c *upClient) proxyLoginCompatibleMode(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error) {
|
||||
mimePrivK, err := buildPrivateKey([]byte(param.PublicKey))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mimePkHex := common.Bytes2Hex(crypto.CompressPubkey(&mimePrivK.PublicKey))
|
||||
|
||||
genLoginParam := func(challenge string) (*types.LoginParameter, error) {
|
||||
|
||||
// Create login parameter with proxy settings
|
||||
loginParam := &types.LoginParameter{
|
||||
Message: param.Message,
|
||||
PublicKey: mimePkHex,
|
||||
}
|
||||
loginParam.Message.Challenge = challenge
|
||||
|
||||
// Sign the message with the private key
|
||||
if err := loginParam.SignWithKey(mimePrivK); err != nil {
|
||||
return nil, fmt.Errorf("failed to sign login parameter: %w", err)
|
||||
}
|
||||
|
||||
return loginParam, nil
|
||||
}
|
||||
|
||||
return c.Login(ctx, genLoginParam)
|
||||
}
|
||||
|
||||
// ProxyLogin makes a POST request to /v1/proxy_login with LoginParameter
|
||||
func (c *upClient) ProxyLogin(ctx context.Context, param *types.LoginParameter) (*ctypes.Response, error) {
|
||||
|
||||
if c.compatibileMode {
|
||||
return c.proxyLoginCompatibleMode(ctx, param)
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/coordinator/v1/proxy_login", c.baseURL)
|
||||
|
||||
jsonData, err := json.Marshal(param)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal proxy login parameter: %w", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create proxy login request: %w", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "Bearer "+c.loginToken)
|
||||
|
||||
proxyLoginResp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to perform proxy login request: %w", err)
|
||||
}
|
||||
return handleHttpResp(proxyLoginResp)
|
||||
}
|
||||
|
||||
// GetTask makes a POST request to /v1/get_task with GetTaskParameter
|
||||
func (c *upClient) GetTask(ctx context.Context, param *types.GetTaskParameter) (*ctypes.Response, error) {
|
||||
url := fmt.Sprintf("%s/coordinator/v1/get_task", c.baseURL)
|
||||
|
||||
jsonData, err := json.Marshal(param)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal get task parameter: %w", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create get task request: %w", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
if c.loginToken != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+c.loginToken)
|
||||
}
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return handleHttpResp(resp)
|
||||
}
|
||||
|
||||
// SubmitProof makes a POST request to /v1/submit_proof with SubmitProofParameter
|
||||
func (c *upClient) SubmitProof(ctx context.Context, param *types.SubmitProofParameter) (*ctypes.Response, error) {
|
||||
url := fmt.Sprintf("%s/coordinator/v1/submit_proof", c.baseURL)
|
||||
|
||||
jsonData, err := json.Marshal(param)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal submit proof parameter: %w", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create submit proof request: %w", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
if c.loginToken != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+c.loginToken)
|
||||
}
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return handleHttpResp(resp)
|
||||
}
|
||||
220
coordinator/internal/controller/proxy/client_manager.go
Normal file
@@ -0,0 +1,220 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/common/version"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
type Client interface {
|
||||
// Client returns a client that accesses the upstream coordinator with the specified identity,
// so the prover can contact the coordinator as itself
|
||||
Client(string) ProverCli
|
||||
// ClientAsProxy returns the client that accesses the upstream as the proxy itself
|
||||
ClientAsProxy(context.Context) ProxyCli
|
||||
Name() string
|
||||
}
|
||||
|
||||
type ClientManager struct {
|
||||
name string
|
||||
cliCfg *config.ProxyClient
|
||||
cfg *config.UpStream
|
||||
privKey *ecdsa.PrivateKey
|
||||
|
||||
cachedCli struct {
|
||||
sync.RWMutex
|
||||
cli *upClient
|
||||
completionCtx context.Context
|
||||
}
|
||||
}
|
||||
|
||||
// buildPrivateKey deterministically derives a valid ECDSA private key from arbitrary input bytes
|
||||
func buildPrivateKey(inputBytes []byte) (*ecdsa.PrivateKey, error) {
|
||||
// Try appending bytes from 0x0 to 0x20 until we get a valid private key
|
||||
for appendByte := byte(0x0); appendByte <= 0x20; appendByte++ {
|
||||
// Append the byte to input
|
||||
extendedBytes := append(inputBytes, appendByte)
|
||||
|
||||
// Calculate 256-bit hash
|
||||
hash := crypto.Keccak256(extendedBytes)
|
||||
|
||||
// Try to create private key from hash
|
||||
if k, err := crypto.ToECDSA(hash); err == nil {
|
||||
return k, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to generate valid private key from input bytes")
|
||||
}
|
||||
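The derivation is deterministic, so the same configured secret always maps to the same proxy identity. A quick sketch (the secret string below is made up):

priv, err := buildPrivateKey([]byte("example-proxy-secret"))
if err != nil {
	log.Crit("derive proxy key", "error", err)
}
// The compressed public key is what upstream coordinators see as this proxy's identity.
fmt.Println(common.Bytes2Hex(crypto.CompressPubkey(&priv.PublicKey)))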
|
||||
func NewClientManager(name string, cliCfg *config.ProxyClient, cfg *config.UpStream) (*ClientManager, error) {
|
||||
|
||||
log.Info("init client", "name", name, "upcfg", cfg.BaseUrl, "compatible mode", cfg.CompatibileMode)
|
||||
privKey, err := buildPrivateKey([]byte(cliCfg.Secret))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ClientManager{
|
||||
name: name,
|
||||
privKey: privKey,
|
||||
cfg: cfg,
|
||||
cliCfg: cliCfg,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type ctxKeyType string
|
||||
|
||||
const loginCliKey ctxKeyType = "cli"
|
||||
|
||||
func (cliMgr *ClientManager) doLogin(ctx context.Context, loginCli *upClient) {
|
||||
if cliMgr.cfg.CompatibileMode {
|
||||
loginCli.loginToken = "dummy"
|
||||
log.Info("Skip login process for compatible mode")
|
||||
return
|
||||
}
|
||||
|
||||
// Calculate wait time between 2 seconds and cfg.RetryWaitTime
|
||||
minWait := 2 * time.Second
|
||||
waitDuration := time.Duration(cliMgr.cfg.RetryWaitTime) * time.Second
|
||||
if waitDuration < minWait {
|
||||
waitDuration = minWait
|
||||
}
|
||||
|
||||
for {
|
||||
log.Info("proxy attempting login to upstream coordinator", "name", cliMgr.name)
|
||||
loginResp, err := loginCli.Login(ctx, cliMgr.genLoginParam)
|
||||
if err == nil && loginResp.ErrCode == 0 {
|
||||
var loginResult loginSchema
|
||||
err = loginResp.DecodeData(&loginResult)
|
||||
if err != nil {
|
||||
log.Error("login parsing data fail", "error", err)
|
||||
} else {
|
||||
loginCli.loginToken = loginResult.Token
|
||||
log.Info("login to upstream coordinator successful", "name", cliMgr.name, "time", loginResult.Time)
|
||||
// TODO: we need to parse time if we start making use of it
|
||||
return
|
||||
}
|
||||
} else if err != nil {
|
||||
log.Error("login process fail", "error", err)
|
||||
} else {
|
||||
log.Error("login get fail resp", "code", loginResp.ErrCode, "msg", loginResp.ErrMsg)
|
||||
}
|
||||
|
||||
log.Info("login to upstream coordinator failed, retrying", "name", cliMgr.name, "error", err, "waitDuration", waitDuration)
|
||||
|
||||
timer := time.NewTimer(waitDuration)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
timer.Stop()
|
||||
return
|
||||
case <-timer.C:
|
||||
// Continue to next retry
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cliMgr *ClientManager) Name() string {
|
||||
return cliMgr.name
|
||||
}
|
||||
|
||||
func (cliMgr *ClientManager) Client(token string) ProverCli {
|
||||
loginCli := newUpClient(cliMgr.cfg)
|
||||
loginCli.loginToken = token
|
||||
return loginCli
|
||||
}
|
||||
|
||||
func (cliMgr *ClientManager) ClientAsProxy(ctx context.Context) ProxyCli {
|
||||
cliMgr.cachedCli.RLock()
|
||||
if cliMgr.cachedCli.cli != nil {
|
||||
defer cliMgr.cachedCli.RUnlock()
|
||||
return cliMgr.cachedCli.cli
|
||||
}
|
||||
cliMgr.cachedCli.RUnlock()
|
||||
|
||||
cliMgr.cachedCli.Lock()
|
||||
if cliMgr.cachedCli.cli != nil {
|
||||
defer cliMgr.cachedCli.Unlock()
|
||||
return cliMgr.cachedCli.cli
|
||||
}
|
||||
|
||||
var completionCtx context.Context
|
||||
// Check if completion context is set
|
||||
if cliMgr.cachedCli.completionCtx != nil {
|
||||
completionCtx = cliMgr.cachedCli.completionCtx
|
||||
} else {
|
||||
// Set new completion context and launch login goroutine
|
||||
ctx, completionDone := context.WithCancel(context.TODO())
|
||||
loginCli := newUpClient(cliMgr.cfg)
|
||||
loginCli.resetFromMgr = func() {
|
||||
cliMgr.cachedCli.Lock()
|
||||
if cliMgr.cachedCli.cli == loginCli {
|
||||
log.Info("cached client cleared", "name", cliMgr.name)
|
||||
cliMgr.cachedCli.cli = nil
|
||||
}
|
||||
cliMgr.cachedCli.Unlock()
|
||||
}
|
||||
completionCtx = context.WithValue(ctx, loginCliKey, loginCli)
|
||||
cliMgr.cachedCli.completionCtx = completionCtx
|
||||
|
||||
// Launch keep-login goroutine
|
||||
go func() {
|
||||
defer completionDone()
|
||||
cliMgr.doLogin(context.Background(), loginCli)
|
||||
|
||||
cliMgr.cachedCli.Lock()
|
||||
cliMgr.cachedCli.cli = loginCli
|
||||
cliMgr.cachedCli.completionCtx = nil
|
||||
|
||||
cliMgr.cachedCli.Unlock()
|
||||
|
||||
}()
|
||||
}
|
||||
cliMgr.cachedCli.Unlock()
|
||||
|
||||
// Wait for completion or request cancellation
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case <-completionCtx.Done():
|
||||
cli := completionCtx.Value(loginCliKey).(*upClient)
|
||||
return cli
|
||||
}
|
||||
}
|
||||
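The double-checked locking plus completion context above acts as a single-flight guard: only the first caller launches doLogin, and every concurrent caller waits on the same completionCtx. A hedged sketch of the caller's view (loginParam is a placeholder):

cli := cliMgr.ClientAsProxy(ctx)
if cli == nil {
	// the request context was cancelled before the upstream login finished
	return
}
resp, err := cli.ProxyLogin(ctx, loginParam)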
|
||||
func (cliMgr *ClientManager) genLoginParam(challenge string) (*types.LoginParameter, error) {
|
||||
|
||||
// Generate public key string
|
||||
publicKeyHex := common.Bytes2Hex(crypto.CompressPubkey(&cliMgr.privKey.PublicKey))
|
||||
|
||||
// Create login parameter with proxy settings
|
||||
loginParam := &types.LoginParameter{
|
||||
Message: types.Message{
|
||||
Challenge: challenge,
|
||||
ProverName: cliMgr.cliCfg.ProxyName,
|
||||
ProverVersion: version.Version,
|
||||
ProverProviderType: types.ProverProviderTypeProxy,
|
||||
ProverTypes: []types.ProverType{}, // Default empty
|
||||
VKs: []string{}, // Default empty
|
||||
},
|
||||
PublicKey: publicKeyHex,
|
||||
}
|
||||
|
||||
// Sign the message with the private key
|
||||
if err := loginParam.SignWithKey(cliMgr.privKey); err != nil {
|
||||
return nil, fmt.Errorf("failed to sign login parameter: %w", err)
|
||||
}
|
||||
|
||||
return loginParam, nil
|
||||
}
|
||||
44
coordinator/internal/controller/proxy/controller.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
)
|
||||
|
||||
var (
|
||||
// GetTask the prover task controller
|
||||
GetTask *GetTaskController
|
||||
// SubmitProof the submit proof controller
|
||||
SubmitProof *SubmitProofController
|
||||
// Auth the auth controller
|
||||
Auth *AuthController
|
||||
)
|
||||
|
||||
// Clients manages a set of thread-safe clients for requesting upstream
// coordinators
|
||||
type Clients map[string]Client
|
||||
|
||||
// InitController inits Controller with database
|
||||
func InitController(cfg *config.ProxyConfig, db *gorm.DB, reg prometheus.Registerer) {
|
||||
// normalize cfg
|
||||
cfg.ProxyManager.Normalize()
|
||||
|
||||
clients := make(map[string]Client)
|
||||
|
||||
for nm, upCfg := range cfg.Coordinators {
|
||||
cli, err := NewClientManager(nm, cfg.ProxyManager.Client, upCfg)
|
||||
if err != nil {
|
||||
panic("create new client fail")
|
||||
}
|
||||
clients[cli.Name()] = cli
|
||||
}
|
||||
|
||||
proverManager := NewProverManagerWithPersistent(100, db)
|
||||
priorityManager := NewPriorityUpstreamManagerPersistent(db)
|
||||
|
||||
Auth = NewAuthController(cfg, clients, proverManager)
|
||||
GetTask = NewGetTaskController(cfg, clients, proverManager, priorityManager, reg)
|
||||
SubmitProof = NewSubmitProofController(cfg, clients, proverManager, priorityManager, reg)
|
||||
}
|
||||
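A hedged sketch of how the initialized controllers might be mounted on a gin engine; the route paths mirror the upstream coordinator API and are assumptions here, not part of this change:

proxy.InitController(cfg, db, prometheus.DefaultRegisterer)

r := gin.New()
v1 := r.Group("/coordinator/v1")
v1.POST("/get_task", proxy.GetTask.GetTasks)            // proxied to a chosen upstream
v1.POST("/submit_proof", proxy.SubmitProof.SubmitProof) // routed back by task-ID prefix
// login/challenge endpoints would additionally go through the JWT middleware
// built around proxy.Auth (see auth.go above).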
229
coordinator/internal/controller/proxy/get_task.go
Normal file
@@ -0,0 +1,229 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
func getSessionData(ctx *gin.Context) (string, string) {
|
||||
|
||||
publicKeyData, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
|
||||
publicKey, castOk := publicKeyData.(string)
|
||||
if !publicKeyExist || !castOk {
|
||||
nerr := fmt.Errorf("no public key binding: %v", publicKeyData)
|
||||
log.Warn("get_task parameter fail", "error", nerr)
|
||||
|
||||
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
|
||||
return "", ""
|
||||
}
|
||||
|
||||
publicNameData, publicNameExist := ctx.Get(coordinatorType.ProverName)
|
||||
publicName, castOk := publicNameData.(string)
|
||||
if !publicNameExist || !castOk {
|
||||
log.Error("no public name binding for unknown reason, but we still forward with name = 'unknown'", "data", publicNameData)
|
||||
publicName = "unknown"
|
||||
}
|
||||
|
||||
return publicKey, publicName
|
||||
}
|
||||
|
||||
// PriorityUpstreamManager manages priority upstream mappings with thread safety
|
||||
type PriorityUpstreamManager struct {
|
||||
sync.RWMutex
|
||||
*proverPriorityPersist
|
||||
data map[string]string
|
||||
}
|
||||
|
||||
// NewPriorityUpstreamManager creates a new PriorityUpstreamManager
|
||||
func NewPriorityUpstreamManager() *PriorityUpstreamManager {
|
||||
return &PriorityUpstreamManager{
|
||||
data: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
// NewPriorityUpstreamManagerPersistent creates a PriorityUpstreamManager backed by a persistent store
|
||||
func NewPriorityUpstreamManagerPersistent(db *gorm.DB) *PriorityUpstreamManager {
|
||||
return &PriorityUpstreamManager{
|
||||
data: make(map[string]string),
|
||||
proverPriorityPersist: NewProverPriorityPersist(db),
|
||||
}
|
||||
}
|
||||
|
||||
// Get retrieves the priority upstream for a given key
|
||||
func (p *PriorityUpstreamManager) Get(key string) (string, bool) {
|
||||
|
||||
p.RLock()
|
||||
value, exists := p.data[key]
|
||||
p.RUnlock()
|
||||
|
||||
if !exists {
|
||||
if v, err := p.proverPriorityPersist.Get(key); err != nil {
|
||||
log.Error("persistent priority record read failure", "error", err, "key", key)
|
||||
} else if v != "" {
|
||||
log.Debug("restore record from persistent layer", "key", key, "value", v)
|
||||
return v, true
|
||||
}
|
||||
}
|
||||
|
||||
return value, exists
|
||||
}
|
||||
|
||||
// Set sets the priority upstream for a given key
|
||||
func (p *PriorityUpstreamManager) Set(key, value string) {
|
||||
defer func() {
|
||||
if err := p.proverPriorityPersist.Update(key, value); err != nil {
|
||||
log.Error("update priority record failure", "error", err, "key", key, "value", value)
|
||||
}
|
||||
}()
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
p.data[key] = value
|
||||
}
|
||||
|
||||
// Delete removes the priority upstream for a given key
|
||||
func (p *PriorityUpstreamManager) Delete(key string) {
|
||||
defer func() {
|
||||
if err := p.proverPriorityPersist.Del(key); err != nil {
|
||||
log.Error("delete priority record failure", "error", err, "key", key)
|
||||
}
|
||||
}()
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
delete(p.data, key)
|
||||
}
|
||||
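In effect this is the proxy's sticky-routing table: once a prover receives a task from an upstream, later polls prefer that upstream until the assignment is cleared. A small sketch (the upstream name is illustrative):

pum := NewPriorityUpstreamManager()
pum.Set(publicKey, "upstream-a")
if up, ok := pum.Get(publicKey); ok {
	log.Debug("prefer upstream for next get_task", "up", up)
}
pum.Delete(publicKey) // cleared after a successful submit or when no task is found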
|
||||
// GetTaskController the get prover task api controller
|
||||
type GetTaskController struct {
|
||||
proverMgr *ProverManager
|
||||
clients Clients
|
||||
priorityUpstream *PriorityUpstreamManager
|
||||
|
||||
//workingRnd *rand.Rand
|
||||
//getTaskAccessCounter *prometheus.CounterVec
|
||||
}
|
||||
|
||||
// NewGetTaskController create a get prover task controller
|
||||
func NewGetTaskController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager, priorityMgr *PriorityUpstreamManager, reg prometheus.Registerer) *GetTaskController {
|
||||
// TODO: implement proxy get task controller initialization
|
||||
return &GetTaskController{
|
||||
priorityUpstream: priorityMgr,
|
||||
proverMgr: proverMgr,
|
||||
clients: clients,
|
||||
}
|
||||
}
|
||||
|
||||
// func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
|
||||
// // TODO: implement proxy get task access counter
|
||||
// return nil
|
||||
// }
|
||||
|
||||
// GetTasks get assigned chunk/batch task
|
||||
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
|
||||
|
||||
var getTaskParameter coordinatorType.GetTaskParameter
|
||||
if err := ctx.ShouldBind(&getTaskParameter); err != nil {
|
||||
nerr := fmt.Errorf("prover task parameter invalid, err:%w", err)
|
||||
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
|
||||
return
|
||||
}
|
||||
|
||||
publicKey, proverName := getSessionData(ctx)
|
||||
if publicKey == "" {
|
||||
return
|
||||
}
|
||||
|
||||
session := ptc.proverMgr.Get(publicKey)
|
||||
if session == nil {
|
||||
nerr := fmt.Errorf("can not get session for prover %s", proverName)
|
||||
types.RenderFailure(ctx, types.InternalServerError, nerr)
|
||||
return
|
||||
}
|
||||
|
||||
getTask := func(cli Client) (error, int) {
|
||||
log.Debug("Start get task", "up", cli.Name(), "cli", proverName)
|
||||
upStream := cli.Name()
|
||||
resp, err := session.GetTask(ctx, &getTaskParameter, cli)
|
||||
if err != nil {
|
||||
log.Error("Upstream error for get task", "error", err, "up", upStream, "cli", proverName)
|
||||
return err, types.ErrCoordinatorGetTaskFailure
|
||||
} else if resp.ErrCode != types.ErrCoordinatorEmptyProofData {
|
||||
|
||||
if resp.ErrCode != 0 {
|
||||
// simply dispatch the error from upstream to prover
|
||||
log.Error("Upstream has error resp for get task", "code", resp.ErrCode, "msg", resp.ErrMsg, "up", upStream, "cli", proverName)
|
||||
return fmt.Errorf("upstream failure %s:", resp.ErrMsg), resp.ErrCode
|
||||
}
|
||||
|
||||
var task coordinatorType.GetTaskSchema
|
||||
if err = resp.DecodeData(&task); err == nil {
|
||||
task.TaskID = formUpstreamWithTaskName(upStream, task.TaskID)
|
||||
ptc.priorityUpstream.Set(publicKey, upStream)
|
||||
log.Debug("Upstream get task", "up", upStream, "cli", proverName, "taskID", task.TaskID, "taskType", task.TaskType)
|
||||
types.RenderSuccess(ctx, &task)
|
||||
return nil, 0
|
||||
} else {
|
||||
log.Error("Upstream has wrong data for get task", "error", err, "up", upStream, "cli", proverName)
|
||||
return fmt.Errorf("decode task fail: %v", err), types.InternalServerError
|
||||
}
|
||||
}
|
||||
|
||||
return nil, resp.ErrCode
|
||||
}
|
||||
|
||||
// if a priority upstream is set, try it first until we get a task response or an empty-task response
|
||||
priorityUpstream, exist := ptc.priorityUpstream.Get(publicKey)
|
||||
if exist {
|
||||
cli := ptc.clients[priorityUpstream]
|
||||
log.Debug("Try get task from priority stream", "up", priorityUpstream, "cli", proverName)
|
||||
if cli != nil {
|
||||
err, code := getTask(cli)
|
||||
if err != nil {
|
||||
types.RenderFailure(ctx, code, err)
|
||||
return
|
||||
} else if code == 0 {
|
||||
// get task done and rendered, return
|
||||
return
|
||||
}
|
||||
// only continue if we got an empty task (the task has been removed upstream)
|
||||
log.Debug("can not get priority task from upstream", "up", priorityUpstream, "cli", proverName)
|
||||
|
||||
} else {
|
||||
log.Warn("A upstream is removed or lost for some reason while running", "up", priorityUpstream, "cli", proverName)
|
||||
}
|
||||
}
|
||||
ptc.priorityUpstream.Delete(publicKey)
|
||||
|
||||
// Create a slice to hold the keys
|
||||
keys := make([]string, 0, len(ptc.clients))
|
||||
for k := range ptc.clients {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
// Shuffle the keys with rand.Shuffle (no explicit seeding needed; rand.Seed is deprecated)
|
||||
rand.Shuffle(len(keys), func(i, j int) {
|
||||
keys[i], keys[j] = keys[j], keys[i]
|
||||
})
|
||||
|
||||
// Iterate over the shuffled keys
|
||||
for _, n := range keys {
|
||||
if err, code := getTask(ptc.clients[n]); err == nil && code == 0 {
|
||||
// get task done
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
log.Debug("get no task from upstream", "cli", proverName)
|
||||
// if all get task failed, throw empty proof resp
|
||||
types.RenderFailure(ctx, types.ErrCoordinatorEmptyProofData, fmt.Errorf("get empty prover task"))
|
||||
}
|
||||
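The TaskID rewrite above is what lets SubmitProof route a proof back to the upstream it came from; the helpers live in submit_proof.go. Roughly (the names below are illustrative values):

namespaced := formUpstreamWithTaskName("upstream-a", "task-123") // -> "upstream-a:task-123"
up, raw := upstreamFromTaskName(namespaced)                      // -> "upstream-a", "task-123"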
125
coordinator/internal/controller/proxy/persistent.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
|
||||
"scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
type proverDataPersist struct {
|
||||
db *gorm.DB
|
||||
}
|
||||
|
||||
// NewProverDataPersist creates a persistence instance backed by a gorm DB.
|
||||
func NewProverDataPersist(db *gorm.DB) *proverDataPersist {
|
||||
return &proverDataPersist{db: db}
|
||||
}
|
||||
|
||||
// gorm model mapping to table `prover_sessions`
|
||||
type proverSessionRecord struct {
|
||||
PublicKey string `gorm:"column:public_key;not null"`
|
||||
Upstream string `gorm:"column:upstream;not null"`
|
||||
UpToken string `gorm:"column:up_token;not null"`
|
||||
Expired time.Time `gorm:"column:expired;not null"`
|
||||
}
|
||||
|
||||
func (proverSessionRecord) TableName() string { return "prover_sessions" }
|
||||
|
||||
// priority_upstream model
|
||||
type priorityUpstreamRecord struct {
|
||||
PublicKey string `gorm:"column:public_key;not null"`
|
||||
Upstream string `gorm:"column:upstream;not null"`
|
||||
}
|
||||
|
||||
func (priorityUpstreamRecord) TableName() string { return "priority_upstream" }
|
||||
|
||||
// Get retrieves the proverSession for a given user key; returns nil if it does not exist yet
|
||||
func (p *proverDataPersist) Get(userKey string) (*proverSession, error) {
|
||||
if p == nil || p.db == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var rows []proverSessionRecord
|
||||
if err := p.db.Where("public_key = ?", userKey).Find(&rows).Error; err != nil || len(rows) == 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret := &proverSession{
|
||||
proverToken: make(map[string]loginToken),
|
||||
}
|
||||
for _, r := range rows {
|
||||
ls := &types.LoginSchema{
|
||||
Token: r.UpToken,
|
||||
Time: r.Expired,
|
||||
}
|
||||
ret.proverToken[r.Upstream] = loginToken{LoginSchema: ls}
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (p *proverDataPersist) Update(userKey, up string, login *types.LoginSchema) error {
|
||||
if p == nil || p.db == nil || login == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
rec := proverSessionRecord{
|
||||
PublicKey: userKey,
|
||||
Upstream: up,
|
||||
UpToken: login.Token,
|
||||
Expired: login.Time,
|
||||
}
|
||||
|
||||
return p.db.Clauses(
|
||||
clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "public_key"}, {Name: "upstream"}},
|
||||
DoUpdates: clause.AssignmentColumns([]string{"up_token", "expired"}),
|
||||
},
|
||||
).Create(&rec).Error
|
||||
}
|
||||
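The upserts above assume unique indexes on (public_key, upstream) for prover_sessions and on (public_key) for priority_upstream; the migration itself is not part of this diff. As an assumption, the tables could be created along these lines:

// Hedged sketch only: the real schema/migration is not included in this change.
if err := db.AutoMigrate(&proverSessionRecord{}, &priorityUpstreamRecord{}); err != nil {
	log.Crit("migrate proxy tables", "error", err)
}
// plus unique indexes on (public_key, upstream) and (public_key) respectively,
// so the ON CONFLICT clauses above can match.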
|
||||
type proverPriorityPersist struct {
|
||||
db *gorm.DB
|
||||
}
|
||||
|
||||
func NewProverPriorityPersist(db *gorm.DB) *proverPriorityPersist {
|
||||
return &proverPriorityPersist{db: db}
|
||||
}
|
||||
|
||||
func (p *proverPriorityPersist) Get(userKey string) (string, error) {
|
||||
if p == nil || p.db == nil {
|
||||
return "", nil
|
||||
}
|
||||
var rec priorityUpstreamRecord
|
||||
if err := p.db.Where("public_key = ?", userKey).First(&rec).Error; err != nil {
|
||||
if err != gorm.ErrRecordNotFound {
|
||||
return "", err
|
||||
} else {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
}
|
||||
return rec.Upstream, nil
|
||||
}
|
||||
|
||||
func (p *proverPriorityPersist) Update(userKey, up string) error {
|
||||
if p == nil || p.db == nil {
|
||||
return nil
|
||||
}
|
||||
rec := priorityUpstreamRecord{PublicKey: userKey, Upstream: up}
|
||||
return p.db.Clauses(
|
||||
clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "public_key"}},
|
||||
DoUpdates: clause.Assignments(map[string]interface{}{"upstream": up}),
|
||||
},
|
||||
).Create(&rec).Error
|
||||
}
|
||||
|
||||
func (p *proverPriorityPersist) Del(userKey string) error {
|
||||
if p == nil || p.db == nil {
|
||||
return nil
|
||||
}
|
||||
return p.db.Where("public_key = ?", userKey).Delete(&priorityUpstreamRecord{}).Error
|
||||
}
|
||||
285
coordinator/internal/controller/proxy/prover_session.go
Normal file
@@ -0,0 +1,285 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
ctypes "scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
type ProverManager struct {
|
||||
sync.RWMutex
|
||||
data map[string]*proverSession
|
||||
willDeprecatedData map[string]*proverSession
|
||||
sizeLimit int
|
||||
persistent *proverDataPersist
|
||||
}
|
||||
|
||||
func NewProverManager(size int) *ProverManager {
|
||||
return &ProverManager{
|
||||
data: make(map[string]*proverSession),
|
||||
willDeprecatedData: make(map[string]*proverSession),
|
||||
sizeLimit: size,
|
||||
}
|
||||
}
|
||||
|
||||
func NewProverManagerWithPersistent(size int, db *gorm.DB) *ProverManager {
|
||||
return &ProverManager{
|
||||
data: make(map[string]*proverSession),
|
||||
willDeprecatedData: make(map[string]*proverSession),
|
||||
sizeLimit: size,
|
||||
persistent: NewProverDataPersist(db),
|
||||
}
|
||||
}
|
||||
|
||||
// Get retrieves the proverSession for a given user key; returns nil if it does not exist yet
|
||||
func (m *ProverManager) Get(userKey string) (ret *proverSession) {
|
||||
defer func() {
|
||||
if ret == nil {
|
||||
var err error
|
||||
ret, err = m.persistent.Get(userKey)
|
||||
if err != nil {
|
||||
log.Error("Get persistent layer for prover tokens fail", "error", err)
|
||||
} else if ret != nil {
|
||||
log.Debug("restore record from persistent", "key", userKey, "token", ret.proverToken)
|
||||
ret.persistent = m.persistent
|
||||
}
|
||||
}
|
||||
|
||||
if ret != nil {
|
||||
m.Lock()
|
||||
m.data[userKey] = ret
|
||||
m.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
if r, existed := m.data[userKey]; existed {
|
||||
return r
|
||||
} else {
|
||||
return m.willDeprecatedData[userKey]
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ProverManager) GetOrCreate(userKey string) *proverSession {
|
||||
|
||||
if ret := m.Get(userKey); ret != nil {
|
||||
return ret
|
||||
}
|
||||
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
ret := &proverSession{
|
||||
proverToken: make(map[string]loginToken),
|
||||
persistent: m.persistent,
|
||||
}
|
||||
|
||||
if len(m.data) >= m.sizeLimit {
|
||||
m.willDeprecatedData = m.data
|
||||
m.data = make(map[string]*proverSession)
|
||||
}
|
||||
|
||||
m.data[userKey] = ret
|
||||
return ret
|
||||
}
|
||||
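ProverManager behaves as a two-generation cache: when the active map reaches sizeLimit the whole map is demoted to willDeprecatedData, and entries touched through Get are promoted back (see the unit test below). For example:

pm := NewProverManager(2)
a := pm.GetOrCreate("a")
_ = pm.GetOrCreate("b")
_ = pm.GetOrCreate("c") // rollover: "a" and "b" move to willDeprecatedData
if pm.Get("a") == a {
	// still reachable, and now promoted back into the active map
}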
|
||||
type loginToken struct {
|
||||
*types.LoginSchema
|
||||
phase uint
|
||||
}
|
||||
|
||||
// proverSession tracks per-upstream login tokens for a single prover
|
||||
type proverSession struct {
|
||||
persistent *proverDataPersist
|
||||
|
||||
sync.RWMutex
|
||||
proverToken map[string]loginToken
|
||||
completionCtx context.Context
|
||||
}
|
||||
|
||||
func (c *proverSession) maintainLogin(ctx context.Context, cliMgr Client, up string, param *types.LoginParameter, phase uint) (result loginToken, nerr error) {
|
||||
c.Lock()
|
||||
curPhase := c.proverToken[up].phase
|
||||
if c.completionCtx != nil {
|
||||
waitctx := c.completionCtx
|
||||
c.Unlock()
|
||||
select {
|
||||
case <-waitctx.Done():
|
||||
return c.maintainLogin(ctx, cliMgr, up, param, phase)
|
||||
case <-ctx.Done():
|
||||
nerr = fmt.Errorf("ctx fail")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if phase < curPhase {
|
||||
// outdated login phase, give up
|
||||
log.Debug("drop outdated proxy login attempt", "upstream", up, "cli", param.Message.ProverName, "phase", phase, "now", curPhase)
|
||||
defer c.Unlock()
|
||||
return c.proverToken[up], nil
|
||||
}
|
||||
|
||||
// occupy the update slot
|
||||
completeCtx, cf := context.WithCancel(ctx)
|
||||
defer cf()
|
||||
c.completionCtx = completeCtx
|
||||
defer func() {
|
||||
c.Lock()
|
||||
c.completionCtx = nil
|
||||
if result.LoginSchema != nil {
|
||||
c.proverToken[up] = result
|
||||
log.Info("maintain login status", "upstream", up, "cli", param.Message.ProverName, "phase", curPhase+1)
|
||||
}
|
||||
c.Unlock()
|
||||
if nerr != nil {
|
||||
log.Error("maintain login fail", "error", nerr, "upstream", up, "cli", param.Message.ProverName, "phase", curPhase)
|
||||
}
|
||||
}()
|
||||
c.Unlock()
|
||||
|
||||
log.Debug("start proxy login process", "upstream", up, "cli", param.Message.ProverName)
|
||||
|
||||
cli := cliMgr.ClientAsProxy(ctx)
|
||||
if cli == nil {
|
||||
nerr = fmt.Errorf("get upstream cli fail")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := cli.ProxyLogin(ctx, param)
|
||||
if err != nil {
|
||||
nerr = fmt.Errorf("proxylogin fail: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if resp.ErrCode == ctypes.ErrJWTTokenExpired {
|
||||
log.Info("up stream has expired, renew upstream connection", "up", up)
|
||||
cli.Reset()
|
||||
cli = cliMgr.ClientAsProxy(ctx)
|
||||
if cli == nil {
|
||||
nerr = fmt.Errorf("get upstream cli fail (secondary try)")
|
||||
return
|
||||
}
|
||||
|
||||
// like the SDK, we retry once more if the upstream token has expired
|
||||
resp, err = cli.ProxyLogin(ctx, param)
|
||||
if err != nil {
|
||||
nerr = fmt.Errorf("proxylogin fail: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if resp.ErrCode != 0 {
|
||||
nerr = fmt.Errorf("upstream fail: %d (%s)", resp.ErrCode, resp.ErrMsg)
|
||||
return
|
||||
}
|
||||
|
||||
var loginResult loginSchema
|
||||
if err := resp.DecodeData(&loginResult); err != nil {
|
||||
nerr = err
|
||||
return
|
||||
}
|
||||
|
||||
log.Debug("Proxy login done", "upstream", up, "cli", param.Message.ProverName)
|
||||
result = loginToken{
|
||||
LoginSchema: &types.LoginSchema{
|
||||
Token: loginResult.Token,
|
||||
},
|
||||
phase: curPhase + 1,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// const expireTolerant = 10 * time.Minute
|
||||
|
||||
// ProxyLogin makes a POST request to /v1/proxy_login with LoginParameter
|
||||
func (c *proverSession) ProxyLogin(ctx context.Context, cli Client, param *types.LoginParameter) error {
|
||||
up := cli.Name()
|
||||
c.RLock()
|
||||
existedToken := c.proverToken[up]
|
||||
c.RUnlock()
|
||||
|
||||
newtoken, err := c.maintainLogin(ctx, cli, up, param, math.MaxUint)
|
||||
if newtoken.phase > existedToken.phase {
|
||||
if err := c.persistent.Update(param.PublicKey, up, newtoken.LoginSchema); err != nil {
|
||||
log.Error("Update persistent layer for prover tokens fail", "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// GetTask makes a POST request to /v1/get_task with GetTaskParameter
|
||||
func (c *proverSession) GetTask(ctx context.Context, param *types.GetTaskParameter, cliMgr Client) (*ctypes.Response, error) {
|
||||
up := cliMgr.Name()
|
||||
c.RLock()
|
||||
log.Debug("call get task", "up", up, "tokens", c.proverToken)
|
||||
token := c.proverToken[up]
|
||||
c.RUnlock()
|
||||
|
||||
if token.LoginSchema != nil {
|
||||
resp, err := cliMgr.Client(token.Token).GetTask(ctx, param)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.ErrCode != ctypes.ErrJWTTokenExpired {
|
||||
return resp, nil
|
||||
}
|
||||
}
|
||||
|
||||
// like the SDK, we retry once more if the upstream token has expired
|
||||
// get param from ctx
|
||||
loginParam, ok := ctx.Value(LoginParamCache).(*types.LoginParameter)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Unexpected error, no loginparam ctx value")
|
||||
}
|
||||
|
||||
newToken, err := c.maintainLogin(ctx, cliMgr, up, loginParam, token.phase)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("update prover token fail: %v", err)
|
||||
}
|
||||
|
||||
return cliMgr.Client(newToken.Token).GetTask(ctx, param)
|
||||
|
||||
}
|
||||
|
||||
// SubmitProof makes a POST request to /v1/submit_proof with SubmitProofParameter
|
||||
func (c *proverSession) SubmitProof(ctx context.Context, param *types.SubmitProofParameter, cliMgr Client) (*ctypes.Response, error) {
|
||||
up := cliMgr.Name()
|
||||
c.RLock()
|
||||
token := c.proverToken[up]
|
||||
c.RUnlock()
|
||||
|
||||
if token.LoginSchema != nil {
|
||||
resp, err := cliMgr.Client(token.Token).SubmitProof(ctx, param)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.ErrCode != ctypes.ErrJWTTokenExpired {
|
||||
return resp, nil
|
||||
}
|
||||
}
|
||||
|
||||
// like the SDK, we retry once more if the upstream token has expired
|
||||
// get param from ctx
|
||||
loginParam, ok := ctx.Value(LoginParamCache).(*types.LoginParameter)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Unexpected error, no loginparam ctx value")
|
||||
}
|
||||
|
||||
newToken, err := c.maintainLogin(ctx, cliMgr, up, loginParam, token.phase)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("update prover token fail: %v", err)
|
||||
}
|
||||
|
||||
return cliMgr.Client(newToken.Token).SubmitProof(ctx, param)
|
||||
}
|
||||
107
coordinator/internal/controller/proxy/prover_session_test.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestProverManagerGetAndCreate validates basic creation and retrieval semantics.
|
||||
func TestProverManagerGetAndCreate(t *testing.T) {
|
||||
pm := NewProverManager(2)
|
||||
|
||||
if got := pm.Get("user1"); got != nil {
|
||||
t.Fatalf("expected nil for non-existent key, got: %+v", got)
|
||||
}
|
||||
|
||||
sess1 := pm.GetOrCreate("user1")
|
||||
if sess1 == nil {
|
||||
t.Fatalf("expected non-nil session from GetOrCreate")
|
||||
}
|
||||
|
||||
// Should be stable on subsequent Get
|
||||
if got := pm.Get("user1"); got != sess1 {
|
||||
t.Fatalf("expected same session pointer on Get, got different instance: %p vs %p", got, sess1)
|
||||
}
|
||||
}
|
||||
|
||||
// TestProverManagerRolloverAndPromotion verifies rollover when sizeLimit is reached
|
||||
// and that old entries are accessible and promoted back to active data map.
|
||||
func TestProverManagerRolloverAndPromotion(t *testing.T) {
|
||||
pm := NewProverManager(2)
|
||||
|
||||
s1 := pm.GetOrCreate("u1")
|
||||
s2 := pm.GetOrCreate("u2")
|
||||
if s1 == nil || s2 == nil {
|
||||
t.Fatalf("expected sessions to be created for u1/u2")
|
||||
}
|
||||
|
||||
// Precondition: data should contain 2 entries, no deprecated yet.
|
||||
pm.RLock()
|
||||
if len(pm.data) != 2 {
|
||||
pm.RUnlock()
|
||||
t.Fatalf("expected data len=2 before rollover, got %d", len(pm.data))
|
||||
}
|
||||
if len(pm.willDeprecatedData) != 0 {
|
||||
pm.RUnlock()
|
||||
t.Fatalf("expected willDeprecatedData len=0 before rollover, got %d", len(pm.willDeprecatedData))
|
||||
}
|
||||
pm.RUnlock()
|
||||
|
||||
// Trigger rollover by creating a third key.
|
||||
s3 := pm.GetOrCreate("u3")
|
||||
if s3 == nil {
|
||||
t.Fatalf("expected session for u3 after rollover")
|
||||
}
|
||||
|
||||
// After rollover: current data should only have u3, deprecated should hold u1 and u2.
|
||||
pm.RLock()
|
||||
if len(pm.data) != 1 {
|
||||
pm.RUnlock()
|
||||
t.Fatalf("expected data len=1 after rollover (only u3), got %d", len(pm.data))
|
||||
}
|
||||
if _, ok := pm.data["u3"]; !ok {
|
||||
pm.RUnlock()
|
||||
t.Fatalf("expected 'u3' to be in active data after rollover")
|
||||
}
|
||||
if len(pm.willDeprecatedData) != 2 {
|
||||
pm.RUnlock()
|
||||
t.Fatalf("expected willDeprecatedData len=2 after rollover, got %d", len(pm.willDeprecatedData))
|
||||
}
|
||||
pm.RUnlock()
|
||||
|
||||
// Accessing an old key should return the same pointer and promote it to active data map.
|
||||
got1 := pm.Get("u1")
|
||||
if got1 != s1 {
|
||||
t.Fatalf("expected same pointer for u1 after promotion, got %p want %p", got1, s1)
|
||||
}
|
||||
|
||||
// The promotion should add it to active data (without enforcing size limit on promotion).
|
||||
pm.RLock()
|
||||
if _, ok := pm.data["u1"]; !ok {
|
||||
pm.RUnlock()
|
||||
t.Fatalf("expected 'u1' to be present in active data after promotion")
|
||||
}
|
||||
if len(pm.data) != 2 {
|
||||
// Now should contain u3 and u1
|
||||
pm.RUnlock()
|
||||
t.Fatalf("expected data len=2 after promotion of u1, got %d", len(pm.data))
|
||||
}
|
||||
pm.RUnlock()
|
||||
|
||||
// Access the other deprecated key and ensure behavior is consistent.
|
||||
got2 := pm.Get("u2")
|
||||
if got2 != s2 {
|
||||
t.Fatalf("expected same pointer for u2 after promotion, got %p want %p", got2, s2)
|
||||
}
|
||||
|
||||
pm.RLock()
|
||||
if _, ok := pm.data["u2"]; !ok {
|
||||
pm.RUnlock()
|
||||
t.Fatalf("expected 'u2' to be present in active data after promotion")
|
||||
}
|
||||
// Note: promotion does not enforce sizeLimit, so data can grow beyond sizeLimit after promotions.
|
||||
if len(pm.data) != 3 {
|
||||
pm.RUnlock()
|
||||
t.Fatalf("expected data len=3 after promoting both u1 and u2, got %d", len(pm.data))
|
||||
}
|
||||
pm.RUnlock()
|
||||
}
|
||||
94
coordinator/internal/controller/proxy/submit_proof.go
Normal file
@@ -0,0 +1,94 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
// SubmitProofController the submit proof api controller
|
||||
type SubmitProofController struct {
|
||||
proverMgr *ProverManager
|
||||
clients Clients
|
||||
priorityUpstream *PriorityUpstreamManager
|
||||
}
|
||||
|
||||
// NewSubmitProofController create the submit proof api controller instance
|
||||
func NewSubmitProofController(cfg *config.ProxyConfig, clients Clients, proverMgr *ProverManager, priorityMgr *PriorityUpstreamManager, reg prometheus.Registerer) *SubmitProofController {
|
||||
return &SubmitProofController{
|
||||
proverMgr: proverMgr,
|
||||
clients: clients,
|
||||
priorityUpstream: priorityMgr,
|
||||
}
|
||||
}
|
||||
|
||||
func upstreamFromTaskName(taskID string) (string, string) {
|
||||
before, after, found := strings.Cut(taskID, ":")
if found {
return before, after
}
return "", taskID
|
||||
}
|
||||
|
||||
func formUpstreamWithTaskName(upstream string, taskID string) string {
|
||||
return fmt.Sprintf("%s:%s", upstream, taskID)
|
||||
}
|
||||
|
||||
// SubmitProof prover submit the proof to coordinator
|
||||
func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
|
||||
|
||||
var submitParameter coordinatorType.SubmitProofParameter
|
||||
if err := ctx.ShouldBind(&submitParameter); err != nil {
|
||||
nerr := fmt.Errorf("prover submitProof parameter invalid, err:%w", err)
|
||||
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
|
||||
return
|
||||
}
|
||||
|
||||
publicKey, proverName := getSessionData(ctx)
|
||||
if publicKey == "" {
|
||||
return
|
||||
}
|
||||
|
||||
session := spc.proverMgr.Get(publicKey)
|
||||
if session == nil {
|
||||
nerr := fmt.Errorf("can not get session for prover %s", proverName)
|
||||
types.RenderFailure(ctx, types.InternalServerError, nerr)
|
||||
return
|
||||
}
|
||||
|
||||
upstream, realTaskID := upstreamFromTaskName(submitParameter.TaskID)
|
||||
cli, existed := spc.clients[upstream]
|
||||
if !existed {
|
||||
log.Warn("A upstream for submitting is removed or lost for some reason while running", "up", upstream)
|
||||
nerr := fmt.Errorf("Invalid upstream name (%s) from taskID %s", upstream, submitParameter.TaskID)
|
||||
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
|
||||
return
|
||||
}
|
||||
log.Debug("Start submitting", "up", upstream, "cli", proverName, "id", realTaskID, "status", submitParameter.Status)
|
||||
submitParameter.TaskID = realTaskID
|
||||
|
||||
resp, err := session.SubmitProof(ctx, &submitParameter, cli)
|
||||
if err != nil {
|
||||
log.Error("Upstream has error resp for submit", "error", err, "up", upstream, "cli", proverName, "taskID", realTaskID)
|
||||
types.RenderFailure(ctx, types.ErrCoordinatorGetTaskFailure, err)
|
||||
return
|
||||
} else if resp.ErrCode != 0 {
|
||||
log.Error("Upstream has error resp for get task", "code", resp.ErrCode, "msg", resp.ErrMsg, "up", upstream, "cli", proverName, "taskID", realTaskID)
|
||||
// simply dispatch the error from upstream to prover
|
||||
types.RenderFailure(ctx, resp.ErrCode, fmt.Errorf("%s", resp.ErrMsg))
|
||||
return
|
||||
} else {
|
||||
log.Debug("Submit proof to upstream", "up", upstream, "cli", proverName, "taskID", realTaskID)
|
||||
spc.priorityUpstream.Delete(publicKey)
|
||||
types.RenderSuccess(ctx, resp.Data)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
@@ -9,7 +10,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/version"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
@@ -20,46 +20,72 @@ import (
|
||||
|
||||
// LoginLogic the auth logic
|
||||
type LoginLogic struct {
|
||||
cfg *config.Config
|
||||
challengeOrm *orm.Challenge
|
||||
cfg *config.VerifierConfig
|
||||
deduplicator ChallengeDeduplicator
|
||||
|
||||
openVmVks map[string]struct{}
|
||||
|
||||
proverVersionHardForkMap map[string][]string
|
||||
proverVersionHardForkMap map[string]string
|
||||
}
|
||||
|
||||
type ChallengeDeduplicator interface {
|
||||
InsertChallenge(ctx context.Context, challengeString string) error
|
||||
}
|
||||
|
||||
type SimpleDeduplicator struct {
|
||||
}
|
||||
|
||||
func (s *SimpleDeduplicator) InsertChallenge(ctx context.Context, challengeString string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewLoginLogicWithSimpleDeduplicator creates a LoginLogic that does not use the db to deduplicate challenges
|
||||
func NewLoginLogicWithSimpleDeduplicator(vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
|
||||
return newLoginLogic(&SimpleDeduplicator{}, vcfg, vf)
|
||||
}
|
||||
|
||||
// NewLoginLogic creates a LoginLogic
|
||||
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
|
||||
proverVersionHardForkMap := make(map[string][]string)
|
||||
func NewLoginLogic(db *gorm.DB, vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
|
||||
return newLoginLogic(orm.NewChallenge(db), vcfg, vf)
|
||||
}
|
||||
|
||||
var highHardForks []string
|
||||
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
|
||||
highHardForks = append(highHardForks, message.EuclidFork, message.EuclidV2Fork)
|
||||
proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
|
||||
func newLoginLogic(deduplicator ChallengeDeduplicator, vcfg *config.VerifierConfig, vf *verifier.Verifier) *LoginLogic {
|
||||
|
||||
proverVersionHardForkMap := make(map[string]string)
|
||||
|
||||
for _, cfg := range vcfg.Verifiers {
|
||||
proverVersionHardForkMap[cfg.ForkName] = cfg.MinProverVersion
|
||||
}
|
||||
|
||||
return &LoginLogic{
|
||||
cfg: cfg,
|
||||
cfg: vcfg,
|
||||
openVmVks: vf.OpenVMVkMap,
|
||||
challengeOrm: orm.NewChallenge(db),
|
||||
deduplicator: deduplicator,
|
||||
proverVersionHardForkMap: proverVersionHardForkMap,
|
||||
}
|
||||
}
|
||||
|
||||
// InsertChallengeString insert and check the challenge string is existed
|
||||
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
|
||||
return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
|
||||
}
|
||||
|
||||
func (l *LoginLogic) Check(login *types.LoginParameter) error {
|
||||
// VerifyMsg verifies the completeness of the login message
|
||||
func VerifyMsg(login *types.LoginParameter) error {
|
||||
verify, err := login.Verify()
|
||||
if err != nil || !verify {
|
||||
log.Error("auth message verify failure", "prover_name", login.Message.ProverName,
|
||||
"prover_version", login.Message.ProverVersion, "message", login.Message)
|
||||
return errors.New("auth message verify failure")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
|
||||
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion, login.Message.ProverVersion)
|
||||
// InsertChallengeString inserts the challenge string and checks whether it already exists
|
||||
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
|
||||
return l.deduplicator.InsertChallenge(ctx.Copy(), challenge)
|
||||
}
|
||||
|
||||
// Check if the login client is compatible with the setting in coordinator
|
||||
func (l *LoginLogic) CompatiblityCheck(login *types.LoginParameter) error {
|
||||
|
||||
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.MinProverVersion) {
|
||||
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.MinProverVersion, login.Message.ProverVersion)
|
||||
}
|
||||
|
||||
vks := make(map[string]struct{})
|
||||
@@ -67,27 +93,32 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
|
||||
vks[vk] = struct{}{}
|
||||
}
|
||||
|
||||
for _, vk := range login.Message.VKs {
|
||||
if _, ok := vks[vk]; !ok {
|
||||
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
|
||||
"prover_version", login.Message.ProverVersion, "message", login.Message)
|
||||
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
|
||||
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
|
||||
version.Version, login.Message.ProverVersion)
|
||||
// the new coordinator / proxy does not check vks at login; this code is kept only for backward compatibility
|
||||
if len(vks) != 0 {
|
||||
for _, vk := range login.Message.VKs {
|
||||
if _, ok := vks[vk]; !ok {
|
||||
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
|
||||
"prover_version", login.Message.ProverVersion, "message", login.Message)
|
||||
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
|
||||
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
|
||||
version.Version, login.Message.ProverVersion)
|
||||
}
|
||||
// if the prover reports a same prover version
|
||||
return errors.New("incompatible vk. please check your params files or config files")
|
||||
}
|
||||
// if the prover reports a same prover version
|
||||
return errors.New("incompatible vk. please check your params files or config files")
|
||||
}
|
||||
}
|
||||
|
||||
if login.Message.ProverProviderType != types.ProverProviderTypeInternal && login.Message.ProverProviderType != types.ProverProviderTypeExternal {
|
||||
switch login.Message.ProverProviderType {
|
||||
case types.ProverProviderTypeInternal:
|
||||
case types.ProverProviderTypeExternal:
|
||||
case types.ProverProviderTypeProxy:
|
||||
case types.ProverProviderTypeUndefined:
|
||||
// for backward compatibility, set ProverProviderType as internal
|
||||
if login.Message.ProverProviderType == types.ProverProviderTypeUndefined {
|
||||
login.Message.ProverProviderType = types.ProverProviderTypeInternal
|
||||
} else {
|
||||
log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
|
||||
return errors.New("invalid prover provider type.")
|
||||
}
|
||||
login.Message.ProverProviderType = types.ProverProviderTypeInternal
|
||||
default:
|
||||
log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
|
||||
return errors.New("invalid prover provider type.")
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -101,9 +132,15 @@ func (l *LoginLogic) ProverHardForkName(login *types.LoginParameter) (string, er
|
||||
}
|
||||
|
||||
proverVersion := proverVersionSplits[0]
|
||||
if hardForkNames, ok := l.proverVersionHardForkMap[proverVersion]; ok {
|
||||
return strings.Join(hardForkNames, ","), nil
|
||||
var hardForkNames []string
|
||||
for n, minVersion := range l.proverVersionHardForkMap {
|
||||
if minVersion == "" || version.CheckScrollRepoVersion(proverVersion, minVersion) {
|
||||
hardForkNames = append(hardForkNames, n)
|
||||
}
|
||||
}
|
||||
if len(hardForkNames) == 0 {
|
||||
return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
|
||||
return strings.Join(hardForkNames, ","), nil
|
||||
}
|
||||
|
||||
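The rewritten ProverHardForkName above no longer looks up a single exact version; it collects every hard fork whose configured minimum prover version the reported version satisfies. Below is a minimal standalone sketch of that selection, assuming a semver-style comparison in place of version.CheckScrollRepoVersion; the helper names and the example fork map are made up for illustration.

package main

import (
	"fmt"
	"strings"

	"golang.org/x/mod/semver"
)

// checkRepoVersion is a hypothetical stand-in for version.CheckScrollRepoVersion:
// true when the reported version is at least the required minimum.
func checkRepoVersion(got, min string) bool {
	return semver.Compare("v"+strings.TrimPrefix(got, "v"), "v"+strings.TrimPrefix(min, "v")) >= 0
}

// supportedForks mirrors the loop above: a fork with an empty minimum version is
// always allowed; otherwise the prover version must meet the fork's minimum.
func supportedForks(proverVersion string, forkMinVersions map[string]string) []string {
	var forks []string
	for fork, minVersion := range forkMinVersions {
		if minVersion == "" || checkRepoVersion(proverVersion, minVersion) {
			forks = append(forks, fork)
		}
	}
	return forks
}

func main() {
	// Hypothetical fork -> minimum prover version configuration.
	forkMinVersions := map[string]string{"euclid": "", "euclidV2": "4.4.45"}
	fmt.Println(strings.Join(supportedForks("4.5.11", forkMinVersions), ","))
}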
coordinator/internal/logic/libzkp/Makefile (new file, 17 lines)
@@ -0,0 +1,17 @@
.PHONY: help fmt clippy test test-ci test-all

build:
@cargo build --release -p libzkp-c
@mkdir -p lib
@cp -f ../../../../target/release/libzkp.so lib/

fmt:
@cargo fmt --all -- --check

clean:
@cargo clean --release -p libzkp -p libzkp-c -p l2geth
@rm -f lib/libzkp.so

clippy:
@cargo check --release --all-features
@cargo clippy --release -- -D warnings
@@ -20,7 +20,7 @@ function build_test_bins() {
cd $REPO/coordinator
make libzkp
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd $REPO/common/libzkp
cd $REPO/coordinator/internal/logic/libzkp
}

build_test_bins
coordinator/internal/logic/libzkp/lib.go (new file, 134 lines)
@@ -0,0 +1,134 @@
|
||||
//go:build !mock_verifier
|
||||
|
||||
package libzkp
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath=${SRCDIR}/lib
|
||||
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
|
||||
#include <stdlib.h>
|
||||
#include "libzkp.h"
|
||||
*/
|
||||
import "C" //nolint:typecheck
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func init() {
|
||||
C.init_tracing()
|
||||
}
|
||||
|
||||
// Helper function to convert Go string to C string and handle cleanup
|
||||
func goToCString(s string) *C.char {
|
||||
return C.CString(s)
|
||||
}
|
||||
|
||||
// Helper function to free C string
|
||||
func freeCString(s *C.char) {
|
||||
C.free(unsafe.Pointer(s))
|
||||
}
|
||||
|
||||
// Initialize the verifier
|
||||
func InitVerifier(configJSON string) {
|
||||
cConfig := goToCString(configJSON)
|
||||
defer freeCString(cConfig)
|
||||
|
||||
C.init_verifier(cConfig)
|
||||
}
|
||||
|
||||
// Verify a chunk proof
|
||||
func VerifyChunkProof(proofData, forkName string) bool {
|
||||
cProof := goToCString(proofData)
|
||||
cForkName := goToCString(strings.ToLower(forkName))
|
||||
defer freeCString(cProof)
|
||||
defer freeCString(cForkName)
|
||||
|
||||
result := C.verify_chunk_proof(cProof, cForkName)
|
||||
return result != 0
|
||||
}
|
||||
|
||||
// Verify a batch proof
|
||||
func VerifyBatchProof(proofData, forkName string) bool {
|
||||
cProof := goToCString(proofData)
|
||||
cForkName := goToCString(strings.ToLower(forkName))
|
||||
defer freeCString(cProof)
|
||||
defer freeCString(cForkName)
|
||||
|
||||
result := C.verify_batch_proof(cProof, cForkName)
|
||||
return result != 0
|
||||
}
|
||||
|
||||
// Verify a bundle proof
|
||||
func VerifyBundleProof(proofData, forkName string) bool {
|
||||
cProof := goToCString(proofData)
|
||||
cForkName := goToCString(strings.ToLower(forkName))
|
||||
defer freeCString(cProof)
|
||||
defer freeCString(cForkName)
|
||||
|
||||
result := C.verify_bundle_proof(cProof, cForkName)
|
||||
return result != 0
|
||||
}
|
||||
|
||||
// Generate wrapped proof
|
||||
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
|
||||
cProofJSON := goToCString(proofJSON)
|
||||
cMetadata := goToCString(metadata)
|
||||
defer freeCString(cProofJSON)
|
||||
defer freeCString(cMetadata)
|
||||
|
||||
// Create a C array from Go slice
|
||||
var cVkData *C.char
|
||||
if len(vkData) > 0 {
|
||||
cVkData = (*C.char)(unsafe.Pointer(&vkData[0]))
|
||||
}
|
||||
|
||||
resultPtr := C.gen_wrapped_proof(cProofJSON, cMetadata, cVkData, C.size_t(len(vkData)))
|
||||
if resultPtr == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Convert result to Go string and free C memory
|
||||
result := C.GoString(resultPtr)
|
||||
C.release_string(resultPtr)
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Dumps a verification key to a file
|
||||
func DumpVk(forkName, filePath string) error {
|
||||
cForkName := goToCString(strings.ToLower(forkName))
|
||||
cFilePath := goToCString(filePath)
|
||||
defer freeCString(cForkName)
|
||||
defer freeCString(cFilePath)
|
||||
|
||||
// Call the C function to dump the verification key
|
||||
C.dump_vk(cForkName, cFilePath)
|
||||
|
||||
// Check if the file was created successfully
|
||||
// Note: The C function doesn't return an error code, so we check if the file exists
|
||||
if _, err := os.Stat(filePath); os.IsNotExist(err) {
|
||||
return fmt.Errorf("failed to dump verification key: file %s was not created", filePath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UniversalTaskCompatibilityFix calls the universal task compatibility fix function
|
||||
func UniversalTaskCompatibilityFix(taskJSON string) (string, error) {
|
||||
cTaskJSON := goToCString(taskJSON)
|
||||
defer freeCString(cTaskJSON)
|
||||
|
||||
resultPtr := C.univ_task_compatibility_fix(cTaskJSON)
|
||||
if resultPtr == nil {
|
||||
return "", fmt.Errorf("univ_task_compatibility_fix failed")
|
||||
}
|
||||
|
||||
// Convert result to Go string and free C memory
|
||||
result := C.GoString(resultPtr)
|
||||
C.release_string(resultPtr)
|
||||
|
||||
return result, nil
|
||||
}
|
||||
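For context on how the bindings in lib.go above are typically consumed, here is a hypothetical caller-side sketch (not taken from the diff): the config path, proof file, and fork name are placeholders, and error handling is reduced to panics.

package main

import (
	"fmt"
	"os"

	"scroll-tech/coordinator/internal/logic/libzkp"
)

func main() {
	// Hypothetical verifier configuration consumed by init_verifier on the Rust side.
	cfg, err := os.ReadFile("conf/verifier.json")
	if err != nil {
		panic(err)
	}
	libzkp.InitVerifier(string(cfg))

	// Hypothetical chunk proof JSON produced by a prover.
	proofJSON, err := os.ReadFile("proofs/chunk-proof.json")
	if err != nil {
		panic(err)
	}
	fmt.Println("chunk proof valid:", libzkp.VerifyChunkProof(string(proofJSON), "euclidV2"))
}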
coordinator/internal/logic/libzkp/lib_mock.go (new file, 57 lines)
@@ -0,0 +1,57 @@
|
||||
//go:build mock_verifier
|
||||
|
||||
package libzkp
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// // InitVerifier is a no-op in the mock.
|
||||
// func InitVerifier(configJSON string) {}
|
||||
|
||||
// // VerifyChunkProof returns a fixed success in the mock.
|
||||
// func VerifyChunkProof(proofData, forkName string) bool {
|
||||
// return true
|
||||
// }
|
||||
|
||||
// // VerifyBatchProof returns a fixed success in the mock.
|
||||
// func VerifyBatchProof(proofData, forkName string) bool {
|
||||
// return true
|
||||
// }
|
||||
|
||||
// // VerifyBundleProof returns a fixed success in the mock.
|
||||
// func VerifyBundleProof(proofData, forkName string) bool {
|
||||
// return true
|
||||
// }
|
||||
|
||||
func UniversalTaskCompatibilityFix(taskJSON string) (string, error) {
|
||||
panic("should not run here")
|
||||
}
|
||||
|
||||
// GenerateWrappedProof returns a fixed dummy proof string in the mock.
|
||||
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
|
||||
|
||||
payload := struct {
|
||||
Metadata json.RawMessage `json:"metadata"`
|
||||
Proof json.RawMessage `json:"proof"`
|
||||
GitVersion string `json:"git_version"`
|
||||
}{
|
||||
Metadata: json.RawMessage(metadata),
|
||||
Proof: json.RawMessage(proofJSON),
|
||||
GitVersion: "mock-git-version",
|
||||
}
|
||||
|
||||
out, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return string(out)
|
||||
}
|
||||
|
||||
// DumpVk is a no-op and returns nil in the mock.
|
||||
func DumpVk(forkName, filePath string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetDynamicFeature is a no-op in the mock.
|
||||
func SetDynamicFeature(feats string) {}
|
||||
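Because lib.go is guarded by //go:build !mock_verifier and lib_mock.go by //go:build mock_verifier, the same package compiles against either the real cgo bindings or the mocks depending on build tags. A hypothetical tag-guarded test illustrating this mechanism (the test name and inputs are made up, not part of the diff):

//go:build mock_verifier

package libzkp

import (
	"encoding/json"
	"testing"
)

// Run with: go test -tags mock_verifier ./internal/logic/libzkp/...
func TestGenerateWrappedProofMock(t *testing.T) {
	// The mock wraps the given proof and metadata JSON into one JSON document.
	out := GenerateWrappedProof(`{"proof":"0x00"}`, `{"chunk_info":{}}`, nil)
	if !json.Valid([]byte(out)) {
		t.Fatalf("mock wrapped proof is not valid JSON: %s", out)
	}
}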
coordinator/internal/logic/libzkp/libzkp.h (new file, 62 lines)
@@ -0,0 +1,62 @@
// Verifier is used to:
// - Verify a batch proof
// - Verify a bundle proof
// - Verify a chunk proof

#ifndef LIBZKP_H
#define LIBZKP_H

#include <stddef.h> // For size_t

// Init log tracing
void init_tracing();

// Initialize the verifier with configuration
void init_verifier(char* config);

// Initialize the l2geth with configuration
void init_l2geth(char* config);

// Verify proofs - returns non-zero for success, zero for failure
char verify_batch_proof(char* proof, char* fork_name);
char verify_bundle_proof(char* proof, char* fork_name);
char verify_chunk_proof(char* proof, char* fork_name);

// Dump verification key to file
void dump_vk(char* fork_name, char* file);

// The result struct to hold data from handling a proving task
typedef struct {
char ok;
char* universal_task;
char* metadata;
char expected_pi_hash[32];
} HandlingResult;

// Generate a universal task based on task type and input JSON
// Returns a struct containing task data, metadata, and expected proof hash
HandlingResult gen_universal_task(
int task_type,
char* task,
char* fork_name,
const unsigned char* expected_vk,
size_t expected_vk_len,
const unsigned char* decryption_key,
size_t decryption_key_len
);

// Release memory allocated for a HandlingResult returned by gen_universal_task
void release_task_result(HandlingResult result);

// Generate a wrapped proof from the universal prover output and metadata
// Returns a JSON string containing the wrapped proof, or NULL on error
// The caller must call release_string on the returned pointer when done
char* gen_wrapped_proof(char* proof_json, char* metadata, char* vk, size_t vk_len);

// Release memory allocated for a string returned by gen_wrapped_proof
void release_string(char* string_ptr);

// Universal task compatibility fix function
char* univ_task_compatibility_fix(char* task_json);

#endif /* LIBZKP_H */
coordinator/internal/logic/libzkp/message_types.go (new file, 27 lines)
@@ -0,0 +1,27 @@
package libzkp

import (
"fmt"

"scroll-tech/common/types/message"
)

// TaskType enum values matching the Rust enum
const (
TaskTypeChunk = 0
TaskTypeBatch = 1
TaskTypeBundle = 2
)

func fromMessageTaskType(taskType int) int {
switch message.ProofType(taskType) {
case message.ProofTypeChunk:
return TaskTypeChunk
case message.ProofTypeBatch:
return TaskTypeBatch
case message.ProofTypeBundle:
return TaskTypeBundle
default:
panic(fmt.Sprintf("unsupported proof type: %d", taskType))
}
}
coordinator/internal/logic/libzkp/mock_universal_task.go (new file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
//go:build mock_verifier
|
||||
|
||||
package libzkp
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
)
|
||||
|
||||
func InitL2geth(configJSON string) {
|
||||
}
|
||||
|
||||
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
|
||||
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
|
||||
}
|
||||
|
||||
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
|
||||
|
||||
fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON)
|
||||
var metadata interface{}
|
||||
switch taskType {
|
||||
case TaskTypeChunk:
|
||||
metadata = struct {
|
||||
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
|
||||
}{ChunkInfo: &message.ChunkInfo{}}
|
||||
case TaskTypeBatch:
|
||||
metadata = struct {
|
||||
BatchInfo *message.OpenVMBatchInfo `json:"batch_info"`
|
||||
BatchHash common.Hash `json:"batch_hash"`
|
||||
}{BatchInfo: &message.OpenVMBatchInfo{}}
|
||||
case TaskTypeBundle:
|
||||
metadata = struct {
|
||||
BundleInfo *message.OpenVMBundleInfo `json:"bundle_info"`
|
||||
BundlePIHash common.Hash `json:"bundle_pi_hash"`
|
||||
}{BundleInfo: &message.OpenVMBundleInfo{}}
|
||||
}
|
||||
|
||||
encodeData, err := json.Marshal(metadata)
|
||||
if err != nil {
|
||||
fmt.Println("mock encoding json fail:", err)
|
||||
return false, "", "", nil
|
||||
}
|
||||
|
||||
return true, "UniversalTask data is not parsed", string(encodeData), []byte{0}
|
||||
}
|
||||
coordinator/internal/logic/libzkp/universal_task.go (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
//go:build !mock_verifier
|
||||
|
||||
package libzkp
|
||||
|
||||
/*
|
||||
#include <stdlib.h>
|
||||
#include "libzkp.h"
|
||||
*/
|
||||
import "C" //nolint:typecheck
|
||||
import (
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Initialize the handler for universal task
|
||||
func InitL2geth(configJSON string) {
|
||||
cConfig := goToCString(configJSON)
|
||||
defer freeCString(cConfig)
|
||||
|
||||
C.init_l2geth(cConfig)
|
||||
}
|
||||
|
||||
// Generate a universal task
|
||||
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
|
||||
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
|
||||
}
|
||||
|
||||
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
|
||||
cTask := goToCString(taskJSON)
|
||||
cForkName := goToCString(forkName)
|
||||
defer freeCString(cTask)
|
||||
defer freeCString(cForkName)
|
||||
|
||||
// Create a C array from Go slice
|
||||
var cVk *C.uchar
|
||||
if len(expectedVk) > 0 {
|
||||
cVk = (*C.uchar)(unsafe.Pointer(&expectedVk[0]))
|
||||
}
|
||||
|
||||
// Create a C array from Go slice
|
||||
var cDk *C.uchar
|
||||
if len(decryptionKey) > 0 {
|
||||
cDk = (*C.uchar)(unsafe.Pointer(&decryptionKey[0]))
|
||||
}
|
||||
|
||||
result := C.gen_universal_task(C.int(taskType), cTask, cForkName, cVk, C.size_t(len(expectedVk)), cDk, C.size_t(len(decryptionKey)))
|
||||
defer C.release_task_result(result)
|
||||
|
||||
// Check if the operation was successful
|
||||
if result.ok == 0 {
|
||||
return false, "", "", nil
|
||||
}
|
||||
|
||||
// Convert C strings to Go strings
|
||||
universalTask := C.GoString(result.universal_task)
|
||||
metadata := C.GoString(result.metadata)
|
||||
|
||||
// Convert C array to Go slice
|
||||
piHash := make([]byte, 32)
|
||||
for i := 0; i < 32; i++ {
|
||||
piHash[i] = byte(result.expected_pi_hash[i])
|
||||
}
|
||||
|
||||
return true, universalTask, metadata, piHash
|
||||
}
|
||||
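A hypothetical caller-side sketch of GenerateUniversalTask above, showing how the (ok, task, metadata, pi-hash) return values might be consumed; the task payload and the nil expected-VK / decryption-key arguments are placeholders rather than real coordinator inputs.

package main

import (
	"encoding/hex"
	"fmt"

	"scroll-tech/common/types/message"
	"scroll-tech/coordinator/internal/logic/libzkp"
)

func main() {
	taskJSON := `{"chunk_proofs":[]}` // placeholder batch task payload

	ok, universalTask, metadata, expectedPiHash := libzkp.GenerateUniversalTask(
		int(message.ProofTypeBatch), taskJSON, "euclidV2",
		nil /* expectedVk */, nil /* decryptionKey */)
	if !ok {
		panic("gen_universal_task failed")
	}

	fmt.Println("universal task:", universalTask)
	fmt.Println("metadata:", metadata)
	fmt.Println("expected PI hash:", hex.EncodeToString(expectedPiHash))
}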
@@ -36,12 +36,13 @@ type BatchProverTask struct {
|
||||
}
|
||||
|
||||
// NewBatchProverTask new a batch collector
|
||||
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProverTask {
|
||||
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *BatchProverTask {
|
||||
bp := &BatchProverTask{
|
||||
BaseProverTask: BaseProverTask{
|
||||
db: db,
|
||||
cfg: cfg,
|
||||
chainCfg: chainCfg,
|
||||
expectedVk: expectedVk,
|
||||
blockOrm: orm.NewL2Block(db),
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
batchOrm: orm.NewBatch(db),
|
||||
@@ -83,10 +84,37 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
for i := 0; i < 5; i++ {
|
||||
var getTaskError error
|
||||
var tmpBatchTask *orm.Batch
|
||||
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
|
||||
if taskCtx.hasAssignedTask != nil {
|
||||
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBatch) {
|
||||
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
|
||||
}
|
||||
|
||||
tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get batch has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
} else if tmpBatchTask == nil {
|
||||
// if the assigned batch has been dropped, it would be too problematic to assign another one here
|
||||
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped batch. ProverName: %s, ProverVersion: %s",
|
||||
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
|
||||
}
|
||||
} else if getTaskParameter.TaskID != "" {
|
||||
tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), getTaskParameter.TaskID)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get expected batch", "taskID", getTaskParameter.TaskID, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
} else if tmpBatchTask == nil {
|
||||
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
|
||||
}
|
||||
}
|
||||
|
||||
if tmpBatchTask == nil {
|
||||
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
}
|
||||
|
||||
// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
|
||||
@@ -114,29 +142,32 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Don't dispatch the same failing job to the same prover
|
||||
proverTasks, getFailedTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
|
||||
if getFailedTaskError != nil {
|
||||
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getFailedTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
for i := 0; i < len(proverTasks); i++ {
|
||||
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
|
||||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
|
||||
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBatchTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
|
||||
return nil, nil
|
||||
// we simply re-use the task that has already been assigned, so there is no need to update attempts or re-check earlier failures
|
||||
if taskCtx.hasAssignedTask == nil {
|
||||
// Don't dispatch the same failing job to the same prover
|
||||
proverTasks, getFailedTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
|
||||
if getFailedTaskError != nil {
|
||||
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getFailedTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
for i := 0; i < len(proverTasks); i++ {
|
||||
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
|
||||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
|
||||
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBatchTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
if rowsAffected == 0 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
continue
|
||||
if rowsAffected == 0 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
batchTask = tmpBatchTask
|
||||
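The loop above uses the attempts update as an optimistic lock: when UpdateBatchAttempts reports zero affected rows, another coordinator instance has already claimed the task, so the loop backs off briefly and retries. A minimal generic sketch of that pattern, with updateAttempts standing in for the ORM call (the names and return values here are illustrative, not the coordinator's API):

package main

import (
	"errors"
	"fmt"
	"time"
)

// updateAttempts is a hypothetical stand-in for batchOrm.UpdateBatchAttempts:
// it returns how many rows were updated under the optimistic condition.
func updateAttempts(index uint64, activeAttempts, totalAttempts int16) (int64, error) {
	return 1, nil // pretend the conditional update succeeded
}

func assignWithRetry(index uint64, active, total int16) error {
	for i := 0; i < 5; i++ {
		rowsAffected, err := updateAttempts(index, active, total)
		if err != nil {
			return err
		}
		if rowsAffected == 0 {
			// Another instance won the race; back off and try again.
			time.Sleep(100 * time.Millisecond)
			continue
		}
		fmt.Println("task locked for assignment")
		return nil
	}
	return errors.New("could not lock task after retries")
}

func main() {
	if err := assignWithRetry(42, 1, 3); err != nil {
		panic(err)
	}
}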
@@ -149,31 +180,59 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
}
|
||||
|
||||
log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
|
||||
proverTask := orm.ProverTask{
|
||||
TaskID: batchTask.Hash,
|
||||
ProverPublicKey: taskCtx.PublicKey,
|
||||
TaskType: int16(message.ProofTypeBatch),
|
||||
ProverName: taskCtx.ProverName,
|
||||
ProverVersion: taskCtx.ProverVersion,
|
||||
ProvingStatus: int16(types.ProverAssigned),
|
||||
FailureType: int16(types.ProverTaskFailureTypeUndefined),
|
||||
// here why need use UTC time. see scroll/common/database/db.go
|
||||
AssignedAt: utils.NowUTC(),
|
||||
var proverTask *orm.ProverTask
|
||||
if taskCtx.hasAssignedTask == nil {
|
||||
proverTask = &orm.ProverTask{
|
||||
TaskID: batchTask.Hash,
|
||||
ProverPublicKey: taskCtx.PublicKey,
|
||||
TaskType: int16(message.ProofTypeBatch),
|
||||
ProverName: taskCtx.ProverName,
|
||||
ProverVersion: taskCtx.ProverVersion,
|
||||
ProvingStatus: int16(types.ProverAssigned),
|
||||
FailureType: int16(types.ProverTaskFailureTypeUndefined),
|
||||
// here why need use UTC time. see scroll/common/database/db.go
|
||||
AssignedAt: utils.NowUTC(),
|
||||
}
|
||||
} else {
|
||||
proverTask = taskCtx.hasAssignedTask
|
||||
}
|
||||
|
||||
// Store session info.
|
||||
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
|
||||
bp.recoverActiveAttempts(ctx, batchTask)
|
||||
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, batchTask, hardForkName)
|
||||
taskMsg, err := bp.formatProverTask(ctx.Copy(), proverTask, batchTask, hardForkName)
|
||||
if err != nil {
|
||||
bp.recoverActiveAttempts(ctx, batchTask)
|
||||
log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
if getTaskParameter.Universal {
|
||||
var metadata []byte
|
||||
|
||||
taskMsg, metadata, err = bp.applyUniversal(taskMsg)
|
||||
if err != nil {
|
||||
bp.recoverActiveAttempts(ctx, batchTask)
|
||||
log.Error("Generate universal prover task failure", "task_id", batchTask.Hash, "type", "batch", "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
proverTask.Metadata = metadata
|
||||
|
||||
if isCompatibilityFixingVersion(taskCtx.ProverVersion) {
|
||||
log.Info("Apply compatibility fixing for prover", "version", taskCtx.ProverVersion)
|
||||
if err := fixCompatibility(taskMsg); err != nil {
|
||||
log.Error("apply compatibility failure", "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Store session info.
|
||||
if taskCtx.hasAssignedTask == nil {
|
||||
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), proverTask); err != nil {
|
||||
bp.recoverActiveAttempts(ctx, batchTask)
|
||||
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
}
|
||||
// notice uuid is set as a side effect of InsertProverTask
|
||||
taskMsg.UUID = proverTask.UUID.String()
|
||||
|
||||
bp.batchTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
|
||||
bp.batchTaskGetTaskProver.With(prometheus.Labels{
|
||||
@@ -198,45 +257,29 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
|
||||
}
|
||||
|
||||
var chunkProofs []*message.OpenVMChunkProof
|
||||
var chunkInfos []*message.ChunkInfo
|
||||
// var chunkInfos []*message.ChunkInfo
|
||||
for _, chunk := range chunks {
|
||||
var proof message.OpenVMChunkProof
|
||||
if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
|
||||
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, task.TaskID, chunk.Hash)
|
||||
}
|
||||
chunkProofs = append(chunkProofs, &proof)
|
||||
|
||||
chunkInfo := message.ChunkInfo{
|
||||
ChainID: bp.cfg.L2.ChainID,
|
||||
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
|
||||
PostStateRoot: common.HexToHash(chunk.StateRoot),
|
||||
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
|
||||
DataHash: common.HexToHash(chunk.Hash),
|
||||
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
|
||||
PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
|
||||
IsPadding: false,
|
||||
InitialBlockNumber: proof.MetaData.ChunkInfo.InitialBlockNumber,
|
||||
BlockCtxs: proof.MetaData.ChunkInfo.BlockCtxs,
|
||||
TxDataLength: proof.MetaData.ChunkInfo.TxDataLength,
|
||||
}
|
||||
chunkInfos = append(chunkInfos, &chunkInfo)
|
||||
}
|
||||
|
||||
taskDetail, err := bp.getBatchTaskDetail(batch, chunkInfos, chunkProofs, hardForkName)
|
||||
taskDetail, err := bp.getBatchTaskDetail(batch, chunkProofs, hardForkName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get batch task detail, taskID:%s err:%w", task.TaskID, err)
|
||||
}
|
||||
|
||||
chunkProofsBytes, err := json.Marshal(taskDetail)
|
||||
taskBytesWithchunkProofs, err := json.Marshal(taskDetail)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", task.TaskID, err)
|
||||
}
|
||||
|
||||
taskMsg := &coordinatorType.GetTaskSchema{
|
||||
UUID: task.UUID.String(),
|
||||
TaskID: task.TaskID,
|
||||
TaskType: int(message.ProofTypeBatch),
|
||||
TaskData: string(chunkProofsBytes),
|
||||
TaskData: string(taskBytesWithchunkProofs),
|
||||
HardForkName: hardForkName,
|
||||
}
|
||||
|
||||
@@ -251,44 +294,59 @@ func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *or
|
||||
}
|
||||
}
|
||||
|
||||
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []*message.OpenVMChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
|
||||
taskDetail := &message.BatchTaskDetail{
|
||||
ChunkInfos: chunkInfos,
|
||||
ChunkProofs: chunkProofs,
|
||||
}
|
||||
|
||||
if hardForkName == message.EuclidV2Fork {
|
||||
taskDetail.ForkName = message.EuclidV2ForkNameForProver
|
||||
} else {
|
||||
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
|
||||
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
|
||||
}
|
||||
|
||||
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
|
||||
switch dbBatchCodecVersion {
|
||||
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7:
|
||||
default:
|
||||
return taskDetail, nil
|
||||
}
|
||||
|
||||
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
|
||||
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkProofs []*message.OpenVMChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
|
||||
// Get the version byte.
|
||||
version, err := bp.version(hardForkName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
|
||||
return nil, fmt.Errorf("failed to decode version byte: %w", err)
|
||||
}
|
||||
|
||||
batchHeader, decodeErr := codec.NewDABatchFromBytes(dbBatch.BatchHeader)
|
||||
if decodeErr != nil {
|
||||
return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
|
||||
taskDetail := &message.BatchTaskDetail{
|
||||
Version: version,
|
||||
ChunkProofs: chunkProofs,
|
||||
ForkName: hardForkName,
|
||||
}
|
||||
taskDetail.BatchHeader = batchHeader
|
||||
|
||||
taskDetail.BlobBytes = dbBatch.BlobBytes
|
||||
taskDetail.ChallengeDigest = common.HexToHash(dbBatch.ChallengeDigest)
|
||||
// Memory layout of `BlobDataProof`: used in Codec.BlobDataProofForPointEvaluation()
|
||||
// | z | y | kzg_commitment | kzg_proof |
|
||||
// |---------|---------|----------------|-----------|
|
||||
// | bytes32 | bytes32 | bytes48 | bytes48 |
|
||||
taskDetail.KzgProof = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[112:160]))}
|
||||
taskDetail.KzgCommitment = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[64:112]))}
|
||||
if !bp.validiumMode() {
|
||||
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
|
||||
switch dbBatchCodecVersion {
|
||||
case 0:
|
||||
log.Warn("the codec version is 0, if it is not under integration test we have encountered an error here")
|
||||
return taskDetail, nil
|
||||
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
|
||||
default:
|
||||
return nil, fmt.Errorf("Unsupported codec version <%d>", dbBatchCodecVersion)
|
||||
}
|
||||
|
||||
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
|
||||
}
|
||||
|
||||
batchHeader, decodeErr := codec.NewDABatchFromBytes(dbBatch.BatchHeader)
|
||||
if decodeErr != nil {
|
||||
return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
|
||||
}
|
||||
taskDetail.BatchHeader = batchHeader
|
||||
|
||||
taskDetail.ChallengeDigest = common.HexToHash(dbBatch.ChallengeDigest)
|
||||
// Memory layout of `BlobDataProof`: used in Codec.BlobDataProofForPointEvaluation()
|
||||
// | z | y | kzg_commitment | kzg_proof |
|
||||
// |---------|---------|----------------|-----------|
|
||||
// | bytes32 | bytes32 | bytes48 | bytes48 |
|
||||
taskDetail.KzgProof = &message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[112:160]))}
|
||||
taskDetail.KzgCommitment = &message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[64:112]))}
|
||||
} else {
|
||||
log.Info("Apply validium mode for batch proving task")
|
||||
codec := cutils.FromVersion(version)
|
||||
batchHeader, decodeErr := codec.DABatchForTaskFromBytes(dbBatch.BatchHeader)
|
||||
if decodeErr != nil {
|
||||
return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
|
||||
}
|
||||
batchHeader.SetHash(common.HexToHash(dbBatch.Hash))
|
||||
taskDetail.BatchHeader = batchHeader
|
||||
}
|
||||
|
||||
return taskDetail, nil
|
||||
}
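The KzgCommitment/KzgProof assignments above rely on the fixed BlobDataProof layout documented in the comment (z | y | kzg_commitment | kzg_proof = 32 + 32 + 48 + 48 bytes). A small standalone sketch of that slicing, using a zeroed placeholder buffer:

package main

import "fmt"

func main() {
	// Placeholder 160-byte buffer with the BlobDataProof layout:
	// | z (32) | y (32) | kzg_commitment (48) | kzg_proof (48) |
	blobDataProof := make([]byte, 160)

	z := blobDataProof[0:32]
	y := blobDataProof[32:64]
	kzgCommitment := blobDataProof[64:112]
	kzgProof := blobDataProof[112:160]

	fmt.Println(len(z), len(y), len(kzgCommitment), len(kzgProof)) // 32 32 48 48
}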
Some files were not shown because too many files have changed in this diff.